If we are building a small kernel [1], don't inline all these functions.

Saves over 2k and lets i386 rescue_tiny build again.
[1] if MALLOC_NOINLINE is defined - not ideal but...
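
The idea, as a minimal sketch outside uvm_map.c (only the __INLINE/MALLOC_NOINLINE
pattern comes from the diff below; the helper function is hypothetical):

/*
 * When MALLOC_NOINLINE is defined, __INLINE expands to nothing, so the
 * compiler emits a single out-of-line copy of each static helper instead
 * of expanding it at every call site; otherwise the old __inline
 * behaviour is kept.
 */
#ifdef MALLOC_NOINLINE
#define	__INLINE
#else
#define	__INLINE	__inline
#endif

/* Hypothetical helper, standing in for the uvm_map.c functions. */
static __INLINE int
clamp(int v, int max)
{

	return (v > max) ? max : v;
}

How MALLOC_NOINLINE is defined for the small-kernel build is not shown in this
diff; presumably it comes from the kernel configuration or the rescue_tiny
build flags.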
dsl 2005-06-10 22:00:52 +00:00
parent 6ffd1dd8c1
commit d68ddde829
1 changed file with 26 additions and 18 deletions


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.c,v 1.200 2005/06/02 17:01:44 matt Exp $ */
/* $NetBSD: uvm_map.c,v 1.201 2005/06/10 22:00:52 dsl Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.200 2005/06/02 17:01:44 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.201 2005/06/10 22:00:52 dsl Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -101,6 +101,13 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.200 2005/06/02 17:01:44 matt Exp $");
#include <uvm/uvm_ddb.h>
#endif
/* If we are trying to build a small kernel, don't inline much here. */
#ifdef MALLOC_NOINLINE
#define __INLINE
#else
#define __INLINE __inline
#endif
#ifndef UVMMAP_NOCOUNTERS
#include <sys/device.h>
struct evcnt map_ubackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
@@ -293,7 +300,7 @@ uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b)
return (0);
}
static __inline void
static __INLINE void
uvm_rb_augment(struct vm_map_entry *entry)
{
@@ -334,7 +341,7 @@ uvm_rb_subtree_space(const struct vm_map_entry *entry)
return (space);
}
static __inline void
static __INLINE void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
/* We need to traverse to the very top */
@@ -344,7 +351,7 @@ uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
} while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
}
static __inline void
static __INLINE void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
vaddr_t space = uvm_rb_space(map, entry);
@@ -361,7 +368,7 @@ uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
uvm_rb_fixup(map, entry->prev);
}
static __inline void
static __INLINE void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
struct vm_map_entry *parent;
@@ -446,13 +453,15 @@ _uvm_tree_sanity(struct vm_map *map, const char *name)
* local inlines
*/
static __inline struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
#ifdef DIAGNOSTIC
static __INLINE struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
#endif
/*
* uvm_mapent_alloc: allocate a map entry
*/
static __inline struct vm_map_entry *
static __INLINE struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
struct vm_map_entry *me;
@@ -477,7 +486,7 @@ uvm_mapent_alloc(struct vm_map *map, int flags)
* uvm_mapent_alloc_split: allocate a map entry for clipping.
*/
static __inline struct vm_map_entry *
static __INLINE struct vm_map_entry *
uvm_mapent_alloc_split(struct vm_map *map,
const struct vm_map_entry *old_entry, int flags,
struct uvm_mapent_reservation *umr)
@@ -510,7 +519,7 @@ uvm_mapent_alloc_split(struct vm_map *map,
* uvm_mapent_free: free map entry
*/
static __inline void
static __INLINE void
uvm_mapent_free(struct vm_map_entry *me)
{
UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
@@ -531,7 +540,7 @@ uvm_mapent_free(struct vm_map_entry *me)
* => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
*/
static __inline void
static __INLINE void
uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
{
@@ -564,7 +573,7 @@ uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
* uvm_mapent_copy: copy a map entry, preserving flags
*/
static __inline void
static __INLINE void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{
@@ -578,7 +587,7 @@ uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
* => map should be locked by caller
*/
static __inline void
static __INLINE void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{
@@ -590,7 +599,7 @@ uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
/*
* wrapper for calling amap_ref()
*/
static __inline void
static __INLINE void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{
@@ -1426,7 +1435,7 @@ failed:
* entry->next->start and entry->end. Returns 1 if fits, 0 if doesn't
* fit, and -1 address wraps around.
*/
static __inline int
static __INLINE int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
vsize_t align, int topdown, struct vm_map_entry *entry)
{
@@ -4075,10 +4084,8 @@ struct uvm_kmapent_hdr {
#define UVM_KHDR_FIND(entry) \
((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
static __inline struct vm_map_entry *uvm_kmapent_get(struct uvm_kmapent_hdr *);
static __inline void uvm_kmapent_put(struct uvm_kmapent_hdr *,
struct vm_map_entry *);
#ifdef DIAGNOSTIC
static __inline struct vm_map *
uvm_kmapent_map(struct vm_map_entry *entry)
{
@@ -4087,6 +4094,7 @@ uvm_kmapent_map(struct vm_map_entry *entry)
ukh = UVM_KHDR_FIND(entry);
return ukh->ukh_map;
}
#endif
static __inline struct vm_map_entry *
uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)