2005-01-02 00:08:02 +03:00
|
|
|
/* $NetBSD: kern_malloc.c,v 1.93 2005/01/01 21:08:02 yamt Exp $ */
|
1994-06-29 10:29:24 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1994-05-13 12:32:17 +04:00
|
|
|
* Copyright (c) 1987, 1991, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2003-08-07 20:26:28 +04:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)kern_malloc.c 8.4 (Berkeley) 5/20/95
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
1993-03-21 12:45:37 +03:00
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the University of
|
|
|
|
* California, Berkeley and its contributors.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1998-03-01 05:20:01 +03:00
|
|
|
* @(#)kern_malloc.c 8.4 (Berkeley) 5/20/95
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
|
|
|
|
2001-11-12 18:25:01 +03:00
|
|
|
#include <sys/cdefs.h>
|
2005-01-02 00:08:02 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.93 2005/01/01 21:08:02 yamt Exp $");
|
2001-11-12 18:25:01 +03:00
|
|
|
|
1998-05-20 05:32:29 +04:00
|
|
|
#include "opt_lockdebug.h"
|
1998-02-10 17:08:44 +03:00
|
|
|
|
1993-12-18 06:59:02 +03:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/malloc.h>
|
1996-02-04 05:15:01 +03:00
|
|
|
#include <sys/systm.h>
|
1993-12-18 06:59:02 +03:00
|
|
|
|
1998-02-05 10:59:28 +03:00
|
|
|
#include <uvm/uvm_extern.h>
|
|
|
|
|
2005-01-02 00:02:12 +03:00
|
|
|
/*
 * Storage for the kernel submap from which malloc() carves its memory.
 * kmem_map stays NULL until it is pointed at kmem_map_store;
 * NOTE(review): presumably set up during VM bootstrap — confirm against
 * the kmeminit/uvm initialization code, which is outside this view.
 */
static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = NULL;
|
1998-02-05 10:59:28 +03:00
|
|
|
|
2000-02-11 22:22:52 +03:00
|
|
|
#include "opt_kmempages.h"
|
|
|
|
|
|
|
|
/*
 * NKMEMCLUSTERS is the obsolete way of sizing the kernel malloc arena;
 * fail the build loudly if a config still defines it.
 */
#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;	/* 0 => compute at run-time (see above) */

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
|
|
|
|
|
1997-02-03 00:22:16 +03:00
|
|
|
#include "opt_kmemstats.h"
|
1998-01-22 01:24:32 +03:00
|
|
|
#include "opt_malloclog.h"
|
2002-04-03 13:45:22 +04:00
|
|
|
#include "opt_malloc_debug.h"
|
1997-02-03 00:22:16 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
 * Global allocator state, all protected by malloc_slock (declared below):
 *
 * bucket[]	 one free-list bucket per power-of-two allocation size,
 *		 indexed by BUCKETINDX(size) - see malloc()/free() below.
 * kmemusage	 per-page bookkeeping looked up via btokup();
 *		 NOTE(review): presumably allocated at init time — the
 *		 initialization is not visible in this file section.
 * kmembase/kmemlimit
 *		 NOTE(review): look like the bounds of the malloc arena;
 *		 they are not set anywhere in this view — confirm.
 * kmemstatistics
 *		 head of the chain of malloc_type records (KMEMSTATS).
 */
struct kmembuckets bucket[MINBUCKET + 16];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;

struct malloc_type *kmemstatistics;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
1998-01-22 01:24:32 +03:00
|
|
|
#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000		/* number of entries kept in the log */
#endif

/*
 * Circular in-kernel log of malloc/free events, used to diagnose
 * freelist corruption and double frees (see domlog()/hitmlog()).
 */
struct malloclog {
	void *addr;			/* address allocated or freed */
	long size;			/* requested size (0 on free) */
	struct malloc_type *type;	/* malloc type involved */
	int action;			/* 1 = alloc, 2 = free (see domlog callers) */
	const char *file;		/* caller's source file */
	long line;			/* caller's source line */
} malloclog[MALLOCLOGSIZE];

/* Index of the next slot to overwrite; wraps at MALLOCLOGSIZE. */
long	malloclogptr;
|
|
|
|
|
|
|
|
static void
|
2003-02-01 09:23:35 +03:00
|
|
|
domlog(void *a, long size, struct malloc_type *type, int action,
|
|
|
|
const char *file, long line)
|
1998-01-22 01:24:32 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
malloclog[malloclogptr].addr = a;
|
|
|
|
malloclog[malloclogptr].size = size;
|
|
|
|
malloclog[malloclogptr].type = type;
|
|
|
|
malloclog[malloclogptr].action = action;
|
|
|
|
malloclog[malloclogptr].file = file;
|
|
|
|
malloclog[malloclogptr].line = line;
|
|
|
|
malloclogptr++;
|
|
|
|
if (malloclogptr >= MALLOCLOGSIZE)
|
|
|
|
malloclogptr = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2001-12-05 02:56:36 +03:00
|
|
|
hitmlog(void *a)
|
1998-01-22 01:24:32 +03:00
|
|
|
{
|
|
|
|
struct malloclog *lp;
|
|
|
|
long l;
|
|
|
|
|
2001-12-05 02:56:36 +03:00
|
|
|
#define PRT do { \
|
2003-09-28 03:10:47 +04:00
|
|
|
lp = &malloclog[l]; \
|
|
|
|
if (lp->addr == a && lp->action) { \
|
1998-01-22 01:24:32 +03:00
|
|
|
printf("malloc log entry %ld:\n", l); \
|
|
|
|
printf("\taddr = %p\n", lp->addr); \
|
|
|
|
printf("\tsize = %ld\n", lp->size); \
|
2003-02-01 09:23:35 +03:00
|
|
|
printf("\ttype = %s\n", lp->type->ks_shortdesc); \
|
1998-01-22 01:24:32 +03:00
|
|
|
printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
|
|
|
|
printf("\tfile = %s\n", lp->file); \
|
|
|
|
printf("\tline = %ld\n", lp->line); \
|
2001-12-05 02:56:36 +03:00
|
|
|
} \
|
|
|
|
} while (/* CONSTCOND */0)
|
1998-01-22 01:24:32 +03:00
|
|
|
|
|
|
|
for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
|
2001-12-05 02:56:36 +03:00
|
|
|
PRT;
|
1998-01-22 01:24:32 +03:00
|
|
|
|
|
|
|
for (l = 0; l < malloclogptr; l++)
|
2001-12-05 02:56:36 +03:00
|
|
|
PRT;
|
2003-09-28 03:10:47 +04:00
|
|
|
#undef PRT
|
1998-01-22 01:24:32 +03:00
|
|
|
}
|
|
|
|
#endif /* MALLOCLOG */
|
|
|
|
|
1994-05-13 12:32:17 +04:00
|
|
|
#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 * addrmask[i] has the low i bits set; free() indexes it by bucket
 * index to verify a returned pointer is aligned to its bucket size.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
/* MAX_COPY: how many leading bytes of a free object get the pattern. */
#ifdef DEBUG
#define	MAX_COPY	PAGE_SIZE
#else
#define	MAX_COPY	32
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8/16 bytes of the structure is unused except for diagnostic information,
 * and the free list pointer is at offset 8/16 in the structure.  Since the
 * first 8 bytes is the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	uint32_t spare0;
#ifdef _LP64
	uint32_t spare1;		/* explicit padding */
#endif
	struct malloc_type *type;	/* type this object was last freed as */
	caddr_t	next;			/* next free object in the bucket */
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;			/* next free object in the bucket */
};
#endif /* DIAGNOSTIC */
|
|
|
|
|
2003-02-01 09:23:35 +03:00
|
|
|
/*
 * The following are standard, built-in malloc types that are not
 * specific to any one subsystem.
 */
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_DMAMAP, "DMA map", "bus_dma(9) structures");
MALLOC_DEFINE(M_FREE, "free", "should be on free list");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
MALLOC_DEFINE(M_SOFTINTR, "softintr", "Softinterrupt structures");
MALLOC_DEFINE(M_TEMP, "temp", "misc. temporary data buffers");

/* XXX These should all be elsewhere. */
MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
MALLOC_DEFINE(M_FTABLE, "fragtbl", "fragment reassembly header");
MALLOC_DEFINE(M_UFSMNT, "UFS mount", "UFS mount structure");
MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "internet multicast options");
MALLOC_DEFINE(M_IPMADDR, "in_multi", "internet multicast address");
MALLOC_DEFINE(M_MRTABLE, "mrt", "multicast routing tables");
MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
MALLOC_DEFINE(M_1394DATA, "1394data", "IEEE 1394 data buffers");

/* Lock covering the buckets, kmemusage and the KMEMSTATS counters. */
struct simplelock malloc_slock = SIMPLELOCK_INITIALIZER;
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
|
|
|
* Allocate a block of memory
|
|
|
|
*/
|
1998-01-22 01:24:32 +03:00
|
|
|
#ifdef MALLOCLOG
void *
_malloc(unsigned long size, struct malloc_type *ksp, int flags,
    const char *file, long line)
#else
void *
malloc(unsigned long size, struct malloc_type *ksp, int flags)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	uint32_t *end, *lp;
	int copysize;
#endif

#ifdef LOCKDEBUG
	/* A sleeping allocation must not be done with simple locks held. */
	if ((flags & M_NOWAIT) == 0)
		simple_lock_only_held(NULL, "malloc");
#endif
#ifdef MALLOC_DEBUG
	/* Debug allocator may satisfy (or fail) the request entirely. */
	if (debug_malloc(size, ksp, flags, (void *) &va))
		return ((void *) va);
#endif

	/* Pick the power-of-two bucket that fits this request. */
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splvm();
	simple_lock(&malloc_slock);
#ifdef KMEMSTATS
	/*
	 * Enforce the per-type memory limit: fail immediately for
	 * M_NOWAIT callers, otherwise sleep until free() wakes us.
	 */
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			simple_unlock(&malloc_slock);
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		ltsleep((caddr_t)ksp, PSWP+2, ksp->ks_shortdesc, 0,
			&malloc_slock);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	/* Only the first copysize bytes carry/verify the WEIRD_ADDR pattern. */
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		/* Bucket is empty: refill it from kmem_map. */
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		/* Drop the lock across the (possibly sleeping) VM call. */
		simple_unlock(&malloc_slock);
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, NULL,
		    (vsize_t)ctob(npg),
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0));
		if (__predict_false(va == NULL)) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return (NULL);
		}
		simple_lock(&malloc_slock);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			/*
			 * Large allocation: the whole mapping is handed
			 * to the caller; record the page count so free()
			 * knows how much to give back.
			 */
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		/* Chain the new chunks together, last chunk first. */
		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (uint32_t *)&cp[copysize];
			for (lp = (uint32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	/* Pop the first free object from the bucket. */
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	/* XXX potential to get garbage pointer here. */
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		/*
		 * Sanity-check the next pointer we just unlinked: it must
		 * at least point at writable kmem_map memory, otherwise
		 * the freelist was corrupted.
		 */
		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (__predict_false(rv == 0)) {
			printf("Data modified on freelist: "
			    "word %ld of object %p size %ld previous type %s "
			    "(invalid addr %p)\n",
			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
			    va, size, "foo", kbp->kb_next);
#ifdef MALLOCLOG
			hitmlog(va);
#endif
			/* Discard the bad chain; the bucket refills later. */
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#ifdef _LP64
	freep->type = (struct malloc_type *)
	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
#else
	freep->type = (struct malloc_type *) WEIRD_ADDR;
#endif
	end = (uint32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (uint32_t *)&va[copysize];
	for (lp = (uint32_t *)va; lp < end; lp++) {
		if (__predict_true(*lp == WEIRD_ADDR))
			continue;
		printf("Data modified on freelist: "
		    "word %ld of object %p size %ld previous type %s "
		    "(0x%x != 0x%x)\n",
		    (long)(lp - (uint32_t *)va), va, size,
		    "bar", *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, ksp, 1, file, line);
#endif
	simple_unlock(&malloc_slock);
	splx(s);
	if ((flags & M_ZERO) != 0)
		memset(va, 0, size);
	return ((void *) va);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free a block of memory allocated by malloc.
|
|
|
|
*/
|
1998-01-22 01:24:32 +03:00
|
|
|
#ifdef MALLOCLOG
void
_free(void *addr, struct malloc_type *ksp, const char *file, long line)
#else
void
free(void *addr, struct malloc_type *ksp)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif

#ifdef MALLOC_DEBUG
	/* If the debug allocator owns this block, it handles the free. */
	if (debug_free(addr, ksp))
		return;
#endif

#ifdef DIAGNOSTIC
	/*
	 * Ensure that we're free'ing something that we could
	 * have allocated in the first place.  That is, check
	 * to see that the address is within kmem_map.
	 */
	if (__predict_false((vaddr_t)addr < vm_map_min(kmem_map) ||
	    (vaddr_t)addr >= vm_map_max(kmem_map)))
		panic("free: addr %p not within kmem_map", addr);
#endif

	/* Recover the bucket index recorded at allocation time. */
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splvm();
	simple_lock(&malloc_slock);
#ifdef MALLOCLOG
	domlog(addr, 0, ksp, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		/* Large allocation: return the pages straight to the map. */
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/*
		 * If this free takes the type back under its limit,
		 * wake anyone sleeping in malloc() on that limit.
		 */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
#ifdef DIAGNOSTIC
		if (ksp->ks_inuse == 0)
			panic("free 1: inuse 0, probable double free");
#endif
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		simple_unlock(&malloc_slock);
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck(addr, (char *)addr + size);
#endif
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = ksp;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	/* Wake malloc() sleepers if the type just dropped under its limit. */
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
#ifdef DIAGNOSTIC
	if (ksp->ks_inuse == 0)
		panic("free 2: inuse 0, probable double free");
#endif
	ksp->ks_inuse--;
#endif
	/* Append the object to the tail of its bucket's freelist. */
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	simple_unlock(&malloc_slock);
	splx(s);
}
|
|
|
|
|
1996-08-28 00:01:42 +04:00
|
|
|
/*
|
|
|
|
* Change the size of a block of memory.
|
|
|
|
*/
|
|
|
|
/*
 * Change the size of a block of memory.
 *
 * Semantics match the usual realloc() contract: a NULL curaddr acts
 * like malloc(), a zero newsize acts like free().  On allocation
 * failure (only possible when flags include M_NOWAIT) NULL is
 * returned and the original block remains valid and untouched.
 */
void *
realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
    int flags)
{
	struct kmemusage *kup;
	unsigned long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, ksp, flags));

	/*
	 * realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, ksp);
		return (NULL);
	}

#ifdef LOCKDEBUG
	/* A blocking allocation must not be attempted with locks held. */
	if ((flags & M_NOWAIT) == 0)
		simple_lock_only_held(NULL, "realloc");
#endif

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).  For bucketed allocations the size is
	 * the bucket's power of two.
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: "
		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
		    curaddr, cursize, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */

	/*
	 * Large allocations are not bucketed; their real size is the
	 * page count recorded in the kmemusage entry.
	 */
	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If we already actually have as much as they want, we're done.
	 * (Note: shrinking requests keep the existing, larger block.)
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, ksp, flags);
	if (__predict_false(newaddr == NULL)) {
		/*
		 * malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return (NULL);
	}
	memcpy(newaddr, curaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, ksp);
	return (newaddr);
}
|
|
|
|
|
2001-12-05 04:29:04 +03:00
|
|
|
/*
|
|
|
|
* Roundup size to the actual allocation size.
|
|
|
|
*/
|
|
|
|
unsigned long
|
|
|
|
malloc_roundup(unsigned long size)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (size > MAXALLOCSAVE)
|
|
|
|
return (roundup(size, PAGE_SIZE));
|
|
|
|
else
|
|
|
|
return (1 << BUCKETINDX(size));
|
|
|
|
}
|
|
|
|
|
2003-02-01 09:23:35 +03:00
|
|
|
/*
|
|
|
|
* Add a malloc type to the system.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
malloc_type_attach(struct malloc_type *type)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (nkmempages == 0)
|
|
|
|
panic("malloc_type_attach: nkmempages == 0");
|
|
|
|
|
|
|
|
if (type->ks_magic != M_MAGIC)
|
|
|
|
panic("malloc_type_attach: bad magic");
|
|
|
|
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
{
|
|
|
|
struct malloc_type *ksp;
|
|
|
|
for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
|
|
|
|
if (ksp == type)
|
|
|
|
panic("malloc_type_attach: already on list");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef KMEMSTATS
|
|
|
|
if (type->ks_limit == 0)
|
|
|
|
type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
|
|
|
|
#else
|
|
|
|
type->ks_limit = 0;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
type->ks_next = kmemstatistics;
|
|
|
|
kmemstatistics = type;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove a malloc type from the system..
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
malloc_type_detach(struct malloc_type *type)
|
|
|
|
{
|
|
|
|
struct malloc_type *ksp;
|
|
|
|
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (type->ks_magic != M_MAGIC)
|
|
|
|
panic("malloc_type_detach: bad magic");
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (type == kmemstatistics)
|
|
|
|
kmemstatistics = type->ks_next;
|
|
|
|
else {
|
|
|
|
for (ksp = kmemstatistics; ksp->ks_next != NULL;
|
|
|
|
ksp = ksp->ks_next) {
|
|
|
|
if (ksp->ks_next == type) {
|
|
|
|
ksp->ks_next = type->ks_next;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (ksp->ks_next == NULL)
|
|
|
|
panic("malloc_type_detach: not on list");
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
type->ks_next = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the limit on a malloc type.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
malloc_type_setlimit(struct malloc_type *type, u_long limit)
|
|
|
|
{
|
|
|
|
#ifdef KMEMSTATS
|
|
|
|
int s;
|
|
|
|
|
|
|
|
s = splvm();
|
|
|
|
type->ks_limit = limit;
|
|
|
|
splx(s);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2000-02-11 22:22:52 +03:00
|
|
|
/*
|
|
|
|
* Compute the number of pages that kmem_map will map, that is,
|
|
|
|
* the size of the kernel malloc arena.
|
|
|
|
*/
|
|
|
|
void
|
2001-12-05 02:56:36 +03:00
|
|
|
kmeminit_nkmempages(void)
|
2000-02-11 22:22:52 +03:00
|
|
|
{
|
|
|
|
int npages;
|
|
|
|
|
|
|
|
if (nkmempages != 0) {
|
|
|
|
/*
|
|
|
|
* It's already been set (by us being here before, or
|
|
|
|
* by patching or kernel config options), bail out now.
|
|
|
|
*/
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We use the following (simple) formula:
|
|
|
|
*
|
|
|
|
* - Starting point is physical memory / 4.
|
|
|
|
*
|
|
|
|
* - Clamp it down to NKMEMPAGES_MAX.
|
|
|
|
*
|
|
|
|
* - Round it up to NKMEMPAGES_MIN.
|
|
|
|
*/
|
|
|
|
npages = physmem / 4;
|
|
|
|
|
|
|
|
if (npages > NKMEMPAGES_MAX)
|
|
|
|
npages = NKMEMPAGES_MAX;
|
|
|
|
|
|
|
|
if (npages < NKMEMPAGES_MIN)
|
|
|
|
npages = NKMEMPAGES_MIN;
|
|
|
|
|
|
|
|
nkmempages = npages;
|
|
|
|
}
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
|
|
|
* Initialize the kernel memory allocator
|
|
|
|
*/
|
1996-02-04 05:15:01 +03:00
|
|
|
/*
 * Initialize the kernel memory allocator.
 *
 * Validates the compile-time bucket parameters, computes the arena
 * size, carves kmem_map out of kernel_map, initializes the bucket
 * statistics, and attaches all statically-linked malloc types.
 * Must run before any malloc() call.
 */
void
kmeminit(void)
{
	__link_set_decl(malloc_types, struct malloc_type);
	struct malloc_type * const *ksp;
	vaddr_t kmb, kml;
#ifdef KMEMSTATS
	long indx;
#endif

	/*
	 * Compile-time sanity traps: each invalid configuration
	 * produces an (intentionally) undeclared identifier.
	 */
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < NBPG)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	/* The free-list header must fit in the smallest bucket. */
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

	/* One kmemusage record per arena page, zero-filled. */
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)));
	kmb = 0;
	/* Carve the malloc arena (kmem_map) out of kernel_map. */
	kmem_map = uvm_km_suballoc(kernel_map, &kmb,
	    &kml, (vsize_t)(nkmempages << PAGE_SHIFT),
	    VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
	uvm_km_vacache_init(kmem_map, "kvakmem", 0);
	kmembase = (char *)kmb;
	kmemlimit = (char *)kml;
#ifdef KMEMSTATS
	/*
	 * Elements per cluster: one for page-or-larger buckets,
	 * otherwise how many chunks fit in a page.  High-water mark
	 * is five clusters' worth.
	 */
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
#endif

	/* Attach all of the statically-linked malloc types. */
	__link_set_foreach(ksp, malloc_types)
		malloc_type_attach(*ksp);

#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}
|
1998-12-02 23:35:28 +03:00
|
|
|
|
|
|
|
#ifdef DDB
|
|
|
|
#include <ddb/db_output.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Dump kmem statistics from ddb.
|
|
|
|
*
|
|
|
|
* usage: call dump_kmemstats
|
|
|
|
*/
|
2001-12-05 02:56:36 +03:00
|
|
|
void dump_kmemstats(void);

/*
 * Dump kmem statistics from ddb.
 *
 * usage: call dump_kmemstats
 */
void
dump_kmemstats(void)
{
#ifdef KMEMSTATS
	struct malloc_type *mt;

	/* One line per type with memory currently in use. */
	for (mt = kmemstatistics; mt != NULL; mt = mt->ks_next) {
		if (mt->ks_memuse != 0) {
			/* Pad the short description out to 20 columns. */
			db_printf("%s%.*s %ld\n", mt->ks_shortdesc,
			    (int)(20 - strlen(mt->ks_shortdesc)),
			    "                    ",
			    mt->ks_memuse);
		}
	}
#else
	db_printf("Kmem stats are not being collected.\n");
#endif /* KMEMSTATS */
}
|
|
|
|
#endif /* DDB */
|
2003-08-27 01:48:53 +04:00
|
|
|
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
/*
|
|
|
|
* Diagnostic messages about "Data modified on
|
|
|
|
* freelist" indicate a memory corruption, but
|
|
|
|
* they do not help tracking it down.
|
|
|
|
* This function can be called at various places
|
|
|
|
* to sanity check malloc's freelist and discover
|
|
|
|
* where does the corruption take place.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
freelist_sanitycheck(void) {
|
|
|
|
int i,j;
|
|
|
|
struct kmembuckets *kbp;
|
|
|
|
struct freelist *freep;
|
|
|
|
int rv = 0;
|
|
|
|
|
|
|
|
for (i = MINBUCKET; i <= MINBUCKET + 15; i++) {
|
|
|
|
kbp = &bucket[i];
|
|
|
|
freep = (struct freelist *)kbp->kb_next;
|
|
|
|
j = 0;
|
|
|
|
while(freep) {
|
|
|
|
vm_map_lock(kmem_map);
|
|
|
|
rv = uvm_map_checkprot(kmem_map, (vaddr_t)freep,
|
|
|
|
(vaddr_t)freep + sizeof(struct freelist),
|
|
|
|
VM_PROT_WRITE);
|
|
|
|
vm_map_unlock(kmem_map);
|
|
|
|
|
|
|
|
if ((rv == 0) || (*(int *)freep != WEIRD_ADDR)) {
|
|
|
|
printf("bucket %i, chunck %d at %p modified\n",
|
|
|
|
i, j, freep);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
freep = (struct freelist *)freep->next;
|
|
|
|
j++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|