Revert previous commit; it is not yet fully functional, sorry.

This commit is contained in:
para 2013-01-26 15:18:00 +00:00
parent cca299e0a3
commit 39dafdefa9
4 changed files with 190 additions and 76 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $ */
/* $NetBSD: subr_vmem.c,v 1.79 2013/01/26 15:18:00 para Exp $ */
/*-
* Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.79 2013/01/26 15:18:00 para Exp $");
#if defined(_KERNEL)
#include "opt_ddb.h"
@ -53,7 +53,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $");
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
@ -62,13 +61,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $");
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "../sys/vmem.h"
#include "../sys/vmem_impl.h"
#endif /* defined(_KERNEL) */
@ -85,23 +78,28 @@ VMEM_EVCNT_DEFINE(bt_pages)
VMEM_EVCNT_DEFINE(bt_count)
VMEM_EVCNT_DEFINE(bt_inuse)
#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
#define LOCK_DECL(name) \
kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]
#define CONDVAR_DECL(name) \
kcondvar_t name
#else /* defined(_KERNEL) */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#define VMEM_EVCNT_INCR(ev) /* nothing */
#define VMEM_EVCNT_DECR(ev) /* nothing */
#define VMEM_CONDVAR_INIT(vm, wchan) /* nothing */
#define VMEM_CONDVAR_DESTROY(vm) /* nothing */
#define VMEM_CONDVAR_WAIT(vm) /* nothing */
#define VMEM_CONDVAR_BROADCAST(vm) /* nothing */
#define UNITTEST
#define KASSERT(a) assert(a)
#define LOCK_DECL(name) /* nothing */
#define CONDVAR_DECL(name) /* nothing */
#define VMEM_CONDVAR_INIT(vm, wchan) /* nothing */
#define VMEM_CONDVAR_BROADCAST(vm) /* nothing */
#define mutex_init(a, b, c) /* nothing */
#define mutex_destroy(a) /* nothing */
#define mutex_enter(a) /* nothing */
@ -112,25 +110,74 @@ VMEM_EVCNT_DEFINE(bt_inuse)
#define panic(...) printf(__VA_ARGS__); abort()
#endif /* defined(_KERNEL) */
struct vmem;
struct vmem_btag;
#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define vmem_check(vm) /* nothing */
#endif /* defined(VMEM_SANITY) */
#define VMEM_MAXORDER (sizeof(vmem_size_t) * CHAR_BIT)
#define VMEM_HASHSIZE_MIN 1 /* XXX */
#define VMEM_HASHSIZE_MAX 65536 /* XXX */
#define VMEM_HASHSIZE_INIT 1
#define VM_FITMASK (VM_BESTFIT | VM_INSTANTFIT)
#if defined(_KERNEL)
static bool vmem_bootstrapped = false;
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */
CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);
/* ---- misc */
#if defined(QCACHE)
#define VMEM_QCACHE_IDX_MAX 32
#define QC_NAME_MAX 16
struct qcache {
pool_cache_t qc_cache;
vmem_t *qc_vmem;
char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache))
#endif /* defined(QCACHE) */
#define VMEM_NAME_MAX 16
/* vmem arena */
struct vmem {
CONDVAR_DECL(vm_cv);
LOCK_DECL(vm_lock);
vm_flag_t vm_flags;
vmem_import_t *vm_importfn;
vmem_release_t *vm_releasefn;
size_t vm_nfreetags;
LIST_HEAD(, vmem_btag) vm_freetags;
void *vm_arg;
struct vmem_seglist vm_seglist;
struct vmem_freelist vm_freelist[VMEM_MAXORDER];
size_t vm_hashsize;
size_t vm_nbusytag;
struct vmem_hashlist *vm_hashlist;
struct vmem_hashlist vm_hash0;
size_t vm_quantum_mask;
int vm_quantum_shift;
size_t vm_size;
size_t vm_inuse;
char vm_name[VMEM_NAME_MAX+1];
LIST_ENTRY(vmem) vm_alllist;
#if defined(QCACHE)
/* quantum cache */
size_t vm_qcache_max;
struct pool_allocator vm_qcache_allocator;
qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};
#define VMEM_LOCK(vm) mutex_enter(&vm->vm_lock)
#define VMEM_TRYLOCK(vm) mutex_tryenter(&vm->vm_lock)
@ -139,6 +186,44 @@ static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#define VMEM_LOCK_DESTROY(vm) mutex_destroy(&vm->vm_lock)
#define VMEM_ASSERT_LOCKED(vm) KASSERT(mutex_owned(&vm->vm_lock))
#if defined(_KERNEL)
#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
#endif /* defined(_KERNEL) */
/* boundary tag */
struct vmem_btag {
CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
union {
LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
} bt_u;
#define bt_hashlist bt_u.u_hashlist
#define bt_freelist bt_u.u_freelist
vmem_addr_t bt_start;
vmem_size_t bt_size;
int bt_type;
};
#define BT_TYPE_SPAN 1
#define BT_TYPE_SPAN_STATIC 2
#define BT_TYPE_FREE 3
#define BT_TYPE_BUSY 4
#define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
#define BT_END(bt) ((bt)->bt_start + (bt)->bt_size - 1)
typedef struct vmem_btag bt_t;
#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */
/* ---- misc */
#define VMEM_ALIGNUP(addr, align) \
(-(-(addr) & -(align)))
@ -156,26 +241,36 @@ static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#else /* defined(_KERNEL) */
#define xmalloc(sz, flags) \
kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
#define xfree(p, sz) kmem_free(p, sz);
kmem_intr_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
#define xfree(p, sz) kmem_intr_free(p, sz);
/*
* Memory for arenas initialized during bootstrap.
* There is memory for STATIC_VMEM_COUNT bootstrap arenas.
*
* BT_RESERVE calculation:
* we allocate memory for boundary tags with vmem, therefore we have
* to keep a reserve of bts used to allocate memory for bts.
* This reserve is 4 for each arena involved in allocating vmem's memory.
* BT_MAXFREE: don't cache excessive counts of bts in arenas
*/
#define STATIC_VMEM_COUNT 4
#define STATIC_BT_COUNT 200
#define BT_MINRESERVE 4
#define BT_MAXFREE 64
/* must be equal to or greater than the qcache multiplier for kmem_va_arena */
#define STATIC_QC_POOL_COUNT 8
static struct vmem static_vmems[STATIC_VMEM_COUNT];
static int static_vmem_count = STATIC_VMEM_COUNT;
static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;
static struct vmem kmem_va_meta_arena_store;
static struct pool_cache static_qc_pools[STATIC_QC_POOL_COUNT];
static int static_qc_pool_count = STATIC_QC_POOL_COUNT;
vmem_t *kmem_va_meta_arena;
static struct vmem kmem_meta_arena_store;
vmem_t *kmem_meta_arena;
static kmutex_t vmem_refill_lock;
@ -557,17 +652,30 @@ qc_init(vmem_t *vm, size_t qcache_max, int ipl)
snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
vm->vm_name, size);
pc = pool_cache_init(size,
ORDER2SIZE(vm->vm_quantum_shift), 0,
PR_NOALIGN | PR_NOTOUCH /* XXX */,
qc->qc_name, pa, ipl, NULL, NULL, NULL);
if (vm->vm_flags & VM_BOOTSTRAP) {
KASSERT(static_qc_pool_count > 0);
pc = &static_qc_pools[--static_qc_pool_count];
pool_cache_bootstrap(pc, size,
ORDER2SIZE(vm->vm_quantum_shift), 0,
PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
qc->qc_name, pa, ipl, NULL, NULL, NULL);
} else {
pc = pool_cache_init(size,
ORDER2SIZE(vm->vm_quantum_shift), 0,
PR_NOALIGN | PR_NOTOUCH /* XXX */,
qc->qc_name, pa, ipl, NULL, NULL, NULL);
}
qc->qc_cache = pc;
KASSERT(qc->qc_cache != NULL); /* XXX */
if (prevqc != NULL &&
qc->qc_cache->pc_pool.pr_itemsperpage ==
prevqc->qc_cache->pc_pool.pr_itemsperpage) {
pool_cache_destroy(qc->qc_cache);
if (vm->vm_flags & VM_BOOTSTRAP) {
pool_cache_bootstrap_destroy(pc);
//static_qc_pool_count++;
} else {
pool_cache_destroy(qc->qc_cache);
}
vm->vm_qcache[i - 1] = prevqc;
continue;
}
@ -592,14 +700,18 @@ qc_destroy(vmem_t *vm)
if (prevqc == qc) {
continue;
}
pool_cache_destroy(qc->qc_cache);
if (vm->vm_flags & VM_BOOTSTRAP) {
pool_cache_bootstrap_destroy(qc->qc_cache);
} else {
pool_cache_destroy(qc->qc_cache);
}
prevqc = qc;
}
}
#endif
#if defined(_KERNEL)
static void
void
vmem_bootstrap(void)
{
@ -613,20 +725,18 @@ vmem_bootstrap(void)
VMEM_EVCNT_INCR(bt_count);
vmem_btag_freelist_count++;
}
vmem_bootstrapped = TRUE;
}
void
vmem_create_arenas(vmem_t *vm)
vmem_init(vmem_t *vm)
{
kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
kmem_va_meta_arena = vmem_create("vmem-va", 0, 0, PAGE_SIZE,
vmem_alloc, vmem_free, vm,
0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
IPL_VM);
kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
0, 0, PAGE_SIZE,
kmem_meta_arena = vmem_create("vmem-meta", 0, 0, PAGE_SIZE,
uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
}
@ -708,7 +818,6 @@ vmem_destroy1(vmem_t *vm)
mutex_exit(&vmem_btag_lock);
}
VMEM_CONDVAR_DESTROY(vm);
VMEM_LOCK_DESTROY(vm);
xfree(vm, sizeof(*vm));
}
@ -850,32 +959,29 @@ vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
return ENOMEM;
}
/* ---- vmem API */
/*
* vmem_create_internal: creates a vmem arena.
*/
vmem_t *
vmem_init(vmem_t *vm, const char *name,
vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
vmem_import_t *importfn, vmem_release_t *releasefn,
vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
static vmem_t *
vmem_create_internal(const char *name, vmem_addr_t base, vmem_size_t size,
vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
void *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
vmem_t *vm = NULL;
int i;
KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
KASSERT(quantum > 0);
if (flags & VM_BOOTSTRAP) {
#if defined(_KERNEL)
/* XXX: SMP, we get called early... */
if (!vmem_bootstrapped) {
vmem_bootstrap();
}
KASSERT(static_vmem_count > 0);
vm = &static_vmems[--static_vmem_count];
#endif /* defined(_KERNEL) */
if (vm == NULL) {
} else {
vm = xmalloc(sizeof(*vm), flags);
}
if (vm == NULL) {
@ -905,9 +1011,14 @@ vmem_init(vmem_t *vm, const char *name,
for (i = 0; i < VMEM_MAXORDER; i++) {
LIST_INIT(&vm->vm_freelist[i]);
}
memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist));
vm->vm_hashsize = 1;
vm->vm_hashlist = &vm->vm_hash0;
vm->vm_hashlist = NULL;
if (flags & VM_BOOTSTRAP) {
vm->vm_hashsize = 1;
vm->vm_hashlist = &vm->vm_hash0;
} else if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
vmem_destroy1(vm);
return NULL;
}
if (size != 0) {
if (vmem_add(vm, base, size, flags) != 0) {
@ -930,6 +1041,7 @@ vmem_init(vmem_t *vm, const char *name,
}
/* ---- vmem API */
/*
* vmem_create: create an arena.
@ -943,9 +1055,11 @@ vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
KASSERT((flags & (VM_XIMPORT)) == 0);
return vmem_init(NULL, name, base, size, quantum,
return vmem_create_internal(name, base, size, quantum,
importfn, releasefn, source, qcache_max, flags, ipl);
}
@ -961,9 +1075,11 @@ vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
KASSERT((flags & (VM_XIMPORT)) == 0);
return vmem_init(NULL, name, base, size, quantum,
return vmem_create_internal(name, base, size, quantum,
(vmem_import_t *)importfn, releasefn, source,
qcache_max, flags | VM_XIMPORT, ipl);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm.c,v 1.133 2013/01/26 13:50:33 para Exp $ */
/* $NetBSD: vm.c,v 1.134 2013/01/26 15:18:01 para Exp $ */
/*
* Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
@ -41,7 +41,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.133 2013/01/26 13:50:33 para Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.134 2013/01/26 15:18:01 para Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@ -338,11 +338,12 @@ uvm_init(void)
pool_subsystem_init();
#ifndef RUMP_UNREAL_ALLOCATORS
vmem_bootstrap();
kmem_arena = vmem_create("kmem", 0, 1024*1024, PAGE_SIZE,
NULL, NULL, NULL,
0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
vmem_create_arenas(kmem_arena);
vmem_init(kmem_arena);
kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
vmem_alloc, vmem_free, kmem_arena,

View File

@ -1,4 +1,4 @@
/* $NetBSD: vmem.h,v 1.18 2013/01/26 13:50:33 para Exp $ */
/* $NetBSD: vmem.h,v 1.19 2013/01/26 15:18:01 para Exp $ */
/*-
* Copyright (c)2006 YAMAMOTO Takashi,
@ -54,7 +54,8 @@ extern vmem_t *kmem_arena;
extern vmem_t *kmem_meta_arena;
extern vmem_t *kmem_va_arena;
void vmem_create_arenas(vmem_t *vm);
void vmem_bootstrap(void);
void vmem_init(vmem_t *vm);
vmem_t *vmem_create(const char *, vmem_addr_t, vmem_size_t, vmem_size_t,
vmem_import_t *, vmem_release_t *, vmem_t *, vmem_size_t,
@ -62,9 +63,6 @@ vmem_t *vmem_create(const char *, vmem_addr_t, vmem_size_t, vmem_size_t,
vmem_t *vmem_xcreate(const char *, vmem_addr_t, vmem_size_t, vmem_size_t,
vmem_ximport_t *, vmem_release_t *, vmem_t *, vmem_size_t,
vm_flag_t, int);
vmem_t *vmem_init(vmem_t *, const char *, vmem_addr_t, vmem_size_t, vmem_size_t,
vmem_import_t *, vmem_release_t *, vmem_t *, vmem_size_t,
vm_flag_t, int);
void vmem_destroy(vmem_t *);
int vmem_alloc(vmem_t *, vmem_size_t, vm_flag_t, vmem_addr_t *);
void vmem_free(vmem_t *, vmem_addr_t, vmem_size_t);

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_km.c,v 1.136 2013/01/26 13:50:33 para Exp $ */
/* $NetBSD: uvm_km.c,v 1.137 2013/01/26 15:18:01 para Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -152,7 +152,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.136 2013/01/26 13:50:33 para Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.137 2013/01/26 15:18:01 para Exp $");
#include "opt_uvmhist.h"
@ -180,7 +180,6 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.136 2013/01/26 13:50:33 para Exp $");
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <uvm/uvm.h>
@ -203,7 +202,6 @@ int nkmempages = 0;
vaddr_t kmembase;
vsize_t kmemsize;
static struct vmem kmem_arena_store;
vmem_t *kmem_arena = NULL;
vmem_t *kmem_va_arena;
@ -326,9 +324,10 @@ uvm_km_bootstrap(vaddr_t start, vaddr_t end)
kernel_map = &kernel_map_store;
pool_subsystem_init();
vmem_bootstrap();
kmem_arena = vmem_init(&kmem_arena_store, "kmem",
kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
kmem_arena = vmem_create("kmem", kmembase, kmemsize, PAGE_SIZE,
NULL, NULL, NULL,
0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
#ifdef PMAP_GROWKERNEL
/*
@ -343,7 +342,7 @@ uvm_km_bootstrap(vaddr_t start, vaddr_t end)
}
#endif
vmem_create_arenas(kmem_arena);
vmem_init(kmem_arena);
UVMHIST_LOG(maphist, "kmem vmem created (base=%#"PRIxVADDR
", size=%#"PRIxVSIZE, kmembase, kmemsize, 0,0);
@ -351,7 +350,7 @@ uvm_km_bootstrap(vaddr_t start, vaddr_t end)
kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
vmem_alloc, vmem_free, kmem_arena,
(kmem_arena_small ? 4 : 8) * PAGE_SIZE,
VM_NOSLEEP, IPL_VM);
VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}