slab/cache: a couple more fixes.

- on CACHE_DURING_BOOT, init the benaphore count with 1.
- account for allocated space in early_allocate_pages.
- fixed the slab position calculation in small slabs.
- we can now init all allocator sizes early rather than later.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@20914 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 8cfe0be38e
parent 698b6d7195
@@ -254,7 +254,7 @@ benaphore_boot_init(benaphore *lock, const char *name, uint32 flags)
 {
 	if (flags & CACHE_DURING_BOOT) {
 		lock->sem = -1;
-		lock->count = 0;
+		lock->count = 1;
 		return B_OK;
 	}
 
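
Why initializing the count with 1 matters: a benaphore's count is the number of free slots, and during boot lock->sem is set to -1 because no semaphore can be created yet, so the very first lock must succeed on the count alone. A minimal sketch of that convention, using std::atomic as a stand-in (this is not Haiku's actual lock code):

    #include <atomic>

    struct boot_benaphore {
        int sem;                   // -1 during boot: no semaphore exists yet
        std::atomic<int> count;    // 1 == unlocked, <= 0 == contended
    };

    static bool
    boot_benaphore_lock(boot_benaphore &lock)
    {
        // fetch_sub() returns the previous value; with count initialized
        // to 1 the first locker sees 1 and succeeds without a semaphore.
        // Initialized to 0 (the old code), even the first lock would try
        // to acquire the invalid (-1) semaphore.
        if (lock.count.fetch_sub(1) > 0)
            return true;
        return false;              // post-boot code would acquire_sem(lock.sem)
    }
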
@@ -306,10 +306,18 @@ area_free_pages(object_cache *cache, void *pages)
 static status_t
 early_allocate_pages(object_cache *cache, void **pages, uint32 flags)
 {
+	TRACE_CACHE(cache, "early allocate pages (%lu, 0x0%lx)", cache->slab_size,
+		flags);
+
 	addr_t base = vm_allocate_early(sKernelArgs, cache->slab_size,
 		cache->slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 
 	*pages = (void *)base;
+
+	cache->usage += cache->slab_size;
+
+	TRACE_CACHE(cache, " ... = { %p }", *pages);
+
 	return B_OK;
 }
 
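
The added cache->usage accounting keeps the boot path in step with the regular area-backed allocation path: without it, memory handed out through vm_allocate_early() would never appear in the cache's usage figure. A self-contained sketch of the idea, with a bump pointer standing in for vm_allocate_early() (assumes slab_size is a power of two, as the slab-size alignment above implies):

    #include <cstddef>
    #include <cstdint>

    struct toy_cache {
        size_t slab_size;
        size_t usage;       // total bytes backing this cache's slabs
    };

    static void *
    toy_early_allocate(toy_cache &cache, uint8_t *&bump)
    {
        // Carve a slab_size-aligned chunk from a boot-time bump pointer.
        uintptr_t base = (uintptr_t(bump) + cache.slab_size - 1)
            & ~uintptr_t(cache.slab_size - 1);
        bump = (uint8_t *)(base + cache.slab_size);

        cache.usage += cache.slab_size;    // the fix: account for the slab
        return (void *)base;
    }
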
@@ -731,7 +739,8 @@ object_cache_reserve(object_cache *cache, size_t object_count, uint32 flags)
 slab *
 object_cache::InitSlab(slab *slab, void *pages, size_t byteCount)
 {
-	TRACE_CACHE(this, "construct (%p, %p, %lu)", slab, pages, byteCount);
+	TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
+		((uint8 *)pages) + byteCount, byteCount);
 
 	slab->pages = pages;
 	slab->count = slab->size = byteCount / object_size;
@@ -853,7 +862,7 @@ SmallObjectCache::ReturnSlab(slab *slab)
 slab *
 SmallObjectCache::ObjectSlab(void *object) const
 {
-	return slab_in_pages(lower_boundary(object, object_size), slab_size);
+	return slab_in_pages(lower_boundary(object, slab_size), slab_size);
 }
 
 
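
This is the "slab position calculation in small slabs" fix from the commit message: a small-object cache places objects in a slab_size-aligned run of pages, so the slab an object belongs to is found by rounding the object's address down to a slab_size boundary. Rounding down to an object_size boundary, as the old code did, merely snaps to the start of the object's own cell. A stand-alone illustration of the arithmetic, with lower_boundary() re-implemented here under the assumption that it masks down to a power-of-two boundary:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Round `address` down to a multiple of `boundary` (a power of two),
    // mirroring what the lower_boundary() helper above presumably does.
    static uintptr_t
    lower_boundary(uintptr_t address, size_t boundary)
    {
        return address & ~uintptr_t(boundary - 1);
    }

    int main()
    {
        const size_t slab_size = 4096;     // one page per small slab
        const size_t object_size = 64;

        uintptr_t slab_base = 0x10000;     // slab_size-aligned
        uintptr_t object = slab_base + 5 * object_size + 12;

        // Fixed code: rounding to the slab size recovers the slab base.
        assert(lower_boundary(object, slab_size) == slab_base);

        // Old code: rounding to the object size lands on the object's
        // 64-byte cell, not on the slab.
        assert(lower_boundary(object, object_size) != slab_base);
        return 0;
    }
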
@@ -1265,6 +1274,8 @@ dump_cache_info(int argc, char *argv[])
 void
 slab_init(kernel_args *args, addr_t initialBase, size_t initialSize)
 {
+	dprintf("slab: init base %p + 0x%lx\n", (void *)initialBase, initialSize);
+
 	sInitialBegin = (uint8 *)initialBase;
 	sInitialLimit = sInitialBegin + initialSize;
 	sInitialPointer = sInitialBegin;
@@ -13,9 +13,10 @@
 #include <kernel.h> // for ROUNDUP
 
 #include <stdio.h>
+#include <string.h>
 
 #define DEBUG_ALLOCATOR
-// #define TEST_ALL_CACHES_DURING_BOOT
+//#define TEST_ALL_CACHES_DURING_BOOT
 
 static const size_t kBlockSizes[] = {
 	16, 24, 32, 48, 64, 80, 96, 112,
@@ -122,6 +123,7 @@ block_free(void *block)
 #endif
 
 	int index = size_to_index(tag->size + sizeof(boundary_tag));
+
 	if (index < 0) {
 		area_boundary_tag *areaTag = (area_boundary_tag *)(((uint8 *)block)
 			- sizeof(area_boundary_tag));
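
For reference, size_to_index() evidently maps a total block size (payload plus boundary tag) to a size class in kBlockSizes, returning a negative index when the block is too large for any cache, in which case the area-tag path above takes over. A hedged sketch of that contract (the real helper lives next to kBlockSizes and may differ):

    #include <cstddef>

    // Assumed contract: first size class that can hold `size`, or -1.
    static int
    size_to_index(size_t size, const size_t *sizes)
    {
        for (int index = 0; sizes[index] != 0; index++) {
            if (size <= sizes[index])
                return index;
        }
        return -1;    // oversized: handled via area_boundary_tag
    }
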
@@ -170,12 +172,8 @@ show_waste(int argc, char *argv[])
 void
 block_allocator_init_boot()
 {
-	for (int index = 0; kBlockSizes[index] != 0; index++) {
-		if (kBlockSizes[index] > 256)
-			break;
-
-		block_create_cache(index, true);
-	}
+	for (int index = 0; kBlockSizes[index] != 0; index++)
+		block_create_cache(index, true);
 
 	add_debugger_command("show_waste", show_waste,
 		"show cache allocator's memory waste");
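
Previously block_allocator_init_boot() stopped at the 256-byte class and left the larger ones for block_allocator_init_rest(); with boot benaphores and early page allocation fixed above, a single pass can now create every size class at boot. A sketch of the table walk, using an illustrative size list (the real kBlockSizes continues past the values shown in the hunk):

    #include <cstdio>
    #include <cstddef>

    // Illustrative, zero-terminated size table.
    static const size_t kSizes[] = { 16, 32, 64, 128, 256, 512, 1024, 0 };

    int main()
    {
        // One boot-time pass now covers all classes; the old loop broke
        // out at sizes > 256 and deferred those to a second init stage.
        for (int index = 0; kSizes[index] != 0; index++)
            std::printf("boot cache %d: %zu bytes\n", index, kSizes[index]);
        return 0;
    }
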
@@ -185,13 +183,6 @@ block_allocator_init_boot()
 void
 block_allocator_init_rest()
 {
-	for (int index = 0; kBlockSizes[index] != 0; index++) {
-		if (kBlockSizes[index] <= 256)
-			continue;
-
-		block_create_cache(index, false);
-	}
-
 #ifdef TEST_ALL_CACHES_DURING_BOOT
 	for (int index = 0; kBlockSizes[index] != 0; index++) {
 		block_free(block_alloc(kBlockSizes[index] - sizeof(boundary_tag)));
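
In the self-test, each allocation requests kBlockSizes[index] - sizeof(boundary_tag) bytes so that, once the allocator prepends its boundary tag, tag plus payload exactly fill one object of that size class and block_free() maps it straight back through size_to_index(). A minimal round-trip check of that arithmetic (boundary_tag reduced to its assumed size field):

    #include <cassert>
    #include <cstddef>

    // Stand-in for the allocator's boundary_tag: a header stored in
    // front of each block so block_free() can recover its size class.
    struct boundary_tag {
        size_t size;    // user-visible size of the block that follows
    };

    static const size_t kSizes[] = { 16, 32, 64, 128, 256, 0 };

    static int
    size_to_index(size_t size)    // same assumed contract as sketched above
    {
        for (int index = 0; kSizes[index] != 0; index++)
            if (size <= kSizes[index])
                return index;
        return -1;
    }

    int main()
    {
        for (int index = 0; kSizes[index] != 0; index++) {
            // What the boot test requests for this class...
            size_t request = kSizes[index] - sizeof(boundary_tag);
            // ...lands back in the same class once the tag is added,
            // which is what block_free() relies on.
            assert(size_to_index(request + sizeof(boundary_tag)) == index);
        }
        return 0;
    }
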