* Cleaned up vm_types.h a bit, and made vm_page, vm_cache, and vm_area
  opaque types for C.
* As a result, I've renamed some more source files to .cpp, and fixed
  all warnings caused by that.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22326 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2007-09-27 10:29:05 +00:00
parent e96b202311
commit 393fceb5a0
8 changed files with 582 additions and 46 deletions
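The vm_types.h hunks below restructure the header so that C translation units only see forward declarations of vm_page, vm_cache, and vm_area, while C++ code keeps the full definitions (and the C++-only helpers such as the DoublyLinkedQueue links). A minimal sketch of that opaque-type pattern, with an illustrative field name rather than the real structure layout:

#ifdef __cplusplus
// C++ code sees the full definition and may use C++-only members.
typedef struct vm_page {
	struct vm_page	*queue_next;
	// ... further fields ...
} vm_page;
#else	// !__cplusplus
// C code only gets an opaque declaration: it can pass vm_page
// pointers around, but cannot access members or apply sizeof().
typedef struct vm_page vm_page;
#endif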

View File

@@ -9,9 +9,10 @@
#define _KERNEL_VM_TYPES_H
#include <kernel.h>
#include <arch/vm_types.h>
#include <arch/vm_translation_map.h>
#include <condition_variable.h>
#include <kernel.h>
#include <util/DoublyLinkedQueue.h>
#include <sys/uio.h>
@@ -31,9 +32,6 @@
#ifdef __cplusplus
struct vm_page_mapping;
typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link;
#else
typedef struct { void *previous; void *next; } vm_page_mapping_link;
#endif
typedef struct vm_page_mapping {
vm_page_mapping_link page_link;
@@ -42,7 +40,6 @@ typedef struct vm_page_mapping {
struct vm_area *area;
} vm_page_mapping;
#ifdef __cplusplus
class DoublyLinkedPageLink {
public:
inline vm_page_mapping_link *operator()(vm_page_mapping *element) const
@@ -71,10 +68,6 @@ class DoublyLinkedAreaLink {
typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedPageLink> vm_page_mappings;
typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedAreaLink> vm_area_mappings;
#else // !__cplusplus
typedef struct vm_page_mapping *vm_page_mappings;
typedef struct vm_page_mapping *vm_area_mappings;
#endif
// vm page
typedef struct vm_page {
@@ -137,16 +130,10 @@ enum {
CACHE_TYPE_NULL
};
#ifdef __cplusplus
#include <condition_variable.h>
struct vm_dummy_page : vm_page {
ConditionVariable<vm_page> busy_condition;
};
#endif // __cplusplus
// vm_cache
typedef struct vm_cache {
mutex lock;
@@ -201,6 +188,13 @@ typedef struct vm_area {
struct vm_area *hash_next;
} vm_area;
#else // !__cplusplus
// these are just opaque types in C
typedef struct vm_page vm_page;
typedef struct vm_cache vm_cache;
typedef struct vm_area vm_area;
#endif
enum {
VM_ASPACE_STATE_NORMAL = 0,
VM_ASPACE_STATE_DELETION

View File

@@ -12,7 +12,7 @@ SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;
KernelStaticLibrary libx86 :
arch_cpu.c
arch_debug.c
arch_debug.cpp
arch_debug_console.c
arch_elf.c
arch_int.c
@@ -22,8 +22,8 @@ KernelStaticLibrary libx86 :
arch_smp.c
arch_thread.c
arch_timer.c
arch_vm.c
arch_vm_translation_map.c
arch_vm.cpp
arch_vm_translation_map.cpp
arch_x86.S
arch_interrupts.S
arch_system_info.c

View File

@@ -9,6 +9,7 @@
#include <KernelExport.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <vm.h>
#include <vm_page.h>
#include <vm_priv.h>
@@ -37,34 +38,25 @@ void *gDmaAddress;
static uint32 sMemoryTypeBitmap;
static int32 sMemoryTypeIDs[kMaxMemoryTypeRegisters];
static int32 sMemoryTypeRegisterCount;
static uint32 sMemoryTypeRegisterCount;
static spinlock sMemoryTypeLock;
static int32
allocate_mtrr(void)
{
int32 index;
cpu_status state = disable_interrupts();
acquire_spinlock(&sMemoryTypeLock);
InterruptsSpinLocker _(&sMemoryTypeLock);
// find free bit
for (index = 0; index < sMemoryTypeRegisterCount; index++) {
for (uint32 index = 0; index < sMemoryTypeRegisterCount; index++) {
if (sMemoryTypeBitmap & (1UL << index))
continue;
sMemoryTypeBitmap |= 1UL << index;
release_spinlock(&sMemoryTypeLock);
restore_interrupts(state);
return index;
}
release_spinlock(&sMemoryTypeLock);
restore_interrupts(state);
return -1;
}
@@ -72,13 +64,9 @@ allocate_mtrr(void)
static void
free_mtrr(int32 index)
{
cpu_status state = disable_interrupts();
acquire_spinlock(&sMemoryTypeLock);
InterruptsSpinLocker _(&sMemoryTypeLock);
sMemoryTypeBitmap &= ~(1UL << index);
release_spinlock(&sMemoryTypeLock);
restore_interrupts(state);
}
@@ -204,8 +192,7 @@ arch_vm_init_end(kernel_args *args)
status_t
arch_vm_init_post_modules(kernel_args *args)
{
void *cookie;
int32 i;
// void *cookie;
// the x86 CPU modules are now accessible
@@ -219,13 +206,13 @@ arch_vm_init_post_modules(kernel_args *args)
// init memory type ID table
for (i = 0; i < sMemoryTypeRegisterCount; i++) {
for (uint32 i = 0; i < sMemoryTypeRegisterCount; i++) {
sMemoryTypeIDs[i] = -1;
}
// set the physical memory ranges to write-back mode
for (i = 0; i < args->num_physical_memory_ranges; i++) {
for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
set_memory_type(-1, args->physical_memory_range[i].start,
args->physical_memory_range[i].size, B_MTR_WB);
}
@@ -237,14 +224,16 @@ arch_vm_init_post_modules(kernel_args *args)
void
arch_vm_aspace_swap(vm_address_space *aspace)
{
i386_swap_pgdir((addr_t)i386_translation_map_get_pgdir(&aspace->translation_map));
i386_swap_pgdir((addr_t)i386_translation_map_get_pgdir(
&aspace->translation_map));
}
bool
arch_vm_supports_protection(uint32 protection)
{
// x86 always has the same read/write properties for userland and the kernel.
// x86 always has the same read/write properties for userland and the
// kernel.
// That's why we do not support user-read/kernel-write access. While the
// other way around is not supported either, we don't care in this case
// and give the kernel full access.
@@ -259,7 +248,7 @@ arch_vm_supports_protection(uint32 protection)
void
arch_vm_unset_memory_type(struct vm_area *area)
{
int32 index;
uint32 index;
if (area->memory_type == 0)
return;
@@ -279,7 +268,8 @@ arch_vm_unset_memory_type(struct vm_area *area)
status_t
arch_vm_set_memory_type(struct vm_area *area, addr_t physicalBase, uint32 type)
arch_vm_set_memory_type(struct vm_area *area, addr_t physicalBase,
uint32 type)
{
area->memory_type = type >> MEMORY_TYPE_SHIFT;
return set_memory_type(area->id, physicalBase, area->size, type);
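Besides the int32/uint32 fixes, the arch_vm.cpp hunks above replace the manual disable_interrupts()/acquire_spinlock() ... release_spinlock()/restore_interrupts() sequences with a stack-based InterruptsSpinLocker, so the lock and the saved interrupt state are released automatically on every return path. The real class comes from util/AutoLock.h; the following is only a simplified sketch of the idiom, not its actual implementation:

class ExampleInterruptsSpinLocker {
public:
	ExampleInterruptsSpinLocker(spinlock *lock)
		:
		fLock(lock)
	{
		// disable interrupts first, then take the spinlock
		fState = disable_interrupts();
		acquire_spinlock(fLock);
	}

	~ExampleInterruptsSpinLocker()
	{
		// runs on every exit from the enclosing scope
		release_spinlock(fLock);
		restore_interrupts(fState);
	}

private:
	spinlock	*fLock;
	cpu_status	fState;
};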

View File

@@ -787,7 +787,8 @@ arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
if (!kernel) {
// user
// allocate a pgdir
map->arch_data->pgdir_virt = memalign(B_PAGE_SIZE, B_PAGE_SIZE);
map->arch_data->pgdir_virt = (page_directory_entry *)memalign(
B_PAGE_SIZE, B_PAGE_SIZE);
if (map->arch_data->pgdir_virt == NULL) {
free(map->arch_data);
recursive_lock_destroy(&map->lock);

View File

@@ -4,12 +4,12 @@ UsePrivateHeaders shared ;
KernelMergeObject kernel_vm.o :
vm.cpp
vm_address_space.c
vm_address_space.cpp
vm_cache.cpp
vm_daemons.cpp
vm_low_memory.cpp
vm_page.cpp
vm_store_anonymous_noswap.c
vm_store_anonymous_noswap.cpp
vm_store_device.c
vm_store_null.c
#vm_tests.c

View File

@@ -0,0 +1,366 @@
/*
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <KernelExport.h>
#include <vm.h>
#include <vm_address_space.h>
#include <vm_priv.h>
#include <thread.h>
#include <util/khash.h>
#include <stdlib.h>
//#define TRACE_VM
#ifdef TRACE_VM
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
static vm_address_space *sKernelAddressSpace;
#define ASPACE_HASH_TABLE_SIZE 1024
static struct hash_table *sAddressSpaceTable;
static sem_id sAddressSpaceHashSem;
static void
_dump_aspace(vm_address_space *aspace)
{
vm_area *area;
dprintf("dump of address space at %p:\n", aspace);
dprintf("id: 0x%lx\n", aspace->id);
dprintf("ref_count: %ld\n", aspace->ref_count);
dprintf("fault_count: %ld\n", aspace->fault_count);
dprintf("working_set_size: 0x%lx\n", aspace->working_set_size);
dprintf("translation_map: %p\n", &aspace->translation_map);
dprintf("base: 0x%lx\n", aspace->base);
dprintf("size: 0x%lx\n", aspace->size);
dprintf("change_count: 0x%lx\n", aspace->change_count);
dprintf("sem: 0x%lx\n", aspace->sem);
dprintf("area_hint: %p\n", aspace->area_hint);
dprintf("area_list:\n");
for (area = aspace->areas; area != NULL; area = area->address_space_next) {
dprintf(" area 0x%lx: ", area->id);
dprintf("base_addr = 0x%lx ", area->base);
dprintf("size = 0x%lx ", area->size);
dprintf("name = '%s' ", area->name);
dprintf("protection = 0x%lx\n", area->protection);
}
}
static int
dump_aspace(int argc, char **argv)
{
vm_address_space *aspace;
if (argc < 2) {
dprintf("aspace: not enough arguments\n");
return 0;
}
// if the argument looks like a number, treat it as such
{
team_id id = strtoul(argv[1], NULL, 0);
aspace = (vm_address_space *)hash_lookup(sAddressSpaceTable, &id);
if (aspace == NULL) {
dprintf("invalid aspace id\n");
} else {
_dump_aspace(aspace);
}
return 0;
}
return 0;
}
static int
dump_aspace_list(int argc, char **argv)
{
vm_address_space *space;
struct hash_iterator iter;
dprintf("addr\tid\tbase\t\tsize\n");
hash_open(sAddressSpaceTable, &iter);
while ((space = (vm_address_space *)hash_next(sAddressSpaceTable,
&iter)) != NULL) {
dprintf("%p\t0x%lx\t0x%lx\t\t0x%lx\n",
space, space->id, space->base, space->size);
}
hash_close(sAddressSpaceTable, &iter, false);
return 0;
}
static int
aspace_compare(void *_a, const void *key)
{
vm_address_space *aspace = (vm_address_space *)_a;
const team_id *id = (const team_id *)key;
if (aspace->id == *id)
return 0;
return -1;
}
static uint32
aspace_hash(void *_a, const void *key, uint32 range)
{
vm_address_space *aspace = (vm_address_space *)_a;
const team_id *id = (const team_id *)key;
if (aspace != NULL)
return aspace->id % range;
return *id % range;
}
/*! When this function is called, all references to this address space
have been released, so it's safe to remove it.
*/
static void
delete_address_space(vm_address_space *addressSpace)
{
TRACE(("delete_address_space: called on aspace 0x%lx\n", addressSpace->id));
if (addressSpace == sKernelAddressSpace)
panic("tried to delete the kernel aspace!\n");
acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
(*addressSpace->translation_map.ops->destroy)(&addressSpace->translation_map);
delete_sem(addressSpace->sem);
free(addressSpace);
}
// #pragma mark -
vm_address_space *
vm_get_address_space_by_id(team_id id)
{
vm_address_space *addressSpace;
acquire_sem_etc(sAddressSpaceHashSem, READ_COUNT, 0, 0);
addressSpace = (vm_address_space *)hash_lookup(sAddressSpaceTable, &id);
if (addressSpace)
atomic_add(&addressSpace->ref_count, 1);
release_sem_etc(sAddressSpaceHashSem, READ_COUNT, 0);
return addressSpace;
}
vm_address_space *
vm_get_kernel_address_space(void)
{
/* we can treat this one a little differently since it can't be deleted */
atomic_add(&sKernelAddressSpace->ref_count, 1);
return sKernelAddressSpace;
}
vm_address_space *
vm_kernel_address_space(void)
{
return sKernelAddressSpace;
}
team_id
vm_kernel_address_space_id(void)
{
return sKernelAddressSpace->id;
}
vm_address_space *
vm_get_current_user_address_space(void)
{
struct thread *thread = thread_get_current_thread();
if (thread != NULL) {
vm_address_space *addressSpace = thread->team->address_space;
if (addressSpace != NULL) {
atomic_add(&addressSpace->ref_count, 1);
return addressSpace;
}
}
return NULL;
}
team_id
vm_current_user_address_space_id(void)
{
struct thread *thread = thread_get_current_thread();
if (thread != NULL && thread->team->address_space != NULL)
return thread->team->id;
return B_ERROR;
}
void
vm_put_address_space(vm_address_space *addressSpace)
{
bool remove = false;
acquire_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0, 0);
if (atomic_add(&addressSpace->ref_count, -1) == 1) {
hash_remove(sAddressSpaceTable, addressSpace);
remove = true;
}
release_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0);
if (remove)
delete_address_space(addressSpace);
}
/*! Deletes all areas in the specified address space, and the address
space by decreasing all reference counters. It also marks the
address space of being in deletion state, so that no more areas
can be created in it.
After this, the address space is not operational anymore, but might
still be in memory until the last reference has been released.
*/
void
vm_delete_address_space(vm_address_space *addressSpace)
{
acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
addressSpace->state = VM_ASPACE_STATE_DELETION;
release_sem_etc(addressSpace->sem, WRITE_COUNT, 0);
vm_delete_areas(addressSpace);
vm_put_address_space(addressSpace);
}
status_t
vm_create_address_space(team_id id, addr_t base, addr_t size,
bool kernel, vm_address_space **_addressSpace)
{
vm_address_space *addressSpace;
status_t status;
addressSpace = (vm_address_space *)malloc(sizeof(vm_address_space));
if (addressSpace == NULL)
return B_NO_MEMORY;
TRACE(("vm_create_aspace: %s: %lx bytes starting at 0x%lx => %p\n",
name, size, base, addressSpace));
addressSpace->base = base;
addressSpace->size = size;
addressSpace->areas = NULL;
addressSpace->area_hint = NULL;
addressSpace->change_count = 0;
if (!kernel) {
// the kernel address space will create its semaphore later
addressSpace->sem = create_sem(WRITE_COUNT, "address space");
if (addressSpace->sem < B_OK) {
status_t status = addressSpace->sem;
free(addressSpace);
return status;
}
}
addressSpace->id = id;
addressSpace->ref_count = 1;
addressSpace->state = VM_ASPACE_STATE_NORMAL;
addressSpace->fault_count = 0;
addressSpace->scan_va = base;
addressSpace->working_set_size = kernel
? DEFAULT_KERNEL_WORKING_SET : DEFAULT_WORKING_SET;
addressSpace->max_working_set = DEFAULT_MAX_WORKING_SET;
addressSpace->min_working_set = DEFAULT_MIN_WORKING_SET;
addressSpace->last_working_set_adjust = system_time();
// initialize the corresponding translation map
status = arch_vm_translation_map_init_map(&addressSpace->translation_map,
kernel);
if (status < B_OK) {
free(addressSpace);
return status;
}
// add the aspace to the global hash table
acquire_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0, 0);
hash_insert(sAddressSpaceTable, addressSpace);
release_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0);
*_addressSpace = addressSpace;
return B_OK;
}
status_t
vm_address_space_init(void)
{
sAddressSpaceHashSem = -1;
// create the area and address space hash tables
{
vm_address_space *aspace;
sAddressSpaceTable = hash_init(ASPACE_HASH_TABLE_SIZE,
(addr_t)&aspace->hash_next - (addr_t)aspace, &aspace_compare,
&aspace_hash);
if (sAddressSpaceTable == NULL)
panic("vm_init: error creating aspace hash table\n");
}
sKernelAddressSpace = NULL;
// create the initial kernel address space
if (vm_create_address_space(1, KERNEL_BASE, KERNEL_SIZE,
true, &sKernelAddressSpace) != B_OK)
panic("vm_init: error creating kernel address space!\n");
add_debugger_command("aspaces", &dump_aspace_list,
"Dump a list of all address spaces");
add_debugger_command("aspace", &dump_aspace,
"Dump info about a particular address space");
return B_OK;
}
status_t
vm_address_space_init_post_sem(void)
{
status_t status = arch_vm_translation_map_init_kernel_map_post_sem(
&sKernelAddressSpace->translation_map);
if (status < B_OK)
return status;
status = sKernelAddressSpace->sem = create_sem(WRITE_COUNT,
"kernel_aspacelock");
if (status < B_OK)
return status;
status = sAddressSpaceHashSem = create_sem(WRITE_COUNT, "aspace_hash_sem");
if (status < B_OK)
return status;
return B_OK;
}
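The new vm_address_space.cpp uses the usual get/put reference counting: vm_get_address_space_by_id() raises ref_count under the hash semaphore, and vm_put_address_space() drops it and calls delete_address_space() once the last reference is gone. A hypothetical caller (teamID is just a placeholder; the real call sites are not part of this commit) pairs the two like this:

vm_address_space *addressSpace = vm_get_address_space_by_id(teamID);
if (addressSpace != NULL) {
	// ... work with the address space ...

	// may trigger delete_address_space() if this was the last reference
	vm_put_address_space(addressSpace);
}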

View File

@@ -0,0 +1,185 @@
/*
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "vm_store_anonymous_noswap.h"
#include <KernelExport.h>
#include <vm_priv.h>
#include <arch_config.h>
#include <stdlib.h>
//#define TRACE_STORE
#ifdef TRACE_STORE
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
// The stack functionality looks like a good candidate to put into its own
// store. I have not done this because once we have a swap file backing up
// the memory, it would probably not be a good idea to separate this
// anymore.
typedef struct anonymous_store {
vm_store vm;
bool can_overcommit;
bool has_precommitted;
uint8 precommitted_pages;
int32 guarded_size;
} anonymous_store;
static void
anonymous_destroy(struct vm_store *store)
{
vm_unreserve_memory(store->committed_size);
free(store);
}
static status_t
anonymous_commit(struct vm_store *_store, off_t size)
{
anonymous_store *store = (anonymous_store *)_store;
// if we can overcommit, we don't commit here, but in anonymous_fault()
if (store->can_overcommit) {
if (store->has_precommitted)
return B_OK;
// pre-commit some pages to make a later failure less probable
store->has_precommitted = true;
if (size > store->vm.cache->virtual_base + store->precommitted_pages)
size = store->vm.cache->virtual_base + store->precommitted_pages;
}
size -= store->vm.cache->virtual_base;
// anonymous stores don't need to span over their whole source
// Check to see how much we could commit - we need real memory
if (size > store->vm.committed_size) {
// try to commit
if (vm_try_reserve_memory(size - store->vm.committed_size) != B_OK)
return B_NO_MEMORY;
store->vm.committed_size = size;
} else {
// we can release some
vm_unreserve_memory(store->vm.committed_size - size);
}
return B_OK;
}
static bool
anonymous_has_page(struct vm_store *store, off_t offset)
{
return false;
}
static status_t
anonymous_read(struct vm_store *store, off_t offset, const iovec *vecs,
size_t count, size_t *_numBytes, bool fsReenter)
{
panic("anonymous_store: read called. Invalid!\n");
return B_ERROR;
}
static status_t
anonymous_write(struct vm_store *store, off_t offset, const iovec *vecs,
size_t count, size_t *_numBytes, bool fsReenter)
{
// no place to write, this will cause the page daemon to skip this store
return 0;
}
static status_t
anonymous_fault(struct vm_store *_store, struct vm_address_space *aspace,
off_t offset)
{
anonymous_store *store = (anonymous_store *)_store;
if (store->can_overcommit) {
if (store->guarded_size > 0) {
uint32 guardOffset;
#ifdef STACK_GROWS_DOWNWARDS
guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
guardOffset = store->vm.cache->virtual_size - store->guarded_size;
#else
# error Stack direction has not been defined in arch_config.h
#endif
// report stack fault, guard page hit!
if (offset >= guardOffset && offset
< guardOffset + store->guarded_size) {
TRACE(("stack overflow!\n"));
return B_BAD_ADDRESS;
}
}
if (store->precommitted_pages == 0) {
// try to commit additional memory
if (vm_try_reserve_memory(B_PAGE_SIZE) != B_OK)
return B_NO_MEMORY;
} else
store->precommitted_pages--;
store->vm.committed_size += B_PAGE_SIZE;
}
// This will cause vm_soft_fault() to handle the fault
return B_BAD_HANDLER;
}
static vm_store_ops anonymous_ops = {
&anonymous_destroy,
&anonymous_commit,
&anonymous_has_page,
&anonymous_read,
&anonymous_write,
&anonymous_fault,
NULL, // acquire ref
NULL // release ref
};
/*! Create a new vm_store that uses anonymous noswap memory */
vm_store *
vm_store_create_anonymous_noswap(bool canOvercommit,
int32 numPrecommittedPages, int32 numGuardPages)
{
anonymous_store *store = (anonymous_store *)malloc(
sizeof(anonymous_store));
if (store == NULL)
return NULL;
TRACE(("vm_store_create_anonymous(canOvercommit = %s, numGuardPages = %ld) at %p\n",
canOvercommit ? "yes" : "no", numGuardPages, store));
store->vm.ops = &anonymous_ops;
store->vm.cache = NULL;
store->vm.committed_size = 0;
store->can_overcommit = canOvercommit;
store->has_precommitted = numPrecommittedPages != 0;
store->precommitted_pages = min_c(numPrecommittedPages, 255);
store->guarded_size = numGuardPages * B_PAGE_SIZE;
return &store->vm;
}
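vm_store_create_anonymous_noswap() is the only constructor this new store exports. An illustrative call (the actual call sites live elsewhere in the VM code and are not part of this diff), creating an overcommitting store with no precommitted pages and four guard pages, e.g. for a stack area:

vm_store *store = vm_store_create_anonymous_noswap(true, 0, 4);
if (store == NULL)
	return B_NO_MEMORY;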