Forgot to actually delete some old C files.

For some reason, shell wildcards do not include deleted files... :)


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22327 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2007-09-27 12:10:06 +00:00
parent 393fceb5a0
commit 6e03805fb2
2 changed files with 0 additions and 549 deletions

View File

@ -1,365 +0,0 @@
/*
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <KernelExport.h>
#include <vm.h>
#include <vm_address_space.h>
#include <vm_priv.h>
#include <thread.h>
#include <util/khash.h>

#include <stddef.h>
#include <stdlib.h>
//#define TRACE_VM
#ifdef TRACE_VM
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
static vm_address_space *sKernelAddressSpace;
#define ASPACE_HASH_TABLE_SIZE 1024
static void *sAddressSpaceTable;
static sem_id sAddressSpaceHashSem;
static void
_dump_aspace(vm_address_space *aspace)
{
vm_area *area;
dprintf("dump of address space at %p:\n", aspace);
dprintf("id: 0x%lx\n", aspace->id);
dprintf("ref_count: %ld\n", aspace->ref_count);
dprintf("fault_count: %ld\n", aspace->fault_count);
dprintf("working_set_size: 0x%lx\n", aspace->working_set_size);
dprintf("translation_map: %p\n", &aspace->translation_map);
dprintf("base: 0x%lx\n", aspace->base);
dprintf("size: 0x%lx\n", aspace->size);
dprintf("change_count: 0x%lx\n", aspace->change_count);
dprintf("sem: 0x%lx\n", aspace->sem);
dprintf("area_hint: %p\n", aspace->area_hint);
dprintf("area_list:\n");
for (area = aspace->areas; area != NULL; area = area->address_space_next) {
dprintf(" area 0x%lx: ", area->id);
dprintf("base_addr = 0x%lx ", area->base);
dprintf("size = 0x%lx ", area->size);
dprintf("name = '%s' ", area->name);
dprintf("protection = 0x%lx\n", area->protection);
}
}
static int
dump_aspace(int argc, char **argv)
{
vm_address_space *aspace;
if (argc < 2) {
dprintf("aspace: not enough arguments\n");
return 0;
}
// if the argument looks like a number, treat it as such
{
team_id id = strtoul(argv[1], NULL, 0);
aspace = hash_lookup(sAddressSpaceTable, &id);
if (aspace == NULL) {
dprintf("invalid aspace id\n");
} else {
_dump_aspace(aspace);
}
return 0;
}
return 0;
}
static int
dump_aspace_list(int argc, char **argv)
{
vm_address_space *as;
struct hash_iterator iter;
dprintf("addr\tid\tbase\t\tsize\n");
hash_open(sAddressSpaceTable, &iter);
while ((as = hash_next(sAddressSpaceTable, &iter)) != NULL) {
dprintf("%p\t0x%lx\t0x%lx\t\t0x%lx\n",
as, as->id, as->base, as->size);
}
hash_close(sAddressSpaceTable, &iter, false);
return 0;
}
static int
aspace_compare(void *_a, const void *key)
{
vm_address_space *aspace = _a;
const team_id *id = key;
if (aspace->id == *id)
return 0;
return -1;
}
static uint32
aspace_hash(void *_a, const void *key, uint32 range)
{
vm_address_space *aspace = _a;
const team_id *id = key;
if (aspace != NULL)
return aspace->id % range;
return *id % range;
}
/*! When this function is called, all references to this address space
have been released, so it's safe to remove it.
*/
static void
delete_address_space(vm_address_space *addressSpace)
{
TRACE(("delete_address_space: called on aspace 0x%lx\n", addressSpace->id));
if (addressSpace == sKernelAddressSpace)
panic("tried to delete the kernel aspace!\n");
acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
(*addressSpace->translation_map.ops->destroy)(&addressSpace->translation_map);
delete_sem(addressSpace->sem);
free(addressSpace);
}
// #pragma mark -
vm_address_space *
vm_get_address_space_by_id(team_id id)
{
vm_address_space *addressSpace;
acquire_sem_etc(sAddressSpaceHashSem, READ_COUNT, 0, 0);
addressSpace = hash_lookup(sAddressSpaceTable, &id);
if (addressSpace)
atomic_add(&addressSpace->ref_count, 1);
release_sem_etc(sAddressSpaceHashSem, READ_COUNT, 0);
return addressSpace;
}
vm_address_space *
vm_get_kernel_address_space(void)
{
/* we can treat this one a little differently since it can't be deleted */
atomic_add(&sKernelAddressSpace->ref_count, 1);
return sKernelAddressSpace;
}
vm_address_space *
vm_kernel_address_space(void)
{
return sKernelAddressSpace;
}
team_id
vm_kernel_address_space_id(void)
{
return sKernelAddressSpace->id;
}
vm_address_space *
vm_get_current_user_address_space(void)
{
struct thread *thread = thread_get_current_thread();
if (thread != NULL) {
vm_address_space *addressSpace = thread->team->address_space;
if (addressSpace != NULL) {
atomic_add(&addressSpace->ref_count, 1);
return addressSpace;
}
}
return NULL;
}
team_id
vm_current_user_address_space_id(void)
{
struct thread *thread = thread_get_current_thread();
if (thread != NULL && thread->team->address_space != NULL)
return thread->team->id;
return B_ERROR;
}
void
vm_put_address_space(vm_address_space *addressSpace)
{
bool remove = false;
acquire_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0, 0);
if (atomic_add(&addressSpace->ref_count, -1) == 1) {
hash_remove(sAddressSpaceTable, addressSpace);
remove = true;
}
release_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0);
if (remove)
delete_address_space(addressSpace);
}
/*! Deletes all areas in the specified address space, and the address
space by decreasing all reference counters. It also marks the
address space of being in deletion state, so that no more areas
can be created in it.
After this, the address space is not operational anymore, but might
still be in memory until the last reference has been released.
*/
void
vm_delete_address_space(vm_address_space *addressSpace)
{
acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
addressSpace->state = VM_ASPACE_STATE_DELETION;
release_sem_etc(addressSpace->sem, WRITE_COUNT, 0);
vm_delete_areas(addressSpace);
vm_put_address_space(addressSpace);
}
status_t
vm_create_address_space(team_id id, addr_t base, addr_t size,
bool kernel, vm_address_space **_addressSpace)
{
vm_address_space *addressSpace;
status_t status;
addressSpace = (vm_address_space *)malloc(sizeof(vm_address_space));
if (addressSpace == NULL)
return B_NO_MEMORY;
TRACE(("vm_create_aspace: %s: %lx bytes starting at 0x%lx => %p\n",
name, size, base, addressSpace));
addressSpace->base = base;
addressSpace->size = size;
addressSpace->areas = NULL;
addressSpace->area_hint = NULL;
addressSpace->change_count = 0;
if (!kernel) {
// the kernel address space will create its semaphore later
addressSpace->sem = create_sem(WRITE_COUNT, "address space");
if (addressSpace->sem < B_OK) {
status_t status = addressSpace->sem;
free(addressSpace);
return status;
}
}
addressSpace->id = id;
addressSpace->ref_count = 1;
addressSpace->state = VM_ASPACE_STATE_NORMAL;
addressSpace->fault_count = 0;
addressSpace->scan_va = base;
addressSpace->working_set_size = kernel
? DEFAULT_KERNEL_WORKING_SET : DEFAULT_WORKING_SET;
addressSpace->max_working_set = DEFAULT_MAX_WORKING_SET;
addressSpace->min_working_set = DEFAULT_MIN_WORKING_SET;
addressSpace->last_working_set_adjust = system_time();
// initialize the corresponding translation map
status = arch_vm_translation_map_init_map(&addressSpace->translation_map,
kernel);
if (status < B_OK) {
free(addressSpace);
return status;
}
// add the aspace to the global hash table
acquire_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0, 0);
hash_insert(sAddressSpaceTable, addressSpace);
release_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0);
*_addressSpace = addressSpace;
return B_OK;
}
status_t
vm_address_space_init(void)
{
sAddressSpaceHashSem = -1;
// create the area and address space hash tables
{
vm_address_space *aspace;
sAddressSpaceTable = hash_init(ASPACE_HASH_TABLE_SIZE,
(addr_t)&aspace->hash_next - (addr_t)aspace, &aspace_compare,
&aspace_hash);
if (sAddressSpaceTable == NULL)
panic("vm_init: error creating aspace hash table\n");
}
sKernelAddressSpace = NULL;
// create the initial kernel address space
if (vm_create_address_space(1, KERNEL_BASE, KERNEL_SIZE,
true, &sKernelAddressSpace) != B_OK)
panic("vm_init: error creating kernel address space!\n");
add_debugger_command("aspaces", &dump_aspace_list,
"Dump a list of all address spaces");
add_debugger_command("aspace", &dump_aspace,
"Dump info about a particular address space");
return B_OK;
}
status_t
vm_address_space_init_post_sem(void)
{
status_t status = arch_vm_translation_map_init_kernel_map_post_sem(
&sKernelAddressSpace->translation_map);
if (status < B_OK)
return status;
status = sKernelAddressSpace->sem = create_sem(WRITE_COUNT,
"kernel_aspacelock");
if (status < B_OK)
return status;
status = sAddressSpaceHashSem = create_sem(WRITE_COUNT, "aspace_hash_sem");
if (status < B_OK)
return status;
return B_OK;
}

View File

@ -1,184 +0,0 @@
/*
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "vm_store_anonymous_noswap.h"
#include <KernelExport.h>
#include <vm_priv.h>
#include <arch_config.h>
#include <stdlib.h>
//#define TRACE_STORE
#ifdef TRACE_STORE
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
// The stack functionality looks like a good candidate to put into its own
// store. I have not done this because once we have a swap file backing up
// the memory, it would probably not be a good idea to separate this
// anymore.
typedef struct anonymous_store {
vm_store vm;
bool can_overcommit;
bool has_precommitted;
uint8 precommitted_pages;
int32 guarded_size;
} anonymous_store;
static void
anonymous_destroy(struct vm_store *store)
{
vm_unreserve_memory(store->committed_size);
free(store);
}
static status_t
anonymous_commit(struct vm_store *_store, off_t size)
{
anonymous_store *store = (anonymous_store *)_store;
// if we can overcommit, we don't commit here, but in anonymous_fault()
if (store->can_overcommit) {
if (store->has_precommitted)
return B_OK;
// pre-commit some pages to make a later failure less probable
store->has_precommitted = true;
if (size > store->vm.cache->virtual_base + store->precommitted_pages)
size = store->vm.cache->virtual_base + store->precommitted_pages;
}
size -= store->vm.cache->virtual_base;
// anonymous stores don't need to span over their whole source
// Check to see how much we could commit - we need real memory
if (size > store->vm.committed_size) {
// try to commit
if (vm_try_reserve_memory(size - store->vm.committed_size) != B_OK)
return B_NO_MEMORY;
store->vm.committed_size = size;
} else {
// we can release some
vm_unreserve_memory(store->vm.committed_size - size);
}
return B_OK;
}
static bool
anonymous_has_page(struct vm_store *store, off_t offset)
{
return false;
}
static status_t
anonymous_read(struct vm_store *store, off_t offset, const iovec *vecs, size_t count,
size_t *_numBytes, bool fsReenter)
{
panic("anonymous_store: read called. Invalid!\n");
return B_ERROR;
}
static status_t
anonymous_write(struct vm_store *store, off_t offset, const iovec *vecs, size_t count,
size_t *_numBytes, bool fsReenter)
{
// no place to write, this will cause the page daemon to skip this store
return 0;
}
static status_t
anonymous_fault(struct vm_store *_store, struct vm_address_space *aspace, off_t offset)
{
anonymous_store *store = (anonymous_store *)_store;
if (store->can_overcommit) {
if (store->guarded_size > 0) {
uint32 guardOffset;
#ifdef STACK_GROWS_DOWNWARDS
guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
guardOffset = store->vm.cache->virtual_size - store->guarded_size;
#else
# error Stack direction has not been defined in arch_config.h
#endif
// report stack fault, guard page hit!
if (offset >= guardOffset && offset < guardOffset + store->guarded_size) {
TRACE(("stack overflow!\n"));
return B_BAD_ADDRESS;
}
}
if (store->precommitted_pages == 0) {
// try to commit additional memory
if (vm_try_reserve_memory(B_PAGE_SIZE) != B_OK)
return B_NO_MEMORY;
} else
store->precommitted_pages--;
store->vm.committed_size += B_PAGE_SIZE;
}
// This will cause vm_soft_fault() to handle the fault
return B_BAD_HANDLER;
}
static vm_store_ops anonymous_ops = {
&anonymous_destroy,
&anonymous_commit,
&anonymous_has_page,
&anonymous_read,
&anonymous_write,
&anonymous_fault,
NULL, // acquire ref
NULL // release ref
};
/* vm_store_create_anonymous
* Create a new vm_store that uses anonymous noswap memory
*/
vm_store *
vm_store_create_anonymous_noswap(bool canOvercommit, int32 numPrecommittedPages, int32 numGuardPages)
{
anonymous_store *store = malloc(sizeof(anonymous_store));
if (store == NULL)
return NULL;
TRACE(("vm_store_create_anonymous(canOvercommit = %s, numGuardPages = %ld) at %p\n",
canOvercommit ? "yes" : "no", numGuardPages, store));
store->vm.ops = &anonymous_ops;
store->vm.cache = NULL;
store->vm.committed_size = 0;
store->can_overcommit = canOvercommit;
store->has_precommitted = numPrecommittedPages != 0;
store->precommitted_pages = min(numPrecommittedPages, 255);
store->guarded_size = numGuardPages * B_PAGE_SIZE;
return &store->vm;
}