Applied NewOS change 1914: fixes small bug (one page off), calls
{get|put}_physical_page_tmap() directly.
Also replaced all occurrences of "addr" with "addr_t".


git-svn-id: file:///srv/svn/repos/haiku/trunk/current@5938 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2004-01-06 16:12:52 +00:00
parent cd9ccc0712
commit 86228d9f03

@@ -40,7 +40,7 @@
 typedef struct paddr_chunk_descriptor {
 	struct paddr_chunk_descriptor *next_q; // must remain first in structure, queue code uses it
 	int ref_count;
-	addr va;
+	addr_t va;
 } paddr_chunk_desc;
 
 static paddr_chunk_desc *paddr_desc; // will be one per physical chunk
@@ -59,7 +59,7 @@ typedef struct vm_translation_map_arch_info_struct {
 	pdentry *pgdir_virt;
 	pdentry *pgdir_phys;
 	int num_invalidate_pages;
-	addr pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
+	addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
 } vm_translation_map_arch_info;
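
The pages_to_invalidate array batches TLB invalidations: the call sites queue each touched virtual address (visible in later hunks as the bounded pages_to_invalidate[...] = va store followed by an unconditional counter increment) and flush_tmap() processes the queue in one go. A minimal sketch of that batching, assuming overflow degrades to a full TLB flush; the field and constant names come from this file, the helper is hypothetical:

	#define PAGE_INVALIDATE_CACHE_SIZE 64	/* illustrative size */

	typedef unsigned long addr_t;

	typedef struct {
		int num_invalidate_pages;
		addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE];
	} arch_info_sketch;

	static void
	queue_page_invalidation(arch_info_sketch *info, addr_t va)
	{
		if (info->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
			info->pages_to_invalidate[info->num_invalidate_pages] = va;
		/* count past the cap so the flusher can tell it overflowed
		 * and fall back to invalidating the whole TLB */
		info->num_invalidate_pages++;
	}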
@@ -84,7 +84,9 @@ static spinlock tmap_list_lock;
 #define FIRST_KERNEL_PGDIR_ENT (VADDR_TO_PDENT(KERNEL_BASE))
 #define NUM_KERNEL_PGDIR_ENTS (VADDR_TO_PDENT(KERNEL_SIZE))
 
-static int vm_translation_map_quick_query(addr va, addr *out_physical);
+static int vm_translation_map_quick_query(addr_t va, addr_t *out_physical);
+static int get_physical_page_tmap(addr_t pa, addr_t *va, int flags);
+static int put_physical_page_tmap(addr_t va);
 static void flush_tmap(vm_translation_map *map);
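
The two new prototypes are what make the "calls {get|put}_physical_page_tmap() directly" part of this commit possible: both functions are defined further down the file than their new call sites in map_tmap() and friends, so C needs forward declarations here. Since this file is the x86 implementation behind the generic vm_get_physical_page()/vm_put_physical_page() entry points, the direct call presumably just drops a layer of dispatch. A sketch of the call shape, with an assumed flag value and a stub body standing in for the real function:

	typedef unsigned long addr_t;
	#define PHYSICAL_PAGE_NO_WAIT 0	/* assumed value, illustration only */

	static int get_physical_page_tmap(addr_t pa, addr_t *va, int flags);

	/* before: map_tmap() -> vm_get_physical_page() -> get_physical_page_tmap()
	 * after:  map_tmap() -> get_physical_page_tmap()  (one hop fewer) */
	static void
	caller_sketch(void)
	{
		addr_t pt;

		/* the real call sites retry while the no-wait lookup fails */
		while (get_physical_page_tmap(0x1000, &pt, PHYSICAL_PAGE_NO_WAIT) < 0)
			;
	}

	static int
	get_physical_page_tmap(addr_t pa, addr_t *va, int flags)
	{
		(void)pa; (void)flags;
		*va = 0;	/* stub; the real function maps a chunk into iospace */
		return 0;
	}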
@@ -182,8 +184,8 @@ destroy_tmap(vm_translation_map *map)
 	if (map->arch_data->pgdir_virt != NULL) {
 		// cycle through and free all of the user space pgtables
-		for (i = VADDR_TO_PDENT(USER_BASE); i < VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
-			addr pgtable_addr;
+		for (i = VADDR_TO_PDENT(USER_BASE); i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
+			addr_t pgtable_addr;
 			vm_page *page;
 
 			if (map->arch_data->pgdir_virt[i].present == 1) {
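
This hunk is the "one page off" fix from the commit message: VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)) is the index of the *last* page directory entry covering user space, so the old exclusive bound stopped one entry short and leaked the page table for the final 4 MB of the range on teardown. A standalone demonstration of the bound, with illustrative constants (the real USER_BASE/USER_SIZE values differ):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define VADDR_TO_PDENT(va) ((va) / (PAGE_SIZE * 1024)) /* one pgdir entry per 4 MB */
	#define USER_BASE 0x00000000UL
	#define USER_SIZE 0x80000000UL /* assumed 2 GB user space */

	int main(void)
	{
		unsigned long first = VADDR_TO_PDENT(USER_BASE);
		unsigned long last = VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1));

		/* "< last" visits first..last-1 and skips the final pgtable;
		 * "<= last" covers the whole user range */
		printf("entries %lu..%lu must be freed (%lu total)\n",
			first, last, last - first + 1);
		return 0;
	}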
@@ -203,7 +205,7 @@ destroy_tmap(vm_translation_map *map)
 static void
-put_pgtable_in_pgdir(pdentry *e, addr pgtable_phys, int attributes)
+put_pgtable_in_pgdir(pdentry *e, addr_t pgtable_phys, int attributes)
 {
 	// put it in the pgdir
 	init_pdentry(e);
@@ -215,7 +217,7 @@ put_pgtable_in_pgdir(pdentry *e, addr pgtable_phys, int attributes)
 static int
-map_tmap(vm_translation_map *map, addr va, addr pa, unsigned int attributes)
+map_tmap(vm_translation_map *map, addr_t va, addr_t pa, unsigned int attributes)
 {
 	pdentry *pd;
 	ptentry *pt;
@@ -237,11 +239,15 @@ map_tmap(vm_translation_map *map, addr va, addr pa, unsigned int attributes)
 	// check to see if a page table exists for this range
 	index = VADDR_TO_PDENT(va);
 	if (pd[index].present == 0) {
-		addr pgtable;
+		addr_t pgtable;
 		vm_page *page;
 
 		// we need to allocate a pgtable
 		page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+
+		// mark the page WIRED
+		vm_page_set_state(page, PAGE_STATE_WIRED);
+
 		pgtable = page->ppn * PAGE_SIZE;
 
 		TRACE(("map_tmap: asked for free page for pgtable. 0x%x\n", pgtable));
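
The added vm_page_set_state(page, PAGE_STATE_WIRED) is the substance of this hunk: a freshly allocated page table is marked wired so that, presumably, the page daemon never reclaims a page the MMU itself depends on. The following line converts the page's physical page number into the physical byte address the page directory entry stores. A tiny sketch of that conversion, with assumed types:

	typedef unsigned long addr_t;
	#define PAGE_SIZE 4096UL

	struct vm_page_sketch {
		addr_t ppn;	/* physical page number */
	};

	static addr_t
	page_to_physical_address(const struct vm_page_sketch *page)
	{
		/* ppn 0x1234 -> physical address 0x1234000 with 4 KB pages */
		return page->ppn * PAGE_SIZE;
	}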
@@ -258,7 +264,7 @@ map_tmap(vm_translation_map *map, addr va, addr pa, unsigned int attributes)
 	// now, fill in the pentry
 	do {
-		err = vm_get_physical_page(ADDR_REVERSE_SHIFT(pd[index].addr), (addr *)&pt, PHYSICAL_PAGE_NO_WAIT);
+		err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
 	} while (err < 0);
 
 	index = VADDR_TO_PTENT(va);
@@ -268,7 +274,7 @@ map_tmap(vm_translation_map *map, addr va, addr pa, unsigned int attributes)
 	pt[index].rw = attributes & LOCK_RW;
 	pt[index].present = 1;
 
-	vm_put_physical_page((addr)pt);
+	put_physical_page_tmap((addr_t)pt);
 
 	if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
 		map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va;
@@ -282,7 +288,7 @@ map_tmap(vm_translation_map *map, addr va, addr pa, unsigned int attributes)
 static int
-unmap_tmap(vm_translation_map *map, addr start, addr end)
+unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
 {
 	ptentry *pt;
 	pdentry *pd = map->arch_data->pgdir_virt;
@@ -306,7 +312,7 @@ restart:
 	}
 
 	do {
-		err = vm_get_physical_page(ADDR_REVERSE_SHIFT(pd[index].addr), (addr *)&pt, PHYSICAL_PAGE_NO_WAIT);
+		err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
 	} while (err < 0);
 
 	for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end); index++, start += PAGE_SIZE) {
@@ -326,14 +332,14 @@ restart:
 		map->arch_data->num_invalidate_pages++;
 	}
 
-	vm_put_physical_page((addr)pt);
+	put_physical_page_tmap((addr_t)pt);
 
 	goto restart;
 }
 
 static int
-query_tmap(vm_translation_map *map, addr va, addr *out_physical, unsigned int *out_flags)
+query_tmap(vm_translation_map *map, addr_t va, addr_t *out_physical, unsigned int *out_flags)
 {
 	ptentry *pt;
 	pdentry *pd = map->arch_data->pgdir_virt;
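
The goto restart pattern in the unmap hunks above reflects a constraint of the physical-page window: unmap_tmap() can only have one page table mapped at a time, so it clears up to 1024 entries, releases the mapping, and restarts for the next 4 MB chunk. A standalone sketch of just that loop arithmetic, with illustrative constants:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define VADDR_TO_PTENT(va) (((va) / PAGE_SIZE) % 1024)

	static void
	unmap_range_sketch(unsigned long start, unsigned long end)
	{
		while (start < end) {
			unsigned long index = VADDR_TO_PTENT(start);
			/* the real code maps the covering pgtable here, clears
			 * entries, then releases it and restarts */
			printf("pgtable for 0x%lx: clearing from entry %lu\n",
				start, index);
			for (; index < 1024 && start < end; index++, start += PAGE_SIZE)
				;
		}
	}

	int main(void)
	{
		unmap_range_sketch(0x3ff000, 0x402000); /* spans two page tables */
		return 0;
	}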
@@ -351,7 +357,7 @@ query_tmap(vm_translation_map *map, addr va, addr *out_physical, unsigned int *o
 	}
 
 	do {
-		err = vm_get_physical_page(ADDR_REVERSE_SHIFT(pd[index].addr), (addr *)&pt, PHYSICAL_PAGE_NO_WAIT);
+		err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
 	} while (err < 0);
 
 	index = VADDR_TO_PTENT(va);
@@ -365,7 +371,7 @@ query_tmap(vm_translation_map *map, addr va, addr *out_physical, unsigned int *o
 	*out_flags |= pt[index].accessed ? PAGE_ACCESSED : 0;
 	*out_flags |= pt[index].present ? PAGE_PRESENT : 0;
 
-	vm_put_physical_page((addr)pt);
+	put_physical_page_tmap((addr_t)pt);
 
 	TRACE(("query_tmap: returning pa 0x%x for va 0x%x\n", *out_physical, va));
@@ -373,7 +379,7 @@ query_tmap(vm_translation_map *map, addr va, addr *out_physical, unsigned int *o
 }
 
-static addr
+static addr_t
 get_mapped_size_tmap(vm_translation_map *map)
 {
 	return map->map_count;
@@ -381,7 +387,7 @@ get_mapped_size_tmap(vm_translation_map *map)
 static int
-protect_tmap(vm_translation_map *map, addr base, addr top, unsigned int attributes)
+protect_tmap(vm_translation_map *map, addr_t base, addr_t top, unsigned int attributes)
 {
 	// XXX finish
 	panic("protect_tmap called, not implemented\n");
@@ -390,7 +396,7 @@ protect_tmap(vm_translation_map *map, addr base, addr top, unsigned int attribut
 static int
-clear_flags_tmap(vm_translation_map *map, addr va, unsigned int flags)
+clear_flags_tmap(vm_translation_map *map, addr_t va, unsigned int flags)
 {
 	ptentry *pt;
 	pdentry *pd = map->arch_data->pgdir_virt;
@@ -405,7 +411,7 @@ clear_flags_tmap(vm_translation_map *map, addr va, unsigned int flags)
 	}
 
 	do {
-		err = vm_get_physical_page(ADDR_REVERSE_SHIFT(pd[index].addr), (addr *)&pt, PHYSICAL_PAGE_NO_WAIT);
+		err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
 	} while (err < 0);
 
 	index = VADDR_TO_PTENT(va);
@@ -419,7 +425,7 @@ clear_flags_tmap(vm_translation_map *map, addr va, unsigned int flags)
 		tlb_flush = true;
 	}
 
-	vm_put_physical_page((addr)pt);
+	put_physical_page_tmap((addr_t)pt);
 
 	if (tlb_flush) {
 		if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE)
@@ -460,11 +466,11 @@ flush_tmap(vm_translation_map *map)
 static int
-map_iospace_chunk(addr va, addr pa)
+map_iospace_chunk(addr_t va, addr_t pa)
 {
 	int i;
 	ptentry *pt;
-	addr ppn;
+	addr_t ppn;
 	int state;
 
 	pa &= ~(PAGE_SIZE - 1); // make sure it's page aligned
@@ -493,7 +499,7 @@ map_iospace_chunk(addr va, addr pa)
 static int
-get_physical_page_tmap(addr pa, addr *va, int flags)
+get_physical_page_tmap(addr_t pa, addr_t *va, int flags)
 {
 	int index;
 	paddr_chunk_desc *replaced_pchunk;
@@ -561,7 +567,7 @@ restart:
 static int
-put_physical_page_tmap(addr va)
+put_physical_page_tmap(addr_t va)
 {
 	paddr_chunk_desc *desc;
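
These two functions work against the paddr_chunk_descriptor array declared in the first hunk: one descriptor per fixed-size chunk of physical memory, recording where in the iospace window the chunk is currently mapped (va), how many callers hold it (ref_count), and a queue link (next_q) for recycling slots. A guess at the lookup fast path, based only on those fields; the chunk size and the miss handling (the real miss path goes through map_iospace_chunk(), possibly evicting another chunk) are assumptions:

	typedef unsigned long addr_t;

	#define CHUNK_SIZE (4096UL * 1024)	/* assumed: one page table's reach */

	typedef struct paddr_chunk_desc_sketch {
		struct paddr_chunk_desc_sketch *next_q;	/* queue link for reuse */
		int ref_count;	/* how many callers currently hold this chunk */
		addr_t va;	/* iospace address it is mapped at, or 0 */
	} paddr_chunk_desc_sketch;

	static int
	get_physical_page_sketch(paddr_chunk_desc_sketch *descs, addr_t pa,
		addr_t *va)
	{
		paddr_chunk_desc_sketch *desc = &descs[pa / CHUNK_SIZE];

		if (desc->va == 0) {
			/* not mapped: the real code finds or steals an iospace
			 * slot before handing out an address */
			return -1;
		}
		desc->ref_count++;
		*va = desc->va + (pa % CHUNK_SIZE);
		return 0;
	}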
@@ -643,14 +649,14 @@ vm_translation_map_create(vm_translation_map *new_map, bool kernel)
 			recursive_lock_destroy(&new_map->lock);
 			return ENOMEM;
 		}
-		if (((addr)new_map->arch_data->pgdir_virt % PAGE_SIZE) != 0)
+		if (((addr_t)new_map->arch_data->pgdir_virt % PAGE_SIZE) != 0)
 			panic("vm_translation_map_create: malloced pgdir and found it wasn't aligned!\n");
 
-		vm_get_page_mapping(vm_get_kernel_aspace_id(), (addr)new_map->arch_data->pgdir_virt, (addr *)&new_map->arch_data->pgdir_phys);
+		vm_get_page_mapping(vm_get_kernel_aspace_id(), (addr_t)new_map->arch_data->pgdir_virt, (addr_t *)&new_map->arch_data->pgdir_phys);
 	} else {
 		// kernel
 		// we already know the kernel pgdir mapping
-		(addr)new_map->arch_data->pgdir_virt = kernel_pgdir_virt;
-		(addr)new_map->arch_data->pgdir_phys = kernel_pgdir_phys;
+		(addr_t)new_map->arch_data->pgdir_virt = kernel_pgdir_virt;
+		(addr_t)new_map->arch_data->pgdir_phys = kernel_pgdir_phys;
	}
 
 	// zero out the bottom portion of the new pgdir
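
One thing this hunk carries over unchanged: assigning through a cast, as in "(addr_t)new_map->arch_data->pgdir_virt = kernel_pgdir_virt;", relies on the old GCC cast-as-lvalue extension, which later compilers reject. The portable spelling, shown in the context of the surrounding code rather than as a standalone program, casts the right-hand side instead:

	/* equivalent without the cast-as-lvalue extension */
	new_map->arch_data->pgdir_virt = (pdentry *)kernel_pgdir_virt;
	new_map->arch_data->pgdir_phys = (pdentry *)kernel_pgdir_phys;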
@@ -721,12 +727,12 @@ vm_translation_map_module_init(kernel_args *ka)
 	// put the array of pgtables directly into the kernel pagedir
 	// these will be wired and kept mapped into virtual space to be easy to get to
 	{
-		addr phys_pgtable;
-		addr virt_pgtable;
+		addr_t phys_pgtable;
+		addr_t virt_pgtable;
 		pdentry *e;
 		int i;
 
-		virt_pgtable = (addr)iospace_pgtables;
+		virt_pgtable = (addr_t)iospace_pgtables;
 		for (i = 0; i < (IOSPACE_SIZE / (PAGE_SIZE * 1024)); i++, virt_pgtable += PAGE_SIZE) {
 			vm_translation_map_quick_query(virt_pgtable, &phys_pgtable);
 			e = &page_hole_pgdir[(IOSPACE_BASE / (PAGE_SIZE * 1024)) + i];
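
The loop bound follows from page-table geometry: with 4 KB pages, one page table holds 1024 entries and therefore maps 4 MB, so covering an IOSPACE_SIZE-byte window takes IOSPACE_SIZE / (PAGE_SIZE * 1024) page tables, each one page apart in iospace_pgtables. The arithmetic, worked with an assumed window size:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define IOSPACE_SIZE (256UL * 1024 * 1024)	/* assumed 256 MB window */

	int main(void)
	{
		unsigned long tables = IOSPACE_SIZE / (PAGE_SIZE * 1024);

		/* prints: 64 page tables, one per 4 MB */
		printf("%lu page tables, one per %lu MB\n",
			tables, (PAGE_SIZE * 1024) >> 20);
		return 0;
	}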
@@ -810,7 +816,7 @@ vm_translation_map_quick_map(kernel_args *ka, addr_t va, addr_t pa,
 	// check to see if a page table exists for this range
 	index = VADDR_TO_PDENT(va);
 	if (page_hole_pgdir[index].present == 0) {
-		addr pgtable;
+		addr_t pgtable;
 		pdentry *e;
 
 		// we need to allocate a pgtable
 		pgtable = get_free_page(ka);
@@ -844,7 +850,7 @@ vm_translation_map_quick_map(kernel_args *ka, addr_t va, addr_t pa,
 // XXX currently assumes this translation map is active
 
 static int
-vm_translation_map_quick_query(addr va, addr *out_physical)
+vm_translation_map_quick_query(addr_t va, addr_t *out_physical)
 {
 	ptentry *pentry;