x86 page mapping:
* Removed the page_{table,directory}_entry structures. The bit fields are
  nice in principle, but modifying individual flags this way is inherently
  non-atomic and we need atomicity in some situations.
* Use atomic operations in protect_tmap(), clear_flags_tmap(), and others.
* Aligned the query_tmap_interrupt() semantics with that of query_tmap().

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35058 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent b82db87162
commit 9435ae9395
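The gist of the change: assigning to a C bit field compiles to a read-modify-write of the whole 32-bit entry, so a flag update can silently overwrite the accessed/dirty bits the MMU sets concurrently. A minimal sketch of the difference, using Haiku's atomic_and() from SupportDefs.h and the X86_PTE_* masks this commit introduces:

    // Before: non-atomic. The compiler loads the whole entry, clears one
    // bit, and stores the whole entry back; an accessed/dirty bit the MMU
    // sets between the load and the store is lost.
    pt[index].present = 0;

    // After: one locked read-modify-write. Concurrent updates to the
    // other bits of the same word survive.
    atomic_and((int32*)&pt[index], ~X86_PTE_PRESENT);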
@@ -131,48 +131,41 @@ vm_translation_map_arch_info::Delete()
 static status_t
 early_query(addr_t va, addr_t *_physicalAddress)
 {
-    page_table_entry *pentry;
-
-    if (sPageHolePageDir[VADDR_TO_PDENT(va)].present == 0) {
+    if ((sPageHolePageDir[VADDR_TO_PDENT(va)] & X86_PDE_PRESENT) == 0) {
         // no pagetable here
         return B_ERROR;
     }

-    pentry = sPageHole + va / B_PAGE_SIZE;
-    if (pentry->present == 0) {
+    page_table_entry* pentry = sPageHole + va / B_PAGE_SIZE;
+    if ((*pentry & X86_PTE_PRESENT) == 0) {
         // page mapping not valid
         return B_ERROR;
     }

-    *_physicalAddress = pentry->addr << 12;
+    *_physicalAddress = *pentry & X86_PTE_ADDRESS_MASK;
     return B_OK;
 }


 static void
-put_page_table_entry_in_pgtable(page_table_entry *entry,
+put_page_table_entry_in_pgtable(page_table_entry* entry,
     addr_t physicalAddress, uint32 attributes, bool globalPage)
 {
-    page_table_entry page;
-    init_page_table_entry(&page);
-
-    page.addr = ADDR_SHIFT(physicalAddress);
+    page_table_entry page = (physicalAddress & X86_PTE_ADDRESS_MASK)
+        | X86_PTE_PRESENT | (globalPage ? X86_PTE_GLOBAL : 0);

     // if the page is user accessible, it's automatically
     // accessible in kernel space, too (but with the same
     // protection)
-    page.user = (attributes & B_USER_PROTECTION) != 0;
-    if (page.user)
-        page.rw = (attributes & B_WRITE_AREA) != 0;
-    else
-        page.rw = (attributes & B_KERNEL_WRITE_AREA) != 0;
-    page.present = 1;
-
-    if (globalPage)
-        page.global = 1;
+    if ((attributes & B_USER_PROTECTION) != 0) {
+        page |= X86_PTE_USER;
+        if ((attributes & B_WRITE_AREA) != 0)
+            page |= X86_PTE_WRITABLE;
+    } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
+        page |= X86_PTE_WRITABLE;

     // put it in the page table
-    update_page_table_entry(entry, &page);
+    *entry = page;
 }
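Note how put_page_table_entry_in_pgtable() now composes the whole entry in a local uint32 and publishes it with a single aligned 32-bit store, so another CPU can never observe a half-initialized entry. For illustration, a user-accessible writable mapping would be built like this (flag values as defined in the header changes further down):

    page_table_entry page = (physicalAddress & X86_PTE_ADDRESS_MASK)
        | X86_PTE_PRESENT | X86_PTE_USER | X86_PTE_WRITABLE;
    *entry = page;
        // a single store publishes the complete mapping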
@@ -204,24 +197,19 @@ x86_update_all_pgdirs(int index, page_directory_entry e)
 void
 x86_put_pgtable_in_pgdir(page_directory_entry *entry,
-    addr_t pgtable_phys, uint32 attributes)
+    addr_t pgtablePhysical, uint32 attributes)
 {
-    page_directory_entry table;
-    // put it in the pgdir
-    init_page_directory_entry(&table);
-    table.addr = ADDR_SHIFT(pgtable_phys);
+    *entry = (pgtablePhysical & X86_PDE_ADDRESS_MASK)
+        | X86_PDE_PRESENT
+        | X86_PDE_WRITABLE
+        | X86_PDE_USER;

-    // ToDo: we ignore the attributes of the page table - for compatibility
+    // TODO: we ignore the attributes of the page table - for compatibility
     // with BeOS we allow having user accessible areas in the kernel address
     // space. This is currently being used by some drivers, mainly for the
     // frame buffer. Our current real time data implementation makes use of
     // this fact, too.
     // We might want to get rid of this possibility one day, especially if
     // we intend to port it to a platform that does not support this.
-    table.user = 1;
-    table.rw = 1;
-    table.present = 1;
-    update_page_directory_entry(entry, &table);
 }
@@ -266,12 +254,10 @@ destroy_tmap(vm_translation_map *map)
     // cycle through and free all of the user space pgtables
     for (uint32 i = VADDR_TO_PDENT(USER_BASE);
             i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
-        addr_t pgtable_addr;
-        vm_page *page;
-
-        if (map->arch_data->pgdir_virt[i].present == 1) {
-            pgtable_addr = map->arch_data->pgdir_virt[i].addr;
-            page = vm_lookup_page(pgtable_addr);
+        if ((map->arch_data->pgdir_virt[i] & X86_PDE_PRESENT) != 0) {
+            addr_t address = map->arch_data->pgdir_virt[i]
+                & X86_PDE_ADDRESS_MASK;
+            vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
             if (!page)
                 panic("destroy_tmap: didn't find pgtable page\n");
             DEBUG_PAGE_ACCESS_START(page);
@@ -342,10 +328,6 @@ map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
 static status_t
 map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 {
-    page_directory_entry *pd;
-    page_table_entry *pt;
-    unsigned int index;
-
     TRACE(("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));

     /*
@@ -356,11 +338,11 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
     dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
     dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
     */
-    pd = map->arch_data->pgdir_virt;
+    page_directory_entry* pd = map->arch_data->pgdir_virt;

     // check to see if a page table exists for this range
-    index = VADDR_TO_PDENT(va);
-    if (pd[index].present == 0) {
+    uint32 index = VADDR_TO_PDENT(va);
+    if ((pd[index] & X86_PDE_PRESENT) == 0) {
         addr_t pgtable;
         vm_page *page;

@@ -393,8 +375,8 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
     struct thread* thread = thread_get_current_thread();
     ThreadCPUPinner pinner(thread);

-    pt = map->arch_data->page_mapper->GetPageTableAt(
-        ADDR_REVERSE_SHIFT(pd[index].addr));
+    page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
+        pd[index] & X86_PDE_ADDRESS_MASK);
     index = VADDR_TO_PTENT(va);

     put_page_table_entry_in_pgtable(&pt[index], pa, attributes,
@@ -418,9 +400,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 static status_t
 unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
 {
-    page_table_entry *pt;
     page_directory_entry *pd = map->arch_data->pgdir_virt;
-    int index;

     start = ROUNDDOWN(start, B_PAGE_SIZE);
     end = ROUNDUP(end, B_PAGE_SIZE);
@@ -431,8 +411,8 @@ restart:
     if (start >= end)
         return B_OK;

-    index = VADDR_TO_PDENT(start);
-    if (pd[index].present == 0) {
+    int index = VADDR_TO_PDENT(start);
+    if ((pd[index] & X86_PDE_PRESENT) == 0) {
         // no pagetable here, move the start up to access the next page table
         start = ROUNDUP(start + 1, B_PAGE_SIZE);
         goto restart;
@@ -441,19 +421,19 @@ restart:
     struct thread* thread = thread_get_current_thread();
     ThreadCPUPinner pinner(thread);

-    pt = map->arch_data->page_mapper->GetPageTableAt(
-        ADDR_REVERSE_SHIFT(pd[index].addr));
+    page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
+        pd[index] & X86_PDE_ADDRESS_MASK);

     for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
             index++, start += B_PAGE_SIZE) {
-        if (pt[index].present == 0) {
+        if ((pt[index] & X86_PTE_PRESENT) == 0) {
             // page mapping not valid
             continue;
         }

         TRACE(("unmap_tmap: removing page 0x%lx\n", start));

-        pt[index].present = 0;
+        clear_page_table_entry_flags(&pt[index], X86_PTE_PRESENT);
         map->map_count--;

         if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) {
@@ -474,37 +454,37 @@ static status_t
 query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical,
     uint32 *_flags)
 {
-    page_table_entry *pt;
-    page_directory_entry *pd = map->arch_data->pgdir_virt;
-    int32 index;
-
     // default the flags to not present
     *_flags = 0;
     *_physical = 0;

-    index = VADDR_TO_PDENT(va);
-    if (pd[index].present == 0) {
+    int index = VADDR_TO_PDENT(va);
+    page_directory_entry *pd = map->arch_data->pgdir_virt;
+    if ((pd[index] & X86_PDE_PRESENT) == 0) {
         // no pagetable here
-        return B_NO_ERROR;
+        return B_OK;
     }

     struct thread* thread = thread_get_current_thread();
     ThreadCPUPinner pinner(thread);

-    pt = map->arch_data->page_mapper->GetPageTableAt(
-        ADDR_REVERSE_SHIFT(pd[index].addr));
-    index = VADDR_TO_PTENT(va);
-
-    *_physical = ADDR_REVERSE_SHIFT(pt[index].addr);
+    page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
+        pd[index] & X86_PDE_ADDRESS_MASK);
+    page_table_entry entry = pt[VADDR_TO_PTENT(va)];
+
+    *_physical = entry & X86_PDE_ADDRESS_MASK;

     // read in the page state flags
-    if (pt[index].user)
-        *_flags |= (pt[index].rw ? B_WRITE_AREA : 0) | B_READ_AREA;
-
-    *_flags |= ((pt[index].rw ? B_KERNEL_WRITE_AREA : 0) | B_KERNEL_READ_AREA)
-        | (pt[index].dirty ? PAGE_MODIFIED : 0)
-        | (pt[index].accessed ? PAGE_ACCESSED : 0)
-        | (pt[index].present ? PAGE_PRESENT : 0);
+    if ((entry & X86_PTE_USER) != 0) {
+        *_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
+            | B_READ_AREA;
+    }
+
+    *_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
+        | B_KERNEL_READ_AREA
+        | ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
+        | ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
+        | ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

     pinner.Unlock();

@@ -518,30 +498,34 @@ static status_t
 query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
     uint32 *_flags)
 {
-    page_directory_entry *pd = map->arch_data->pgdir_virt;
-    page_table_entry *pt;
-    addr_t physicalPageTable;
-    int32 index;
-
+    *_flags = 0;
     *_physical = 0;

-    index = VADDR_TO_PDENT(va);
-    if (pd[index].present == 0) {
+    int index = VADDR_TO_PDENT(va);
+    page_directory_entry* pd = map->arch_data->pgdir_virt;
+    if ((pd[index] & X86_PDE_PRESENT) == 0) {
         // no pagetable here
-        return B_ERROR;
+        return B_OK;
     }

     // map page table entry
-    physicalPageTable = ADDR_REVERSE_SHIFT(pd[index].addr);
-    pt = gPhysicalPageMapper->InterruptGetPageTableAt(physicalPageTable);
-
-    index = VADDR_TO_PTENT(va);
-    *_physical = ADDR_REVERSE_SHIFT(pt[index].addr);
-
-    *_flags |= ((pt[index].rw ? B_KERNEL_WRITE_AREA : 0) | B_KERNEL_READ_AREA)
-        | (pt[index].dirty ? PAGE_MODIFIED : 0)
-        | (pt[index].accessed ? PAGE_ACCESSED : 0)
-        | (pt[index].present ? PAGE_PRESENT : 0);
+    page_table_entry* pt = gPhysicalPageMapper->InterruptGetPageTableAt(
+        pd[index] & X86_PDE_ADDRESS_MASK);
+    page_table_entry entry = pt[VADDR_TO_PTENT(va)];
+
+    *_physical = entry & X86_PDE_ADDRESS_MASK;
+
+    // read in the page state flags
+    if ((entry & X86_PTE_USER) != 0) {
+        *_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
+            | B_READ_AREA;
+    }
+
+    *_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
+        | B_KERNEL_READ_AREA
+        | ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
+        | ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
+        | ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

     return B_OK;
 }
@@ -558,9 +542,7 @@ static status_t
 protect_tmap(vm_translation_map *map, addr_t start, addr_t end,
     uint32 attributes)
 {
-    page_table_entry *pt;
     page_directory_entry *pd = map->arch_data->pgdir_virt;
-    int index;

     start = ROUNDDOWN(start, B_PAGE_SIZE);
     end = ROUNDUP(end, B_PAGE_SIZE);
@@ -572,8 +554,8 @@ restart:
     if (start >= end)
         return B_OK;

-    index = VADDR_TO_PDENT(start);
-    if (pd[index].present == 0) {
+    int index = VADDR_TO_PDENT(start);
+    if ((pd[index] & X86_PDE_PRESENT) == 0) {
         // no pagetable here, move the start up to access the next page table
         start = ROUNDUP(start + 1, B_PAGE_SIZE);
         goto restart;
@@ -582,23 +564,32 @@ restart:
     struct thread* thread = thread_get_current_thread();
     ThreadCPUPinner pinner(thread);

-    pt = map->arch_data->page_mapper->GetPageTableAt(
-        ADDR_REVERSE_SHIFT(pd[index].addr));
+    page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
+        pd[index] & X86_PDE_ADDRESS_MASK);

     for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
             index++, start += B_PAGE_SIZE) {
-        if (pt[index].present == 0) {
+        page_table_entry entry = pt[index];
+        if ((entry & X86_PTE_PRESENT) == 0) {
             // page mapping not valid
             continue;
         }

         TRACE(("protect_tmap: protect page 0x%lx\n", start));

-        pt[index].user = (attributes & B_USER_PROTECTION) != 0;
-        if ((attributes & B_USER_PROTECTION) != 0)
-            pt[index].rw = (attributes & B_WRITE_AREA) != 0;
-        else
-            pt[index].rw = (attributes & B_KERNEL_WRITE_AREA) != 0;
+        entry &= ~(X86_PTE_WRITABLE | X86_PTE_USER);
+        if ((attributes & B_USER_PROTECTION) != 0) {
+            entry |= X86_PTE_USER;
+            if ((attributes & B_WRITE_AREA) != 0)
+                entry |= X86_PTE_WRITABLE;
+        } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
+            entry |= X86_PTE_WRITABLE;
+
+        set_page_table_entry(&pt[index], entry);
+            // TODO: We might have cleared accessed/modified flags!
+
+        // TODO: Optimize: If the accessed flag was clear, we don't need to
+        // invalidate the TLB for that address.

         if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) {
             map->arch_data->pages_to_invalidate[
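The TODO above marks a remaining race: between reading pt[index] and writing the modified entry back, the MMU may set the accessed or dirty bit, which set_page_table_entry() then overwrites. A hypothetical fix (not part of this commit) would retry via Haiku's atomic_test_and_set(); newProtectionBits here is a stand-in for the protection bits computed in the loop above:

    page_table_entry oldEntry = pt[index];
    while (true) {
        page_table_entry newEntry
            = (oldEntry & ~(X86_PTE_WRITABLE | X86_PTE_USER))
                | newProtectionBits;
        // atomic_test_and_set() returns the previous value and only
        // stores newEntry if the entry still equals oldEntry
        page_table_entry previous = atomic_test_and_set((int32*)&pt[index],
            newEntry, oldEntry);
        if (previous == oldEntry)
            break;
        oldEntry = previous;
            // raced with the MMU or another CPU - retry
    }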
@@ -617,37 +608,30 @@ restart:
 static status_t
 clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
 {
-    page_table_entry *pt;
-    page_directory_entry *pd = map->arch_data->pgdir_virt;
-    int index;
-    int tlb_flush = false;
-
-    index = VADDR_TO_PDENT(va);
-    if (pd[index].present == 0) {
+    int index = VADDR_TO_PDENT(va);
+    page_directory_entry* pd = map->arch_data->pgdir_virt;
+    if ((pd[index] & X86_PDE_PRESENT) == 0) {
         // no pagetable here
         return B_OK;
     }

+    uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PTE_DIRTY : 0)
+        | ((flags & PAGE_ACCESSED) ? X86_PTE_ACCESSED : 0);
+
     struct thread* thread = thread_get_current_thread();
     ThreadCPUPinner pinner(thread);

-    pt = map->arch_data->page_mapper->GetPageTableAt(
-        ADDR_REVERSE_SHIFT(pd[index].addr));
+    page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
+        pd[index] & X86_PDE_ADDRESS_MASK);
     index = VADDR_TO_PTENT(va);

     // clear out the flags we've been requested to clear
-    if (flags & PAGE_MODIFIED) {
-        pt[index].dirty = 0;
-        tlb_flush = true;
-    }
-    if (flags & PAGE_ACCESSED) {
-        pt[index].accessed = 0;
-        tlb_flush = true;
-    }
+    page_table_entry oldEntry
+        = clear_page_table_entry_flags(&pt[index], flagsToClear);

     pinner.Unlock();

-    if (tlb_flush) {
+    if ((oldEntry & flagsToClear) != 0) {
         if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) {
             map->arch_data->pages_to_invalidate[
                 map->arch_data->num_invalidate_pages] = va;
@@ -889,7 +873,7 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
     TRACE(("vm_translation_map_init_post_area: entry\n"));

     // unmap the page hole hack we were using before
-    sKernelVirtualPageDirectory[1023].present = 0;
+    sKernelVirtualPageDirectory[1023] = 0;
     sPageHolePageDir = NULL;
     sPageHole = NULL;

@@ -926,7 +910,7 @@ arch_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
     // check to see if a page table exists for this range
     index = VADDR_TO_PDENT(va);
-    if (sPageHolePageDir[index].present == 0) {
+    if ((sPageHolePageDir[index] & X86_PDE_PRESENT) == 0) {
         addr_t pgtable;
         page_directory_entry *e;
         // we need to allocate a pgtable
@@ -995,34 +979,35 @@ arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
             gPhysicalPageMapper->PutPageDebug(virtualPageDirectory,
                 handle);
         } else
-            pageDirectoryEntry.present = 0;
+            pageDirectoryEntry = 0;
     }

     // map the page table and get the entry
     page_table_entry pageTableEntry;
     index = VADDR_TO_PTENT(virtualAddress);

-    if (pageDirectoryEntry.present != 0) {
+    if ((pageDirectoryEntry & X86_PDE_PRESENT) != 0) {
         void* handle;
         addr_t virtualPageTable;
         status_t error = gPhysicalPageMapper->GetPageDebug(
-            ADDR_REVERSE_SHIFT(pageDirectoryEntry.addr), &virtualPageTable,
+            pageDirectoryEntry & X86_PDE_ADDRESS_MASK, &virtualPageTable,
             &handle);
         if (error == B_OK) {
             pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
             gPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
         } else
-            pageTableEntry.present = 0;
+            pageTableEntry = 0;
     } else
-        pageTableEntry.present = 0;
+        pageTableEntry = 0;

     // switch back to the original page directory
     if (physicalPageDirectory != (addr_t)sKernelPhysicalPageDirectory)
         write_cr3(physicalPageDirectory);

-    if (pageTableEntry.present == 0)
+    if ((pageTableEntry & X86_PTE_PRESENT) == 0)
         return false;

     // present means kernel-readable, so check for writable
-    return (protection & B_KERNEL_WRITE_AREA) == 0 || pageTableEntry.rw != 0;
+    return (protection & B_KERNEL_WRITE_AREA) == 0
+        || (pageTableEntry & X86_PTE_WRITABLE) != 0;
 }
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2005-2009, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
@@ -17,9 +18,6 @@
 #define PAGE_INVALIDATE_CACHE_SIZE 64

-#define ADDR_SHIFT(x) ((x) >> 12)
-#define ADDR_REVERSE_SHIFT(x) ((x) << 12)
-
 #define VADDR_TO_PDENT(va) (((va) / B_PAGE_SIZE) / 1024)
 #define VADDR_TO_PTENT(va) (((va) / B_PAGE_SIZE) % 1024)

@@ -27,33 +25,39 @@
 class TranslationMapPhysicalPageMapper;


-typedef struct page_table_entry {
-    uint32 present:1;
-    uint32 rw:1;
-    uint32 user:1;
-    uint32 write_through:1;
-    uint32 cache_disabled:1;
-    uint32 accessed:1;
-    uint32 dirty:1;
-    uint32 reserved:1;
-    uint32 global:1;
-    uint32 avail:3;
-    uint32 addr:20;
-} page_table_entry;
-
-typedef struct page_directory_entry {
-    uint32 present:1;
-    uint32 rw:1;
-    uint32 user:1;
-    uint32 write_through:1;
-    uint32 cache_disabled:1;
-    uint32 accessed:1;
-    uint32 reserved:1;
-    uint32 page_size:1;
-    uint32 global:1;
-    uint32 avail:3;
-    uint32 addr:20;
-} page_directory_entry;
+// page directory entry bits
+#define X86_PDE_PRESENT             0x00000001
+#define X86_PDE_WRITABLE            0x00000002
+#define X86_PDE_USER                0x00000004
+#define X86_PDE_WRITE_THROUGH       0x00000008
+#define X86_PDE_CACHING_DISABLED    0x00000010
+#define X86_PDE_ACCESSED            0x00000020
+#define X86_PDE_IGNORED1            0x00000040
+#define X86_PDE_RESERVED1           0x00000080
+#define X86_PDE_IGNORED2            0x00000100
+#define X86_PDE_IGNORED3            0x00000200
+#define X86_PDE_IGNORED4            0x00000400
+#define X86_PDE_IGNORED5            0x00000800
+#define X86_PDE_ADDRESS_MASK        0xfffff000
+
+// page table entry bits
+#define X86_PTE_PRESENT             0x00000001
+#define X86_PTE_WRITABLE            0x00000002
+#define X86_PTE_USER                0x00000004
+#define X86_PTE_WRITE_THROUGH       0x00000008
+#define X86_PTE_CACHING_DISABLED    0x00000010
+#define X86_PTE_ACCESSED            0x00000020
+#define X86_PTE_DIRTY               0x00000040
+#define X86_PTE_PAT                 0x00000080
+#define X86_PTE_GLOBAL              0x00000100
+#define X86_PTE_IGNORED1            0x00000200
+#define X86_PTE_IGNORED2            0x00000400
+#define X86_PTE_IGNORED3            0x00000800
+#define X86_PTE_ADDRESS_MASK        0xfffff000
+
+
+typedef uint32 page_table_entry;
+typedef uint32 page_directory_entry;


 struct vm_translation_map_arch_info : DeferredDeletable {
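These masks encode the same IA-32 layout the removed bit fields described: bit 0 is the present flag, bit 1 write access, bit 2 user access, and bits 12-31 the physical page address. A quick comparison of the two idioms (removed bit-field form first, new mask form second):

    if (pd[index].present == 0) ...                 // removed style
    if ((pd[index] & X86_PDE_PRESENT) == 0) ...     // new style

    addr_t pa = ADDR_REVERSE_SHIFT(pt[index].addr); // removed style
    addr_t pa = pt[index] & X86_PTE_ADDRESS_MASK;   // new style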
@@ -83,33 +87,24 @@ void x86_put_pgtable_in_pgdir(page_directory_entry* entry,
 void x86_update_all_pgdirs(int index, page_directory_entry entry);


-static inline void
-init_page_directory_entry(page_directory_entry *entry)
+static inline page_table_entry
+set_page_table_entry(page_table_entry* entry, page_table_entry newEntry)
 {
-    *(uint32 *)entry = 0;
+    return atomic_set((int32*)entry, newEntry);
 }


-static inline void
-update_page_directory_entry(page_directory_entry *entry, page_directory_entry *with)
+static inline page_table_entry
+clear_page_table_entry_flags(page_table_entry* entry, uint32 flags)
 {
-    // update page directory entry atomically
-    *(uint32 *)entry = *(uint32 *)with;
+    return atomic_and((int32*)entry, ~flags);
 }


-static inline void
-init_page_table_entry(page_table_entry *entry)
+static inline page_table_entry
+set_page_table_entry_flags(page_table_entry* entry, uint32 flags)
 {
-    *(uint32 *)entry = 0;
-}
-
-
-static inline void
-update_page_table_entry(page_table_entry *entry, page_table_entry *with)
-{
-    // update page table entry atomically
-    *(uint32 *)entry = *(uint32 *)with;
+    return atomic_or((int32*)entry, flags);
 }
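These helpers wrap Haiku's atomic_set(), atomic_and() and atomic_or() and return the previous entry value; that return value is what clear_flags_tmap() above uses to decide whether a TLB invalidation is needed at all. A short usage sketch, assuming pt points at a mapped page table:

    // atomically clear the present bit; flag updates racing in from the
    // MMU are preserved, and the old value is returned for inspection
    page_table_entry oldEntry
        = clear_page_table_entry_flags(&pt[index], X86_PTE_PRESENT);
    if ((oldEntry & X86_PTE_ACCESSED) != 0) {
        // the page was referenced since the flag was last cleared
    }

    // atomically set flags without clobbering concurrent bit updates
    set_page_table_entry_flags(&pt[index], X86_PTE_DIRTY | X86_PTE_ACCESSED);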
@@ -1,15 +1,15 @@
 /*
- * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Distributed under the terms of the MIT License.
  */
 #ifndef _KERNEL_ARCH_X86_PHYSICAL_PAGE_MAPPER_H
 #define _KERNEL_ARCH_X86_PHYSICAL_PAGE_MAPPER_H

-#include <SupportDefs.h>
+#include "x86_paging.h"


 struct kernel_args;
-struct page_table_entry;
 struct vm_translation_map_ops;

@@ -1,5 +1,5 @@
 /*
- * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Distributed under the terms of the MIT License.
  */

@@ -204,12 +204,8 @@ PhysicalPageSlot::Map(addr_t physicalAddress)
 {
     page_table_entry& pte = pool->pageTable[
         (address - pool->virtualBase) / B_PAGE_SIZE];
-    init_page_table_entry(&pte);
-    pte.addr = ADDR_SHIFT(physicalAddress);
-    pte.user = 0;
-    pte.rw = 1;
-    pte.present = 1;
-    pte.global = 1;
+    pte = (physicalAddress & X86_PTE_ADDRESS_MASK)
+        | X86_PTE_WRITABLE | X86_PTE_GLOBAL | X86_PTE_PRESENT;

     invalidate_TLB(address);
 }