kernel: relocate some user virtual addresses

parent f778967edb
commit 49fd67bf2d

@@ -7,6 +7,14 @@
 #include <kernel/arch/aarch64/pml.h>
 #endif
 
+#define KERNEL_HEAP_START 0xFFFFff0000000000UL
+#define MMIO_BASE_START 0xffffff1fc0000000UL
+#define HIGH_MAP_REGION 0xffffff8000000000UL
+#define MODULE_BASE_START 0xffffffff80000000UL
+#define USER_SHM_LOW 0x0000400100000000UL
+#define USER_SHM_HIGH 0x0000500000000000UL
+#define USER_DEVICE_MAP 0x0000400000000000UL
+
 #define MMU_FLAG_KERNEL 0x01
 #define MMU_FLAG_WRITABLE 0x02
 #define MMU_FLAG_NOCACHE 0x04
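
Note: the relocated user regions keep the device map immediately below the SHM window, both in the lower canonical half of the 48-bit address space. A minimal standalone sketch (not part of the commit) that sanity-checks the new constants:

#include <assert.h>

#define USER_DEVICE_MAP 0x0000400000000000UL
#define USER_SHM_LOW    0x0000400100000000UL
#define USER_SHM_HIGH   0x0000500000000000UL

int main(void) {
    /* All three regions sit in the lower canonical half (below 2^47). */
    assert(USER_SHM_HIGH < (1UL << 47));
    /* The device map sits just below the SHM window. */
    assert(USER_DEVICE_MAP < USER_SHM_LOW);
    assert(USER_SHM_LOW < USER_SHM_HIGH);
    return 0;
}
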
@@ -34,15 +34,6 @@ uintptr_t aarch64_kernel_phys_base = 0;
 
 #define LARGE_PAGE_SIZE 0x200000UL
 
-#define KERNEL_HEAP_START 0xFFFFff0000000000UL
-#define MMIO_BASE_START 0xffffff1fc0000000UL
-#define HIGH_MAP_REGION 0xffffff8000000000UL
-#define MODULE_BASE_START 0xffffffff80000000UL
-
-#define USER_SHM_LOW 0x0000000200000000UL
-#define USER_SHM_HIGH 0x0000000400000000UL
-#define USER_DEVICE_MAP 0x0000000100000000UL
-
 #define PHYS_MASK 0x7fffffffffUL
 #define CANONICAL_MASK 0xFFFFffffFFFFUL
 
@@ -533,20 +524,23 @@ size_t mmu_count_shm(union PML * from) {
     /* We walk 'from' and count shm region stuff */
     size_t out = 0;
 
-    if (from[0].bits.present) {
-        union PML * pdp_in = mmu_map_from_physical((uintptr_t)from[0].bits.page << PAGE_SHIFT);
-        /* [0,8,0,0] through [0,15,511,511] map to our current SHM mapping region;
-         * if you change the bounds of that region, be sure to update this! */
-        for (size_t j = 8; j < 16; ++j) {
-            if (pdp_in[j].bits.present) {
-                union PML * pd_in = mmu_map_from_physical((uintptr_t)pdp_in[j].bits.page << PAGE_SHIFT);
-                for (size_t k = 0; k < 512; ++k) {
-                    if (pd_in[k].bits.present) {
-                        union PML * pt_in = mmu_map_from_physical((uintptr_t)pd_in[k].bits.page << PAGE_SHIFT);
-                        for (size_t l = 0; l < 512; ++l) {
-                            if (pt_in[l].bits.present) {
-                                if (pt_in[l].bits.ap & 1) {
-                                    out++;
+    for (size_t i = 0; i < 256; ++i) {
+        if (from[i].bits.present) {
+            union PML * pdp_in = mmu_map_from_physical((uintptr_t)from[i].bits.page << PAGE_SHIFT);
+            for (size_t j = 0; j < 512; ++j) {
+                if (pdp_in[j].bits.present) {
+                    union PML * pd_in = mmu_map_from_physical((uintptr_t)pdp_in[j].bits.page << PAGE_SHIFT);
+                    for (size_t k = 0; k < 512; ++k) {
+                        if (pd_in[k].bits.present) {
+                            union PML * pt_in = mmu_map_from_physical((uintptr_t)pd_in[k].bits.page << PAGE_SHIFT);
+                            for (size_t l = 0; l < 512; ++l) {
+                                /* Calculate final address to skip anything outside the SHM region */
+                                uintptr_t address = ((i << (9 * 3 + 12)) | (j << (9*2 + 12)) | (k << (9 + 12)) | (l << PAGE_SHIFT));
+                                if (address < USER_DEVICE_MAP || address > USER_SHM_HIGH) continue;
+                                if (pt_in[l].bits.present) {
+                                    if (pt_in[l].bits.ap & 1) {
+                                        out++;
+                                    }
                                 }
                             }
                         }
                     }
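
Note: the `address` reconstruction concatenates the four table indices back into a virtual address: i selects a 512GiB PML4 slot (shift 9*3+12 = 39), j a 1GiB PDPT slot (shift 30), k a 2MiB PD slot (shift 21), and l a 4KiB page (shift PAGE_SHIFT). A standalone sketch of the same arithmetic, assuming PAGE_SHIFT is 12 as in the kernel:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Rebuild a virtual address from its four page-table indices,
 * mirroring the expression used in mmu_count_shm(). */
static uintptr_t index_to_vaddr(size_t i, size_t j, size_t k, size_t l) {
    return ((uintptr_t)i << (9 * 3 + 12)) |  /* PML4: 512GiB per slot */
           ((uintptr_t)j << (9 * 2 + 12)) |  /* PDPT: 1GiB per slot */
           ((uintptr_t)k << (9 + 12)) |      /* PD: 2MiB per slot */
           ((uintptr_t)l << PAGE_SHIFT);     /* PT: 4KiB per page */
}

int main(void) {
    /* The first page of the new SHM window, USER_SHM_LOW = 0x400100000000,
     * decomposes to PML4 slot 128, PDPT slot 4, PD slot 0, PT slot 0. */
    printf("%#lx\n", (unsigned long)index_to_vaddr(128, 4, 0, 0));
    return 0;
}
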
@@ -37,16 +37,6 @@ static uint8_t * mem_refcounts = NULL;
 
 #define LARGE_PAGE_SIZE 0x200000UL
 
-#define KERNEL_HEAP_START 0xFFFFff0000000000UL
-#define MMIO_BASE_START 0xffffff1fc0000000UL
-#define HIGH_MAP_REGION 0xffffff8000000000UL
-#define MODULE_BASE_START 0xffffffff80000000UL
-
-/* These are actually defined in the shm layer... */
-#define USER_SHM_LOW 0x0000000200000000UL
-#define USER_SHM_HIGH 0x0000000400000000UL
-#define USER_DEVICE_MAP 0x0000000100000000UL
-
 #define USER_PML_ACCESS 0x07
 #define KERNEL_PML_ACCESS 0x03
 #define LARGE_PAGE_BIT 0x80
@@ -696,20 +686,23 @@ size_t mmu_count_user(union PML * from) {
 size_t mmu_count_shm(union PML * from) {
     size_t out = 0;
 
-    if (from[0].bits.present) {
-        union PML * pdp_in = mmu_map_from_physical((uintptr_t)from[0].bits.page << PAGE_SHIFT);
-        /* [0,8,0,0] through [0,15,511,511] map to our current SHM mapping region;
-         * if you change the bounds of that region, be sure to update this! */
-        for (size_t j = 8; j < 16; ++j) {
-            if (pdp_in[j].bits.present) {
-                union PML * pd_in = mmu_map_from_physical((uintptr_t)pdp_in[j].bits.page << PAGE_SHIFT);
-                for (size_t k = 0; k < 512; ++k) {
-                    if (pd_in[k].bits.present) {
-                        union PML * pt_in = mmu_map_from_physical((uintptr_t)pd_in[k].bits.page << PAGE_SHIFT);
-                        for (size_t l = 0; l < 512; ++l) {
-                            if (pt_in[l].bits.present) {
-                                if (pt_in[l].bits.user) {
-                                    out++;
+    for (size_t i = 0; i < 256; ++i) {
+        if (from[i].bits.present) {
+            union PML * pdp_in = mmu_map_from_physical((uintptr_t)from[i].bits.page << PAGE_SHIFT);
+            for (size_t j = 0; j < 512; ++j) {
+                if (pdp_in[j].bits.present) {
+                    union PML * pd_in = mmu_map_from_physical((uintptr_t)pdp_in[j].bits.page << PAGE_SHIFT);
+                    for (size_t k = 0; k < 512; ++k) {
+                        if (pd_in[k].bits.present) {
+                            union PML * pt_in = mmu_map_from_physical((uintptr_t)pd_in[k].bits.page << PAGE_SHIFT);
+                            for (size_t l = 0; l < 512; ++l) {
+                                /* Calculate final address to skip anything outside the SHM region */
+                                uintptr_t address = ((i << (9 * 3 + 12)) | (j << (9*2 + 12)) | (k << (9 + 12)) | (l << PAGE_SHIFT));
+                                if (address < USER_DEVICE_MAP || address > USER_SHM_HIGH) continue;
+                                if (pt_in[l].bits.present) {
+                                    if (pt_in[l].bits.user) {
+                                        out++;
+                                    }
                                 }
                             }
                         }
                     }
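
Note: on both architectures the outer loop stops at PML4 slot 256 because slots 0-255 cover the user (lower-canonical) half of the address space; kernel regions such as KERNEL_HEAP_START resolve to upper-half slots and are never visited. A small sketch of the slot arithmetic:

#include <stdint.h>
#include <stdio.h>

/* PML4 slots 0-255 cover the lower (user) canonical half; slots
 * 256-511 correspond to sign-extended kernel addresses. */
static unsigned pml4_index(uintptr_t vaddr) {
    return (vaddr >> 39) & 0x1FF;
}

int main(void) {
    printf("%u\n", pml4_index(0x0000400000000000UL)); /* 128: USER_DEVICE_MAP */
    printf("%u\n", pml4_index(0xFFFFff0000000000UL)); /* 510: KERNEL_HEAP_START */
    return 0;
}
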
@@ -401,7 +401,7 @@ process_t * spawn_init(void) {
     mmu_frame_allocate(
         mmu_get_page(init->image.stack - KERNEL_STACK_SIZE, 0),
         MMU_FLAG_KERNEL);
-    init->image.shm_heap = 0x200000000; /* That's 8GiB? That should work fine... */
+    init->image.shm_heap = USER_SHM_LOW;
 
     init->flags = PROC_FLAG_STARTED | PROC_FLAG_RUNNING;
     init->wait_queue = list_create("process wait queue (init)", init);
@@ -465,7 +465,7 @@ process_t * spawn_process(volatile process_t * parent, int flags) {
     mmu_frame_allocate(
         mmu_get_page(proc->image.stack - KERNEL_STACK_SIZE, 0),
         MMU_FLAG_KERNEL);
-    proc->image.shm_heap = 0x200000000; /* FIXME this should be a macro def */
+    proc->image.shm_heap = USER_SHM_LOW;
 
     if (flags & PROC_REUSE_FDS) {
         spin_lock(parent->fds->lock);
@@ -171,7 +171,7 @@ static void * map_in (shm_chunk_t * chunk, volatile process_t * volatile proc) {
     mapping->num_vaddrs = chunk->num_frames;
     mapping->vaddrs = malloc(sizeof(uintptr_t) * mapping->num_vaddrs);
 
-    uintptr_t last_address = 0x200000000;
+    uintptr_t last_address = USER_SHM_LOW;
     foreach(node, proc->shm_mappings) {
         shm_mapping_t * m = node->value;
         if (m->vaddrs[0] > last_address) {
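
Note: map_in picks the lowest free virtual range at or above USER_SHM_LOW by walking the process's existing mappings. A simplified first-fit sketch of that idea over a sorted array; the kernel walks a linked list of shm_mapping_t instead, and the names here are illustrative:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE    0x1000UL
#define USER_SHM_LOW 0x0000400100000000UL

/* First-fit over existing mapping bases (sorted ascending) with their
 * page counts: return the lowest address >= USER_SHM_LOW with room for
 * `pages` contiguous pages. Illustrative only. */
static uintptr_t find_gap(const uintptr_t * bases, const size_t * counts,
                          size_t n, size_t pages) {
    uintptr_t last_address = USER_SHM_LOW;
    for (size_t i = 0; i < n; ++i) {
        if (bases[i] > last_address &&
            bases[i] - last_address >= pages * PAGE_SIZE) {
            return last_address; /* the gap before this mapping fits */
        }
        uintptr_t end = bases[i] + counts[i] * PAGE_SIZE;
        if (end > last_address) last_address = end;
    }
    return last_address; /* append after the last mapping */
}
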
@@ -103,7 +103,7 @@ static int ioctl_vid(fs_node_t * node, unsigned long request, void * argp) {
             uintptr_t lfb_user_offset;
             if (*(uintptr_t*)argp == 0) {
                 /* Pick an address and map it */
-                lfb_user_offset = 0x100000000; /* at 4GiB seems good */
+                lfb_user_offset = USER_DEVICE_MAP;
             } else {
                 validate((void*)(*(uintptr_t*)argp));
                 lfb_user_offset = *(uintptr_t*)argp;
@@ -490,7 +490,7 @@ static int ioctl_vga(fs_node_t * node, unsigned long request, void * argp) {
         {
             uintptr_t vga_user_offset;
             if (*(uintptr_t*)argp == 0) {
-                vga_user_offset = 0x100000000;
+                vga_user_offset = USER_DEVICE_MAP;
             } else {
                 validate((void*)(*(uintptr_t*)argp));
                 vga_user_offset = *(uintptr_t*)argp;
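
Note: with these changes, a userspace client that passes 0 through the video address ioctl gets its framebuffer mapped at USER_DEVICE_MAP instead of 4GiB. A hedged sketch of such a client, assuming the IO_VID_ADDR request code and the /dev/fb0 device path used elsewhere in the tree, and that the kernel writes the chosen address back through argp (verify all three against kernel/video.h before relying on this):

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <kernel/video.h> /* assumed: provides IO_VID_ADDR */

int main(void) {
    int fd = open("/dev/fb0", O_RDONLY); /* assumed device path */
    if (fd < 0) return 1;
    uintptr_t addr = 0; /* 0 = let the kernel pick; now USER_DEVICE_MAP */
    if (ioctl(fd, IO_VID_ADDR, &addr) < 0) return 1;
    printf("framebuffer mapped at %#lx\n", (unsigned long)addr);
    return 0;
}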