* Renamed the static variables according to Haiku conventions.

* Renamed occurrences of ASID/asid to VSID/vsid where appropriate.
* vm_translation_map_arch_info::vsid_base is now the first usable
  VSID and doesn't need to be shifted anymore.
* Changed the VSID base shift from 4 to 3, since we need only 8 VSIDs
  per team.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15772 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2006-01-01 16:15:04 +00:00
parent 8761241015
commit b019d8c5f3

View File

@ -53,17 +53,14 @@
registers (8 - 15) map the kernel addresses, so they remain unchanged. registers (8 - 15) map the kernel addresses, so they remain unchanged.
The range of the virtual address space a team's effective address space The range of the virtual address space a team's effective address space
is mapped to is defined by its vm_translation_map_arch_info::asid_base. is mapped to is defined by its vm_translation_map_arch_info::vsid_base,
Left-shifted by ASID_SHIFT the value is the first of the 8 successive which is the first of the 8 successive VSID values used for the team.
VSID values used for the team.
Which asid_base values are already taken is defined by the set bits in Which vsid_base values are already taken is defined by the set bits in
the bitmap asid_bitmap. the bitmap sVSIDBaseBitmap.
TODO: TODO:
* Rename asid_bitmap and asid_bitmap_lock.
* An ASID_SHIFT of 3 is sufficient. The kernel reserves asid_base 1 then.
* The page table lies in physical memory and is identity mapped. Either * The page table lies in physical memory and is identity mapped. Either
the boot loader should already map it into the kernel address space or the boot loader should already map it into the kernel address space or
we need to remap here. Otherwise we can't create the area for obvious we need to remap here. Otherwise we can't create the area for obvious
@ -102,16 +99,20 @@ static area_id sPageTableRegion;
// put it 512 MB into kernel space // put it 512 MB into kernel space
#define IOSPACE_BASE (KERNEL_BASE + IOSPACE_SIZE) #define IOSPACE_BASE (KERNEL_BASE + IOSPACE_SIZE)
#define MAX_ASIDS (PAGE_SIZE * 8) // The VSID is a 24 bit number. The lower three bits are defined by the
static uint32 asid_bitmap[MAX_ASIDS / (sizeof(uint32) * 8)]; // (effective) segment number, which leaves us with a 21 bit space of
spinlock asid_bitmap_lock; // VSID bases (= 2 * 1024 * 1024).
#define ASID_SHIFT 4 #define MAX_VSID_BASES (PAGE_SIZE * 8)
#define VADDR_TO_ASID(map, vaddr) \ static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
(((map)->arch_data->asid_base << ASID_SHIFT) + ((vaddr) / 0x10000000)) static spinlock sVSIDBaseBitmapLock;
#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(map, vaddr) \
((map)->arch_data->vsid_base + ((vaddr) >> 28))
// vm_translation object stuff // vm_translation object stuff
typedef struct vm_translation_map_arch_info { typedef struct vm_translation_map_arch_info {
int asid_base; // shift left by ASID_SHIFT to get the base asid to use int vsid_base; // used VSIDs are vsid_base ... vsid_base + 7
} vm_translation_map_arch_info; } vm_translation_map_arch_info;
@ -122,16 +123,16 @@ ppc_translation_map_change_asid(vm_translation_map *map)
#if KERNEL_BASE != 0x80000000 #if KERNEL_BASE != 0x80000000
#error fix me #error fix me
#endif #endif
int asid_base = map->arch_data->asid_base << ASID_SHIFT; int vsidBase = map->arch_data->vsid_base;
asm("mtsr 0,%0" : : "g"(asid_base)); asm("mtsr 0,%0" : : "g"(vsidBase));
asm("mtsr 1,%0" : : "g"(asid_base + 1)); asm("mtsr 1,%0" : : "g"(vsidBase + 1));
asm("mtsr 2,%0" : : "g"(asid_base + 2)); asm("mtsr 2,%0" : : "g"(vsidBase + 2));
asm("mtsr 3,%0" : : "g"(asid_base + 3)); asm("mtsr 3,%0" : : "g"(vsidBase + 3));
asm("mtsr 4,%0" : : "g"(asid_base + 4)); asm("mtsr 4,%0" : : "g"(vsidBase + 4));
asm("mtsr 5,%0" : : "g"(asid_base + 5)); asm("mtsr 5,%0" : : "g"(vsidBase + 5));
asm("mtsr 6,%0" : : "g"(asid_base + 6)); asm("mtsr 6,%0" : : "g"(vsidBase + 6));
asm("mtsr 7,%0" : : "g"(asid_base + 7)); asm("mtsr 7,%0" : : "g"(vsidBase + 7));
} }
@ -159,8 +160,9 @@ destroy_tmap(vm_translation_map *map)
} }
// mark the asid not in use // mark the asid not in use
atomic_and((vint32 *)&asid_bitmap[map->arch_data->asid_base / 32], int baseBit = map->arch_data->vsid_base >> VSID_BASE_SHIFT;
~(1 << (map->arch_data->asid_base % 32))); atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
~(1 << (baseBit % 32)));
free(map->arch_data); free(map->arch_data);
recursive_lock_destroy(&map->lock); recursive_lock_destroy(&map->lock);
@ -201,7 +203,7 @@ static status_t
map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes) map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes)
{ {
// lookup the vsid based off the va // lookup the vsid based off the va
uint32 virtualSegmentID = VADDR_TO_ASID(map, virtualAddress); uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
uint32 protection = 0; uint32 protection = 0;
// ToDo: check this // ToDo: check this
@ -254,7 +256,7 @@ static page_table_entry *
lookup_pagetable_entry(vm_translation_map *map, addr_t virtualAddress) lookup_pagetable_entry(vm_translation_map *map, addr_t virtualAddress)
{ {
// lookup the vsid based off the va // lookup the vsid based off the va
uint32 virtualSegmentID = VADDR_TO_ASID(map, virtualAddress); uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
// dprintf("vm_translation_map.lookup_pagetable_entry: vsid %d, va 0x%lx\n", vsid, va); // dprintf("vm_translation_map.lookup_pagetable_entry: vsid %d, va 0x%lx\n", vsid, va);
@ -454,33 +456,38 @@ arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
} }
cpu_status state = disable_interrupts(); cpu_status state = disable_interrupts();
acquire_spinlock(&asid_bitmap_lock); acquire_spinlock(&sVSIDBaseBitmapLock);
// allocate an ASID base for this one // allocate an ASID base for this one
if (kernel) { if (kernel) {
map->arch_data->asid_base = 0; // set up by the bootloader // The boot loader (respectively the Open Firmware) should have set up
asid_bitmap[0] |= 0x1; // the segment registers for identical mapping. Two VSID bases are
// reserved for the kernel: 0 and 8. The latter one for mapping the
// kernel address space (0x80000000...), the former one for the lower
// addresses required by the Open Firmware services.
map->arch_data->vsid_base = 0;
sVSIDBaseBitmap[0] |= 0x3;
} else { } else {
int i = 0; int i = 0;
while (i < MAX_ASIDS) { while (i < MAX_VSID_BASES) {
if (asid_bitmap[i / 32] == 0xffffffff) { if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
i += 32; i += 32;
continue; continue;
} }
if ((asid_bitmap[i / 32] & (1 << (i % 32))) == 0) { if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
// we found it // we found it
asid_bitmap[i / 32] |= 1 << (i % 32); sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
break; break;
} }
i++; i++;
} }
if (i >= MAX_ASIDS) if (i >= MAX_VSID_BASES)
panic("vm_translation_map_create: out of ASIDs\n"); panic("vm_translation_map_create: out of VSID bases\n");
map->arch_data->asid_base = i; map->arch_data->vsid_base = i << VSID_BASE_SHIFT;
} }
release_spinlock(&asid_bitmap_lock); release_spinlock(&sVSIDBaseBitmapLock);
restore_interrupts(state); restore_interrupts(state);
return B_OK; return B_OK;