kernel/arm: add memory barriers for page table ops

Introduce memory barriers according to the ARM Architecture Reference
Manual (ARM ARM), section G.5.3 "TLB maintenance operations and barriers".

Sequence for mapping memory in (both L1 and L2):
* DSB
* Invalidate i-cache (TODO)
* Insert new entry in page directory / page table
* DSB
* ISB

Sequence for mapping memory out:
* Remove page table entry
* DSB
* Invalidate TLB entry
* DSB
* ISB

Sequence for updating a page table entry:
* Update page table entry
* DSB
* Invalidate TLB entry
* Invalidate branch predictor (TODO)
* DSB
* ISB

Note: i-cache invalidation and branch predictor invalidation are
not implemented yet, as this commit focuses on implementing memory
barriers.

Change-Id: I192fa80f6b43117236a4be6fa8c988afca90e015
Reviewed-on: https://review.haiku-os.org/c/haiku/+/5241
Tested-by: Commit checker robot <no-reply+buildbot@haiku-os.org>
Reviewed-by: Fredrik Holmqvist <fredrik.holmqvist@gmail.com>
This commit is contained in:
David Karoly 2022-04-23 10:12:48 +02:00 committed by waddlesplash
parent cf5f513b3b
commit 83f755b5d8
3 changed files with 49 additions and 26 deletions

View File

@ -11,23 +11,9 @@
// TODO: Could be 32-bits sometimes?
// Memory barrier primitives, selected by architecture revision.
//
// ARMv5 and earlier have no dedicated barrier instructions: isb() and dmb()
// degrade to pure compiler barriers, and dsb() uses the CP15 "drain write
// buffer" operation (c7, c10, 4).
#if __ARM_ARCH__ <= 5
#define isb() __asm__ __volatile__("" : : : "memory")
#define dsb() __asm__ __volatile__("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
#define dmb() __asm__ __volatile__("" : : : "memory")
// ARMv6 exposes the barriers as CP15 c7 operations:
// c7,c5,4 = prefetch flush (ISB), c7,c10,4 = DSB, c7,c10,5 = DMB.
#elif __ARM_ARCH__ == 6
#define isb() __asm__ __volatile__("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
#define dsb() __asm__ __volatile__("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
#define dmb() __asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5" \
: : "r" (0) : "memory")
// ARMv7 and later have dedicated barrier instructions.
#else /* ARMv7+ */
#define isb() __asm__ __volatile__("isb" : : : "memory")
#define dsb() __asm__ __volatile__("dsb" : : : "memory")
#define dmb() __asm__ __volatile__("dmb" : : : "memory")
#endif

// No-ops on ARM; presumably stubs kept for cross-architecture API
// compatibility (x86 alignment-check flag) -- TODO confirm.
#define set_ac()
#define clear_ac()
@ -98,6 +84,7 @@ extern addr_t arm_get_fp(void);
extern int mmu_read_c1(void);
extern int mmu_write_c1(int val);
void arch_cpu_invalidate_TLB_page(addr_t page);
static inline void
arch_cpu_pause(void)

View File

@ -117,34 +117,70 @@ arch_cpu_memory_write_barrier(void)
}
// Invalidate the TLB entry covering the given virtual address.
// Follows the map-out/update sequence: DSB; TLB invalidate; DSB; ISB.
void
arch_cpu_invalidate_TLB_page(addr_t page)
{
// ensure visibility of the update to translation table walks
dsb();
// TLBIMVAIS(page)
// CP15 c8,c3,1: invalidate unified TLB entry by MVA, Inner Shareable.
// NOTE(review): the range/list variants below use c8,c6,1 (data TLB,
// non-shareable) instead -- confirm whether that inconsistency is
// intentional.
asm volatile ("mcr p15, 0, %0, c8, c3, 1"
: : "r" (page));
// ensure completion of TLB invalidation
dsb();
isb();
}
// Invalidate the TLB entries covering the virtual address range
// [start, end], one page at a time.
// Barrier sequence per the ARM ARM: DSB before the maintenance
// operations, DSB + ISB after them.
void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
	// ensure visibility of the update to translation table walks
	dsb();

	// Note: one page beyond the computed page-count difference is
	// invalidated (the original loop condition was inclusive); this
	// behavior is deliberately preserved here.
	int32 count = end / B_PAGE_SIZE - start / B_PAGE_SIZE + 1;
	addr_t address = start;
	for (int32 i = 0; i < count; i++) {
		// CP15 c8,c6,1: invalidate data TLB entry by MVA
		asm volatile ("mcr p15, 0, %[c8format], c8, c6, 1"
			: : [c8format] "r" (address) );
		address += B_PAGE_SIZE;
	}

	// ensure completion of TLB invalidation
	dsb();
	isb();
}
// Invalidate the TLB entry for each virtual address in the given list.
// Barrier sequence per the ARM ARM: DSB before the maintenance
// operations, DSB + ISB after them.
void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
	// ensure visibility of the update to translation table walks
	dsb();

	int index = 0;
	while (index < num_pages) {
		// CP15 c8,c6,1: invalidate data TLB entry by MVA
		asm volatile ("mcr p15, 0, %[c8format], c8, c6, 1":
			: [c8format] "r" (pages[index]) );
		index++;
	}

	// ensure completion of TLB invalidation
	dsb();
	isb();
}
// Invalidate the entire TLB.
// Barrier sequence per the ARM ARM: DSB before the maintenance
// operation, DSB + ISB after it.
void
arch_cpu_global_TLB_invalidate(void)
{
// ensure visibility of the update to translation table walks
dsb();
// CP15 c8,c7,0 (TLBIALL): invalidate entire unified TLB; the register
// operand is ignored by the operation, so zero is passed
uint32 Rd = 0;
asm volatile ("mcr p15, 0, %[c8format], c8, c7, 0"
: : [c8format] "r" (Rd) );
// ensure completion of TLB invalidation
dsb();
isb();
}

View File

@ -185,8 +185,7 @@ ARMPagingMethod32Bit::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
| ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C
| ARM_MMU_L2_FLAG_AP_KRW | ARM_MMU_L2_FLAG_XN;
arch_cpu_invalidate_TLB_range(virtualAddress, virtualAddress + B_PAGE_SIZE);
// invalidate_TLB(virtualAddress);
arch_cpu_invalidate_TLB_page(virtualAddress);
}
@ -396,12 +395,12 @@ ARMPagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
"pgtable. phys=%#" B_PRIxPHYSADDR ", virt=%#" B_PRIxADDR "\n",
pgtable_phys, pgtable_virt);
// zero it out in it's new mapping
memset((void*)pgtable_virt, 0, B_PAGE_SIZE);
// put it in the pgdir
e = &fKernelVirtualPageDirectory[index];
PutPageTableInPageDir(e, pgtable_phys, attributes);
// zero it out in it's new mapping
memset((void*)pgtable_virt, 0, B_PAGE_SIZE);
}
phys_addr_t ptEntryPhys = fKernelVirtualPageDirectory[index] & ARM_PDE_ADDRESS_MASK;
@ -493,14 +492,12 @@ ARMPagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
// Install a (coarse) page table into the given page directory entry.
// Follows the map-in sequence: DSB; insert entry; DSB; ISB.
ARMPagingMethod32Bit::PutPageTableInPageDir(page_directory_entry* entry,
phys_addr_t pgtablePhysical, uint32 attributes)
{
// barrier so that earlier writes (e.g. initializing the new page
// table) are visible before the directory entry is published
dsb();

*entry = (pgtablePhysical & ARM_PDE_ADDRESS_MASK) | ARM_MMU_L1_TYPE_COARSE;
// TODO: we ignore the attributes of the page table - for compatibility
// with BeOS we allow having user accessible areas in the kernel address
// space. This is currently being used by some drivers, mainly for the
// frame buffer. Our current real time data implementation makes use of
// this fact, too.
// We might want to get rid of this possibility one day, especially if
// we intend to port it to a platform that does not support this.

// make the new entry visible to translation table walks and flush
// the pipeline
dsb();
isb();
}
@ -517,6 +514,9 @@ ARMPagingMethod32Bit::PutPageTableEntryInTable(page_table_entry* entry,
// put it in the page table
*(volatile page_table_entry*)entry = page;
dsb();
isb();
}