Code reorganization + small bug fixes in translate linear code
parent 02ba8c791f
commit 4e5d10d02e
cpu.h

@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: cpu.h,v 1.478 2008-05-15 20:09:59 sshwarts Exp $
+// $Id: cpu.h,v 1.479 2008-05-19 18:10:32 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -2923,6 +2923,7 @@ public: // for now...
 
   // linear address for translate_linear expected to be canonical !
   BX_SMF bx_phy_address translate_linear(bx_address laddr, unsigned curr_pl, unsigned rw, unsigned access_type);
+  BX_SMF bx_phy_address translate_linear_PAE(bx_address laddr, Bit32u &combined_access, unsigned curr_pl, unsigned rw, unsigned access_type);
 
   BX_SMF BX_CPP_INLINE bx_phy_address dtranslate_linear(bx_address laddr, unsigned curr_pl, unsigned rw)
   {
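Note on the new declaration: translate_linear_PAE returns the physical page frame and reports the walked entries' merged protection through the combined_access reference, so translate_linear can keep owning the TLB handling. A minimal sketch of the intended call shape, mirroring the PAE branch of the new translate_linear further down in this commit (the TLB-fill code that consumes combined_access sits outside the changed hunks and is assumed here, not shown by the diff):

    Bit32u combined_access = 0;
    bx_phy_address ppf = translate_linear_PAE(laddr, combined_access, curr_pl, rw, access_type);
    // on return: ppf is the physical page frame; combined_access carries the
    // combined R/W and U/S attributes of the walk, presumably used afterwards
    // to build the TLB entry's accessBits as before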
paging.cc

@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: paging.cc,v 1.131 2008-05-12 19:19:03 sshwarts Exp $
+// $Id: paging.cc,v 1.132 2008-05-19 18:10:32 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -269,9 +269,6 @@
 //
 // - Processor running at CPL=0,1,2 maps to U/S=0
 //   Processor running at CPL=3 maps to U/S=1
-//
-// - Pentium+ processors have separate TLB's for data and instruction caches
-// - Pentium Pro+ processors maintain separate 4K and 4M TLBs.
 
 #define BX_INVALID_TLB_ENTRY 0xffffffff
 
@@ -617,13 +614,13 @@ void BX_CPU_C::page_fault(unsigned fault, bx_address laddr, unsigned user, unsig
   exception(BX_PF_EXCEPTION, error_code, 0);
 }
 
-/* PAE PML4: bits [51 .. physical address width], [7] */
+/* PAE PML4: bits [51 .. physical address width], [7] - support 1G paging */
 #define PAGING_PAE_PML4_RESERVED_BITS \
-    (BX_PHY_ADDRESS_RESERVED_BITS | BX_CONST64(0x00000080))
+    (BX_PHY_ADDRESS_RESERVED_BITS/* | BX_CONST64(0x80)*/)
 
-/* PAE PDPE: bits [51 .. physical address width], [7] - not support 1G paging */
+/* PAE PDPE: bits [51 .. physical address width], [7] - support 1G paging */
 #define PAGING_PAE_PDPE_RESERVED_BITS \
-    (BX_PHY_ADDRESS_RESERVED_BITS | BX_CONST64(0x00000080))
+    (BX_PHY_ADDRESS_RESERVED_BITS/* | BX_CONST64(0x80)*/)
 
 /* PAE PDE: bits [51 .. physical address width] */
 #define PAGING_PAE_PDE_RESERVED_BITS (BX_PHY_ADDRESS_RESERVED_BITS)
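Note on the reserved-bits masks above: BX_PHY_ADDRESS_RESERVED_BITS covers the entry bits above the emulated physical address width, and any entry with such a bit set makes the walk raise a page fault with ERROR_RESERVED | ERROR_PROTECTION (see the page_fault calls below). A standalone illustration, not Bochs code, assuming a 40-bit physical address width purely as an example:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      const unsigned phy_addr_width = 40;   // assumed MAXPHYADDR for illustration
      // a PAE entry may carry a frame address in bits 51..12;
      // bits [51 .. phy_addr_width] must be zero
      uint64_t reserved = ((UINT64_C(1) << 52) - 1) & ~((UINT64_C(1) << phy_addr_width) - 1);
      uint64_t pdpe = UINT64_C(0x0000100000000001);   // hypothetical entry with bit 44 set
      printf("reserved mask = 0x%016llx, would fault = %d\n",
             (unsigned long long)reserved, (pdpe & reserved) != 0);   // mask 0x000fff0000000000, faults
      return 0;
    }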
@@ -635,66 +632,31 @@ void BX_CPU_C::page_fault(unsigned fault, bx_address laddr, unsigned user, unsig
 /* PAE PTE: bits [51 .. physical address width] */
 #define PAGING_PAE_PTE_RESERVED_BITS (BX_PHY_ADDRESS_RESERVED_BITS)
 
 #define PAGE_DIRECTORY_NX_BIT (BX_CONST64(0x8000000000000000))
 
-// Translate a linear address to a physical address
-bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, unsigned rw, unsigned access_type)
+// Translate a linear address to a physical address in PAE paging mode
+bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &combined_access, unsigned curr_pl, unsigned rw, unsigned access_type)
 {
-  Bit32u accessBits, combined_access = 0;
-  unsigned priv_index;
-
-  // note - we assume physical memory < 4gig so for brevity & speed, we'll use
-  // 32 bit entries although cr3 is expanded to 64 bits.
-  bx_phy_address paddress, ppf, poffset = PAGE_OFFSET(laddr);
+  bx_phy_address pdpe_addr, ppf;
+  Bit64u pdpe, pde, pte;
+#if BX_SUPPORT_X86_64
+  Bit64u pml4, pml4_addr;
+#endif
+  unsigned priv_index, nx_fault = 0;
   bx_bool isWrite = (rw >= BX_WRITE); // write or r-m-w
   unsigned pl = (curr_pl == 3);
 
-  InstrTLB_Increment(tlbLookups);
-  InstrTLB_Stats();
-
-  bx_address lpf = LPFOf(laddr);
-  unsigned TLB_index = BX_TLB_INDEX_OF(lpf, 0);
-  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[TLB_index];
-
-  // already looked up TLB for code access
-  if (access_type != CODE_ACCESS && tlbEntry->lpf == lpf)
-  {
-    paddress = tlbEntry->ppf | poffset;
-    accessBits = tlbEntry->accessBits;
-
-    if (accessBits & (0x0100 << ((isWrite<<2) | curr_pl)))
-      return(paddress);
-
-    // The current access does not have permission according to the info
-    // in our TLB cache entry. Re-walk the page tables, in case there is
-    // updated information in the memory image, and let the long path code
-    // generate an exception if one is warranted.
-  }
-
-  BX_DEBUG(("page walk for address 0x" FMT_LIN_ADDRX, laddr));
-
-  InstrTLB_Increment(tlbMisses);
-
-#if BX_SUPPORT_PAE
-  if (BX_CPU_THIS_PTR cr4.get_PAE())
-  {
-    bx_phy_address pdpe_addr;
-    Bit64u pdpe, pde, pte;
-#if BX_SUPPORT_X86_64
-    Bit64u pml4;
-#endif
-    unsigned nx_fault = 0;
+  combined_access = 0;
 
 #if BX_SUPPORT_X86_64
   if (long_mode()) {
     // Get PML4 entry
-    bx_phy_address pml4_addr = (bx_phy_address)(BX_CPU_THIS_PTR cr3_masked |
+    pml4_addr = (bx_phy_address)(BX_CPU_THIS_PTR cr3_masked |
                 ((laddr & BX_CONST64(0x0000ff8000000000)) >> 36));
     BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pml4_addr, 8, &pml4);
     BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pml4_addr, 8, BX_READ, (Bit8u*)(&pml4));
 
-    if (!(pml4 & 0x01)) {
+    if (!(pml4 & 0x1)) {
      BX_DEBUG(("PML4: entry not present"));
      page_fault(ERROR_NOT_PRESENT, laddr, pl, isWrite, access_type);
     }
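Note on the TLB permission test that used to live here (and reappears unchanged in the new translate_linear further down): accessBits packs eight grant bits, tested as accessBits & (0x0100 << ((isWrite<<2) | curr_pl)), i.e. bit 8+curr_pl grants a read at that CPL and bit 12+curr_pl grants a write. A standalone illustration, not Bochs code:

    #include <cstdint>
    #include <cstdio>

    // same test expression as in the diff, wrapped for clarity
    static bool tlb_allows(uint32_t accessBits, unsigned curr_pl, bool isWrite)
    {
      return (accessBits & (0x0100u << (((unsigned)isWrite << 2) | curr_pl))) != 0;
    }

    int main()
    {
      uint32_t accessBits = 0x0F00;   // example entry: readable at CPL 0-3, not writable
      printf("read  at CPL3: %d\n", tlb_allows(accessBits, 3, false));  // prints 1
      printf("write at CPL3: %d\n", tlb_allows(accessBits, 3, true));   // prints 0
      return 0;
    }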
@@ -718,12 +680,6 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
       }
     }
 
-    if (!(pml4 & 0x20)) {
-      pml4 |= 0x20;
-      BX_MEM(0)->writePhysicalPage(BX_CPU_THIS, pml4_addr, 8, &pml4);
-      BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pml4_addr, 8, BX_WRITE, (Bit8u*)(&pml4));
-    }
-
     pdpe_addr = (bx_phy_address)((pml4 & BX_CONST64(0x000ffffffffff000)) |
                 ((laddr & BX_CONST64(0x0000007fc0000000)) >> 27));
   }
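Note on the entry-address arithmetic above: laddr bits 47:39 select the PML4 entry and bits 38:30 the PDPE entry, and the code folds the 8-byte entry size into the shift (>> 36 and >> 27 instead of >> 39 and >> 30). A standalone check, not Bochs code:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      uint64_t laddr = UINT64_C(0x00007f1234567890);   // arbitrary canonical linear address
      uint64_t pml4_off_a = (laddr & UINT64_C(0x0000ff8000000000)) >> 36;  // form used in the diff
      uint64_t pml4_off_b = ((laddr >> 39) & 0x1ff) * 8;                   // 9-bit index * entry size
      uint64_t pdpe_off_a = (laddr & UINT64_C(0x0000007fc0000000)) >> 27;
      uint64_t pdpe_off_b = ((laddr >> 30) & 0x1ff) * 8;
      printf("%d %d\n", pml4_off_a == pml4_off_b, pdpe_off_a == pdpe_off_b);  // prints 1 1
      return 0;
    }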
@@ -736,7 +692,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
   BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pdpe_addr, 8, &pdpe);
   BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pdpe_addr, 8, BX_READ, (Bit8u*)(&pdpe));
 
-  if (!(pdpe & 0x01)) {
+  if (!(pdpe & 0x1)) {
     BX_DEBUG(("PAE PDPE: entry not present"));
     page_fault(ERROR_NOT_PRESENT, laddr, pl, isWrite, access_type);
   }
@@ -767,7 +723,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
   BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pde_addr, 8, &pde);
   BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pde_addr, 8, BX_READ, (Bit8u*)(&pde));
 
-  if (!(pde & 0x01)) {
+  if (!(pde & 0x1)) {
     BX_DEBUG(("PAE PDE: entry not present"));
     page_fault(ERROR_NOT_PRESENT, laddr, pl, isWrite, access_type);
   }
@@ -795,7 +751,6 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
 
   // Ignore CR4.PSE in PAE mode
   if (pde & 0x80) {
-
     if (pde & PAGING_PAE_PDE4M_RESERVED_BITS) {
       BX_DEBUG(("PAE PDE4M: reserved bit is set PDE=%08x:%08x", GET32H(pde), GET32L(pde)));
       page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, isWrite, access_type);
@@ -825,6 +780,17 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
     if (!priv_check[priv_index] || nx_fault)
       page_fault(ERROR_PROTECTION, laddr, pl, isWrite, access_type);
 
+#if BX_SUPPORT_X86_64
+    if (long_mode()) {
+      // Update PML4 A bit if needed.
+      if (!(pml4 & 0x20)) {
+        pml4 |= 0x20;
+        BX_MEM(0)->writePhysicalPage(BX_CPU_THIS, pml4_addr, 8, &pml4);
+        BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pml4_addr, 8, BX_WRITE, (Bit8u*)(&pml4));
+      }
+    }
+#endif
+
     // Update PDPE A bit if needed.
     if (!(pdpe & 0x20)) {
       pdpe |= 0x20;
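Note on the block added above (and its twin in the 4K path below): the PML4 accessed-bit update that was removed right after the PML4 checks now runs only after the priv_check/nx_fault test, so an access that is going to fault no longer marks the PML4 entry as accessed. The update itself is the usual conditional read-modify-write of bit 5 (0x20), as taken from the hunk; shown here only as a sketch of the pattern, with the Bochs-internal calls assumed exactly as in the diff:

    if (!(pml4 & 0x20)) {          // A bit still clear?
      pml4 |= 0x20;                // set it and write the entry back to guest memory
      BX_MEM(0)->writePhysicalPage(BX_CPU_THIS, pml4_addr, 8, &pml4);
      BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pml4_addr, 8, BX_WRITE, (Bit8u*)(&pml4));
    }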
@@ -841,8 +807,10 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
 
     // Make up the physical page frame address.
     ppf = (bx_phy_address)((pde & BX_CONST64(0x000fffffffe00000)) | (laddr & 0x001ff000));
 
+    return ppf;
   }
   else {
 
     // 4k pages, Get page table entry
     bx_phy_address pte_addr = (bx_phy_address)((pde & BX_CONST64(0x000ffffffffff000)) |
                               ((laddr & 0x001ff000) >> 9));
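Note on the 2M frame computation above: the PDE supplies the 2M-aligned base (bits 51:21) and laddr bits 20:12 are OR'd in, which, as far as one can tell from the surrounding code, keeps the result 4K-granular so the TLB can cache the particular 4K slice of the large page. A standalone illustration, not Bochs code, with an assumed PDE value:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      uint64_t pde   = UINT64_C(0x0000000089A00000) | 0x83;  // assumed 2M PDE: P|RW|PS, base 0x89A00000
      uint64_t laddr = UINT64_C(0x00000000C0123456);         // arbitrary linear address
      uint64_t ppf   = (pde & UINT64_C(0x000fffffffe00000))  // 2M-aligned base from the PDE
                     | (laddr & 0x001ff000);                 // which 4K page inside the 2M page
      uint64_t paddr = ppf | (laddr & 0xfff);                // add the byte offset within the page
      printf("paddr = 0x%016llx\n", (unsigned long long)paddr);  // prints 0x0000000089b23456
      return 0;
    }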
@@ -850,7 +818,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
     BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pte_addr, 8, &pte);
     BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pte_addr, 8, BX_READ, (Bit8u*)(&pte));
 
-    if (!(pte & 0x01)) {
+    if (!(pte & 0x1)) {
       BX_DEBUG(("PAE PTE: entry not present"));
       page_fault(ERROR_NOT_PRESENT, laddr, pl, isWrite, access_type);
     }
@@ -899,6 +867,17 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
     if (!priv_check[priv_index] || nx_fault)
       page_fault(ERROR_PROTECTION, laddr, pl, isWrite, access_type);
 
+#if BX_SUPPORT_X86_64
+    if (long_mode()) {
+      // Update PML4 A bit if needed.
+      if (!(pml4 & 0x20)) {
+        pml4 |= 0x20;
+        BX_MEM(0)->writePhysicalPage(BX_CPU_THIS, pml4_addr, 8, &pml4);
+        BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pml4_addr, 8, BX_WRITE, (Bit8u*)(&pml4));
+      }
+    }
+#endif
+
     // Update PDPE A bit if needed.
     if (!(pdpe & 0x20)) {
       pdpe |= 0x20;
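Note on the protection test above: nx_fault is latched while the entries are read (PAGE_DIRECTORY_NX_BIT is bit 63, defined earlier in this file) and folded into the final page_fault decision together with the priv_check table lookup. The conditions that set nx_fault are outside the shown hunks, so the following is only a hypothetical sketch of the usual shape, not a quote of the Bochs code; the real test also depends on EFER.NXE:

    // hypothetical: an instruction fetch through an entry with the NX bit set
    if ((entry & PAGE_DIRECTORY_NX_BIT) && access_type == CODE_ACCESS)
      nx_fault = 1;   // later reported as page_fault(ERROR_PROTECTION, ...)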
@@ -922,7 +901,56 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
     // Make up the physical page frame address.
     ppf = (bx_phy_address)(pte & BX_CONST64(0x000ffffffffff000));
 
+    return ppf;
   }
 
+/* PSE PDE4M: bits [21:17] */
+#define PAGING_PSE_PDE4M_RESERVED_BITS \
+    (BX_PHY_ADDRESS_RESERVED_BITS | BX_CONST64(0x003E0000))
+
+// Translate a linear address to a physical address
+bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, unsigned rw, unsigned access_type)
+{
+  Bit32u accessBits, combined_access = 0;
+  unsigned priv_index;
+
+  // note - we assume physical memory < 4gig so for brevity & speed, we'll use
+  // 32 bit entries although cr3 is expanded to 64 bits.
+  bx_phy_address paddress, ppf, poffset = PAGE_OFFSET(laddr);
+  bx_bool isWrite = (rw >= BX_WRITE); // write or r-m-w
+  unsigned pl = (curr_pl == 3);
+
+  InstrTLB_Increment(tlbLookups);
+  InstrTLB_Stats();
+
+  bx_address lpf = LPFOf(laddr);
+  unsigned TLB_index = BX_TLB_INDEX_OF(lpf, 0);
+  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[TLB_index];
+
+  // already looked up TLB for code access
+  if (access_type != CODE_ACCESS && tlbEntry->lpf == lpf)
+  {
+    paddress = tlbEntry->ppf | poffset;
+    accessBits = tlbEntry->accessBits;
+
+    if (accessBits & (0x0100 << ((isWrite<<2) | curr_pl)))
+      return paddress;
+
+    // The current access does not have permission according to the info
+    // in our TLB cache entry. Re-walk the page tables, in case there is
+    // updated information in the memory image, and let the long path code
+    // generate an exception if one is warranted.
+  }
+
+  BX_DEBUG(("page walk for address 0x" FMT_LIN_ADDRX, laddr));
+
+  InstrTLB_Increment(tlbMisses);
+
+#if BX_SUPPORT_PAE
+  if (BX_CPU_THIS_PTR cr4.get_PAE())
+  {
+    ppf = translate_linear_PAE(laddr, combined_access, curr_pl, rw, access_type);
+  }
+  else
 #endif // #if BX_SUPPORT_PAE
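Note on the else branch that follows: the non-PAE case still does the classic two-level walk inline in translate_linear. The address arithmetic of that walk is not inside the changed hunks below, so the following is a textbook-style reconstruction for reference, not a quote of the Bochs code:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      uint32_t cr3   = 0x00040000u;   // assumed page-directory base (4K aligned)
      uint32_t laddr = 0xC0123456u;   // linear address to translate
      // 4-byte PDE selected by laddr[31:22], 4-byte PTE selected by laddr[21:12]
      uint32_t pde_addr   = (cr3 & 0xfffff000u) | ((laddr >> 20) & 0xffcu);
      uint32_t pte_offset = (laddr >> 10) & 0xffcu;
      printf("PDE at 0x%08x, PTE at page-table base + 0x%03x\n", pde_addr, pte_offset);
      return 0;
    }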
@@ -936,19 +964,24 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
     BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pde_addr, 4, &pde);
     BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pde_addr, 4, BX_READ, (Bit8u*)(&pde));
 
-    if (!(pde & 0x01)) {
+    if (!(pde & 0x1)) {
       BX_DEBUG(("PDE: entry not present"));
       page_fault(ERROR_NOT_PRESENT, laddr, pl, isWrite, access_type);
     }
 
 #if BX_SUPPORT_LARGE_PAGES
-    if ((pde & 0x80) && (BX_CPU_THIS_PTR cr4.get_PSE()))
+    if ((pde & 0x80) && BX_CPU_THIS_PTR cr4.get_PSE())
     {
       // Note: when the PSE and PAE flags in CR4 are set, the
       //       processor generates a PF if the reserved bits are not zero.
       if (pde & PAGING_PSE_PDE4M_RESERVED_BITS) {
         BX_DEBUG(("PSE PDE4M: reserved bit is set: PDE=0x%08x", pde));
         page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, isWrite, access_type);
       }
 
 #if BX_PHY_ADDRESS_WIDTH == 32
       if (pde & 0x0001e000) {
-        BX_PANIC(("PSE PDE 0x%08x: Only 32 bit physical address space is emulated !", pde));
+        BX_PANIC(("PSE PDE4M 0x%08x: Only 32 bit physical address space is emulated !", pde));
       }
 #endif
       // Combined access is just access from the pde (no pte involved).
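Note on the 4M (PSE) checks above: with PSE-36, PDE bits 16:13 carry physical address bits 35:32 of the 4M frame, which is why bits 21:17 are the reserved field in PAGING_PSE_PDE4M_RESERVED_BITS and why a non-zero 16:13 field triggers the "only 32 bit physical address space is emulated" panic. A standalone decoding sketch, not Bochs code; the PSE-36 layout is assumed here for illustration since the hunk itself only shows the checks:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      uint32_t pde   = 0x89C06083u;   // assumed 4M PDE: P|RW|PS, base 0x89C00000, PA[35:32] = 3
      uint32_t laddr = 0xC0123456u;
      // bits [21:17] must be zero (reserved), bits [16:13] extend the frame address (PSE-36)
      uint64_t base  = (uint64_t)(pde & 0xffc00000u)
                     | ((uint64_t)((pde >> 13) & 0xfu) << 32);
      uint64_t paddr = base | (laddr & 0x003fffffu);   // add the 4M page offset
      printf("paddr = 0x%09llx\n", (unsigned long long)paddr);   // prints 0x389d23456
      return 0;
    }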
@@ -989,7 +1022,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
     BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pte_addr, 4, &pte);
     BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pte_addr, 4, BX_READ, (Bit8u*)(&pte));
 
-    if (!(pte & 0x01)) {
+    if (!(pte & 0x1)) {
       BX_DEBUG(("PTE: entry not present"));
       page_fault(ERROR_NOT_PRESENT, laddr, pl, isWrite, access_type);
     }