I reworked the way the paging code stores protection bits, so that
the current access can be compared more efficiently against the
cached values, both in the normal paging routines and in the
accelerated code in access.cc.

This nicely shortened the code path needed to reach direct use of a
host address, and speed definitely got a boost as a result,
especially if you use the --enable-guest2host-tlb option.

The CR0.WP flag was a real pain, because it complicates the way
protections work.  Fortunately it's not a flag that changes often,
so I simply base the new cached info on the current CR0.WP value,
and dump the TLB cache when it changes.
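
Concretely, the cached info is built under whatever CR0.WP value is
in effect at fill time, and SetCR0() triggers the flush when it flips
(a sketch of the interaction; both pieces appear in the diffs below):

  if (prev_wp != BX_CPU_THIS_PTR cr0.wp)
    pagingWPChanged();   // dumps the whole TLB via TLB_clear()
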
Author: Kevin Lawton
Date:   2002-09-04 08:59:13 +00:00
parent  7b157bbf43
commit  d07c1c0bb0
4 changed files with 171 additions and 261 deletions

access.cc

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access.cc,v 1.14 2002-09-03 04:54:28 kevinlawton Exp $
// $Id: access.cc,v 1.15 2002-09-04 08:59:13 kevinlawton Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -255,7 +255,6 @@ BX_CPU_C::strseg(bx_segment_reg_t *seg)
}
extern unsigned priv_check[];
@@ -282,34 +281,20 @@ accessOK:
tlbIndex = BX_TLB_INDEX_OF(laddr);
lpf = laddr & 0xfffff000;
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == lpf) {
Bit32u combined_access;
Bit32u accessBits;
// See if the TLB entry privilege level allows us write access
// from this CPL.
combined_access = BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access;
if (combined_access & 1) { // TLB has seen a write already.
unsigned privIndex;
privIndex =
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.wp<<4) | // bit 4
#endif
(pl<<3) | // bit 3
(combined_access & 0x07); // bit 2,1,0
// Let bit0 slide through since
// we know it's 1 (W) from the
// check above.
if ( priv_check[privIndex] ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit8u *hostAddr;
hostPageAddr =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access &
0xfffffff8;
if (hostPageAddr) {
hostAddr = (Bit8u*) (hostPageAddr + pageOffset);
*hostAddr = *data;
return;
}
accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if ( accessBits & (1 << (2 | pl)) ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit8u *hostAddr;
hostPageAddr = accessBits & 0xfffff000;
if (hostPageAddr) {
hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
*hostAddr = *data;
return;
}
}
}
@@ -349,35 +334,21 @@ accessOK:
tlbIndex = BX_TLB_INDEX_OF(laddr);
lpf = laddr & 0xfffff000;
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == lpf) {
Bit32u combined_access;
Bit32u accessBits;
// See if the TLB entry privilege level allows us write access
// from this CPL.
combined_access =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access;
if (combined_access & 1) { // TLB has seen a write already.
unsigned privIndex;
privIndex =
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.wp<<4) | // b4
#endif
(pl<<3) | // b3
(combined_access & 0x07); // b{2,1,0}
// Let b0 slide through since
// we know it's 1 (W) from the
// check above.
if ( priv_check[privIndex] ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit16u *hostAddr;
hostPageAddr =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access &
0xfffffff8;
if (hostPageAddr) {
hostAddr = (Bit16u*) (hostPageAddr + pageOffset);
*hostAddr = *data;
return;
}
accessBits =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if ( accessBits & (1 << (2 | pl)) ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit16u *hostAddr;
hostPageAddr = accessBits & 0xfffff000;
if (hostPageAddr) {
hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
*hostAddr = *data;
return;
}
}
}
@@ -418,35 +389,21 @@ accessOK:
tlbIndex = BX_TLB_INDEX_OF(laddr);
lpf = laddr & 0xfffff000;
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == lpf) {
Bit32u combined_access;
Bit32u accessBits;
// See if the TLB entry privilege level allows us write access
// from this CPL.
combined_access =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access;
if (combined_access & 1) { // TLB has seen a write already.
unsigned privIndex;
privIndex =
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.wp<<4) | // b4
#endif
(pl<<3) | // b3
(combined_access & 0x07); // b{2,1,0}
// Let b0 slide through since
// we know it's 1 (W) from the
// check above.
if ( priv_check[privIndex] ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit32u *hostAddr;
hostPageAddr =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access &
0xfffffff8;
if (hostPageAddr) {
hostAddr = (Bit32u*) (hostPageAddr + pageOffset);
*hostAddr = *data;
return;
}
accessBits =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if ( accessBits & (1 << (2 | pl)) ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit32u *hostAddr;
hostPageAddr = accessBits & 0xfffff000;
if (hostPageAddr) {
hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
*hostAddr = *data;
return;
}
}
}
@@ -488,15 +445,15 @@ accessOK:
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if ( ((BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access>>2) & 1)
== pl ) {
Bit32u accessBits;
accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if ( accessBits & (1<<pl) ) { // Read this pl OK.
Bit32u hostPageAddr;
Bit8u *hostAddr;
hostPageAddr =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access &
0xfffffff8;
hostPageAddr = accessBits & 0xfffff000;
if (hostPageAddr) {
hostAddr = (Bit8u*) (hostPageAddr + pageOffset);
hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
*data = *hostAddr;
return;
}
@@ -542,14 +499,15 @@ accessOK:
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if ( ((BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access>>2) & 1)
== pl ) {
Bit32u accessBits;
accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if ( accessBits & (1<<pl) ) { // Read this pl OK.
Bit32u hostPageAddr;
Bit16u *hostAddr;
hostPageAddr =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access & 0xfffffff8;
hostPageAddr = accessBits & 0xfffff000;
if (hostPageAddr) {
hostAddr = (Bit16u*) (hostPageAddr + pageOffset);
hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
*data = *hostAddr;
return;
}
@@ -596,14 +554,15 @@ accessOK:
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if ( ((BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access>>2) & 1)
== pl ) {
Bit32u accessBits;
accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if ( accessBits & (1<<pl) ) { // Read this pl OK.
Bit32u hostPageAddr;
Bit32u *hostAddr;
hostPageAddr =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access & 0xfffffff8;
hostPageAddr = accessBits & 0xfffff000;
if (hostPageAddr) {
hostAddr = (Bit32u*) (hostPageAddr + pageOffset);
hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
*data = *hostAddr;
return;
}
@@ -650,37 +609,23 @@ accessOK:
tlbIndex = BX_TLB_INDEX_OF(laddr);
lpf = laddr & 0xfffff000;
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == lpf) {
Bit32u combined_access;
Bit32u accessBits;
// See if the TLB entry privilege level allows us write access
// from this CPL.
combined_access = BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access;
if (combined_access & 1) { // TLB has seen a write already.
unsigned privIndex;
privIndex =
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.wp<<4) | // bit 4
#endif
(pl<<3) | // bit 3
(combined_access & 0x07); // bit 2,1,0
// Let bit0 slide through since
// we know it's 1 (W) from the
// check above.
if ( priv_check[privIndex] ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit8u *hostAddr;
hostPageAddr =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access &
0xfffffff8;
if (hostPageAddr) {
hostAddr = (Bit8u*) (hostPageAddr + pageOffset);
*data = *hostAddr;
BX_CPU_THIS_PTR address_xlation.paddress1 =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf | pageOffset;
//BX_CPU_THIS_PTR address_xlation.pages = 1;
return;
}
accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if ( accessBits & (1 << (2 | pl)) ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit8u *hostAddr;
hostPageAddr = accessBits & 0xfffff000;
if (hostPageAddr) {
hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
*data = *hostAddr;
BX_CPU_THIS_PTR address_xlation.paddress1 =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf | pageOffset;
//BX_CPU_THIS_PTR address_xlation.pages = 1;
return;
}
}
}
@@ -728,38 +673,24 @@ accessOK:
tlbIndex = BX_TLB_INDEX_OF(laddr);
lpf = laddr & 0xfffff000;
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == lpf) {
Bit32u combined_access;
Bit32u accessBits;
// See if the TLB entry privilege level allows us write access
// from this CPL.
combined_access =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access;
if (combined_access & 1) { // TLB has seen a write already.
unsigned privIndex;
privIndex =
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.wp<<4) | // b4
#endif
(pl<<3) | // b3
(combined_access & 0x07); // b{2,1,0}
// Let b0 slide through since
// we know it's 1 (W) from the
// check above.
if ( priv_check[privIndex] ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit16u *hostAddr;
hostPageAddr =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access &
0xfffffff8;
if (hostPageAddr) {
hostAddr = (Bit16u*) (hostPageAddr + pageOffset);
*data = *hostAddr;
BX_CPU_THIS_PTR address_xlation.paddress1 =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf | pageOffset;
BX_CPU_THIS_PTR address_xlation.pages = 1;
return;
}
accessBits =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if ( accessBits & (1 << (2 | pl)) ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit16u *hostAddr;
hostPageAddr = accessBits & 0xfffff000;
if (hostPageAddr) {
hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
*data = *hostAddr;
BX_CPU_THIS_PTR address_xlation.paddress1 =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf | pageOffset;
BX_CPU_THIS_PTR address_xlation.pages = 1;
return;
}
}
}
@@ -805,38 +736,24 @@ accessOK:
tlbIndex = BX_TLB_INDEX_OF(laddr);
lpf = laddr & 0xfffff000;
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == lpf) {
Bit32u combined_access;
Bit32u accessBits;
// See if the TLB entry privilege level allows us write access
// from this CPL.
combined_access =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access;
if (combined_access & 1) { // TLB has seen a write already.
unsigned privIndex;
privIndex =
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.wp<<4) | // b4
#endif
(pl<<3) | // b3
(combined_access & 0x07); // b{2,1,0}
// Let b0 slide through since
// we know it's 1 (W) from the
// check above.
if ( priv_check[privIndex] ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit32u *hostAddr;
hostPageAddr =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].combined_access &
0xfffffff8;
if (hostPageAddr) {
hostAddr = (Bit32u*) (hostPageAddr + pageOffset);
*data = *hostAddr;
BX_CPU_THIS_PTR address_xlation.paddress1 =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf | pageOffset;
BX_CPU_THIS_PTR address_xlation.pages = 1;
return;
}
accessBits =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if ( accessBits & (1 << (2 | pl)) ) {
// Current write access has privilege.
Bit32u hostPageAddr;
Bit32u *hostAddr;
hostPageAddr = accessBits & 0xfffff000;
if (hostPageAddr) {
hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
*data = *hostAddr;
BX_CPU_THIS_PTR address_xlation.paddress1 =
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf | pageOffset;
BX_CPU_THIS_PTR address_xlation.pages = 1;
return;
}
}
}
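
Condensed, the accelerated write path now has the same short shape at
each operand size (a sketch of the pattern repeated above, shown with
the byte variant's types; not additional committed code):

  accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
  if ( accessBits & (1 << (2 | pl)) ) {            // write OK at this pl?
    Bit32u hostPageAddr = accessBits & 0xfffff000; // 4k-aligned host page
    if (hostPageAddr) {                  // zero means no host mapping
      hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      *hostAddr = *data;                 // direct store into host memory
      return;
    }
  }
  // otherwise fall through to the general paging path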

cpu.h

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.26 2002-09-03 04:54:28 kevinlawton Exp $
// $Id: cpu.h,v 1.27 2002-09-04 08:59:13 kevinlawton Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -525,7 +525,7 @@ typedef struct {
typedef struct {
Bit32u lpf; // linear page frame
Bit32u ppf; // physical page frame
Bit32u combined_access;
Bit32u accessBits;
} bx_TLB_entry;
#endif // #if BX_USE_TLB
@@ -1481,6 +1481,7 @@ public: // for now...
BX_SMF void enable_paging(void);
BX_SMF void disable_paging(void);
BX_SMF void CR3_change(Bit32u value32);
BX_SMF void pagingWPChanged(void);
BX_SMF void reset(unsigned source);
BX_SMF void jump_protected(BxInstruction_t *, Bit16u cs, Bit32u disp32);
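
One consequence of this struct change: accessBits does double duty.
Judging from the masks used throughout this commit (my annotation, not
a committed helper), the packing is:

  // bits 31..12 : host page address from getHostMemAddr(), 4k-aligned
  //               (zero when no direct host mapping is available)
  // bits  3..0  : permission bits, tested as 1 << ((W<<1) | U)
  Bit32u hostPage = entry.accessBits & 0xfffff000;
  Bit32u permOK   = entry.accessBits & (1 << ((isWrite<<1) | pl));

(entry, isWrite, and pl above are illustrative locals, not fields from
the header.)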

paging.cc

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: paging.cc,v 1.11 2002-09-03 15:56:24 bdenney Exp $
// $Id: paging.cc,v 1.12 2002-09-04 08:59:13 kevinlawton Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -28,10 +28,6 @@
#if 0
// - what should the reserved bits in the error code be ?
// - move CR0.wp bit in lookup table to cache. Then dump
// cache whenever it is changed. This eliminates the
// extra calculation and shifting.
// - change BX_READ and BX_WRITE to 0,1 ???
#endif
@@ -321,7 +317,7 @@
#warning "Move priv_check to CPU fields, or init.cc"
#endif
unsigned priv_check[BX_PRIV_CHECK_SIZE];
static unsigned priv_check[BX_PRIV_CHECK_SIZE];
@@ -353,6 +349,18 @@ BX_CPU_C::CR3_change(Bit32u value32)
BX_CPU_THIS_PTR cr3 = value32;
}
void
BX_CPU_C::pagingWPChanged(void)
{
// Since our TLB contains markings dependent on the previous value
// of CR0.WP, clear the cache and start over.
TLB_clear();
#ifndef _MSC_VER
#warning "duplicate with disable_paging etc?"
// maybe just do pagingCR0Changed()
#endif
}
void
BX_CPU_C::TLB_init(void)
{
@@ -469,9 +477,9 @@ BX_CPU_C::dtranslate_linear(Bit32u laddress, unsigned pl, unsigned rw)
{
Bit32u lpf, ppf, poffset, TLB_index, error_code, paddress;
Bit32u pde, pde_addr;
unsigned priv_index;
Boolean isWrite;
Bit32u combined_access, new_combined_access;
Bit32u accessBits, combined_access;
unsigned priv_index;
lpf = laddress & 0xfffff000; // linear page frame
poffset = laddress & 0x00000fff; // physical offset
@@ -481,31 +489,10 @@ BX_CPU_C::dtranslate_linear(Bit32u laddress, unsigned pl, unsigned rw)
isWrite = (rw>=BX_WRITE); // write or r-m-w
if (BX_CPU_THIS_PTR TLB.entry[TLB_index].lpf == lpf) {
paddress = BX_CPU_THIS_PTR TLB.entry[TLB_index].ppf | poffset;
combined_access = BX_CPU_THIS_PTR TLB.entry[TLB_index].combined_access;
priv_index =
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.wp<<4) | // bit 4
#endif
(pl<<3) | // bit 3
(combined_access & 0x06) | // bit 2,1
isWrite; // bit 0
if (priv_check[priv_index]) {
// Operation has proper privilege.
// If our TLB entry has _not_ been used with a write before, we need
// to update the PDE.A/PTE.{A,D} fields with a re-walk.
new_combined_access = combined_access | isWrite;
if (new_combined_access == combined_access) {
// A/D bits already up-to-date
return(paddress);
}
// If we have only seen reads for this TLB entry, but the
// permissions must be writeable, we must make sure the
// dirty bit (D) is set. To do this we must rewalk the page
// tables to find the PTE and to give a chance to pick up updated info.
goto pageTableWalk; // for clarity and in case of future mods
paddress = BX_CPU_THIS_PTR TLB.entry[TLB_index].ppf | poffset;
accessBits = BX_CPU_THIS_PTR TLB.entry[TLB_index].accessBits;
if (accessBits & (1 << ((isWrite<<1) | pl)) ) {
return(paddress);
}
// The current access does not have permission according to the info
@@ -633,8 +620,25 @@ pageTableWalk:
BX_CPU_THIS_PTR TLB.entry[TLB_index].lpf = lpf;
BX_CPU_THIS_PTR TLB.entry[TLB_index].ppf = ppf;
BX_CPU_THIS_PTR TLB.entry[TLB_index].combined_access =
combined_access | isWrite;
// 1 << ((W<<1) | U)
// b0: Read Sys OK
// b1: Read User OK
// b2: Write Sys OK
// b3: Write User OK
if ( combined_access & 4 ) { // User
accessBits = 0x3; // User priv; read from {user,sys} OK.
if ( isWrite ) { // Current operation is a write (Dirty bit updated)
accessBits |= 0xc; // write from {user,sys} OK.
}
}
else { // System
accessBits = 0x1; // System priv; read from {sys} OK.
if ( isWrite ) { // Current operation is a write (Dirty bit updated)
accessBits |= 4; // write from {sys} OK.
}
}
BX_CPU_THIS_PTR TLB.entry[TLB_index].accessBits = accessBits;
#if BX_SupportGuest2HostTLB
{
@@ -642,18 +646,8 @@ pageTableWalk:
hostPageAddr = (Bit32u) BX_CPU_THIS_PTR mem->getHostMemAddr(A20ADDR(ppf), rw);
if (hostPageAddr) {
// No veto; a host address was returned.
#if 0
if (hostPageAddr & 0x7) {
BX_PANIC( ("Paging.cc: guest->host code, & 7 sanity check failed!") );
}
#endif
// Host addresses of the beginning of each page must be aligned to
// at least 8-byte boundaries, so we can use the 'combined_access'
// field to store them. Note that for now, such addresses don't need
// to be 4k page aligned, so final addresses are generated with
// '+' and not '|'.
BX_CPU_THIS_PTR TLB.entry[TLB_index].combined_access |=
(hostPageAddr);
// Host addresses are now always 4k page aligned.
BX_CPU_THIS_PTR TLB.entry[TLB_index].accessBits |= hostPageAddr;
}
// Else leave the host address component zero (NULL) to signal no
// valid host address; use long path.
@@ -683,8 +677,8 @@ BX_CPU_C::itranslate_linear(Bit32u laddress, unsigned pl)
{
Bit32u lpf, ppf, poffset, TLB_index, error_code, paddress;
Bit32u pde, pde_addr;
Bit32u accessBits, combined_access;
unsigned priv_index;
Bit32u combined_access;
lpf = laddress & 0xfffff000; // linear page frame
poffset = laddress & 0x00000fff; // physical offset
@@ -692,18 +686,9 @@ BX_CPU_C::itranslate_linear(Bit32u laddress, unsigned pl)
if (BX_CPU_THIS_PTR TLB.entry[TLB_index].lpf == lpf) {
paddress = BX_CPU_THIS_PTR TLB.entry[TLB_index].ppf | poffset;
combined_access = BX_CPU_THIS_PTR TLB.entry[TLB_index].combined_access;
priv_index =
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.wp<<4) | // bit 4
#endif
(pl<<3) | // bit 3
(combined_access & 0x06); // bit 2,1
// bit 0 == 0
if (priv_check[priv_index]) {
// Operation has proper privilege.
paddress = BX_CPU_THIS_PTR TLB.entry[TLB_index].ppf | poffset;
accessBits = BX_CPU_THIS_PTR TLB.entry[TLB_index].accessBits;
if (accessBits & (1 << pl) ) {
return(paddress);
}
@@ -824,8 +809,19 @@ pageTableWalk:
BX_CPU_THIS_PTR TLB.entry[TLB_index].lpf = lpf;
BX_CPU_THIS_PTR TLB.entry[TLB_index].ppf = ppf;
BX_CPU_THIS_PTR TLB.entry[TLB_index].combined_access = combined_access;
// 1 << ((W<<1) | U)
// b0: Read Sys OK
// b1: Read User OK
// b2: Write Sys OK
// b3: Write User OK
if ( combined_access & 4 ) { // User
accessBits = 0x3; // User priv; read from {user,sys} OK.
}
else { // System
accessBits = 0x1; // System priv; read from {sys} OK.
}
BX_CPU_THIS_PTR TLB.entry[TLB_index].accessBits = accessBits;
#if BX_SupportGuest2HostTLB
{
@@ -834,18 +830,8 @@ pageTableWalk:
BX_CPU_THIS_PTR mem->getHostMemAddr(A20ADDR(ppf), BX_READ);
if (hostPageAddr) {
// No veto; a host address was returned.
#if 0
if (hostPageAddr & 0x7) {
BX_PANIC( ("Paging.cc: guest->host code, & 7 sanity check failed!") );
}
#endif
// Host addresses of the beginning of each page must be aligned to
// at least 8-byte boundaries, so we can use the 'combined_access'
// field to store them. Note that for now, such addresses don't need
// to be 4k page aligned, so final addresses are generated with
// '+' and not '|'.
BX_CPU_THIS_PTR TLB.entry[TLB_index].combined_access |=
(hostPageAddr);
// Host addresses are now always 4k page aligned.
BX_CPU_THIS_PTR TLB.entry[TLB_index].accessBits |= hostPageAddr;
}
}
#endif // BX_SupportGuest2HostTLB

proc_ctrl.cc

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: proc_ctrl.cc,v 1.27 2002-09-01 20:12:09 kevinlawton Exp $
// $Id: proc_ctrl.cc,v 1.28 2002-09-04 08:59:13 kevinlawton Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -1070,9 +1070,13 @@ BX_CPU_C::SetCR0(Bit32u val_32)
// from either MOV_CdRd() or debug functions
// protection checks made already or forcing from debug
Boolean prev_pe, prev_pg;
#if BX_CPU_LEVEL >= 4
Boolean prev_wp;
#endif
prev_pe = BX_CPU_THIS_PTR cr0.pe;
prev_pg = BX_CPU_THIS_PTR cr0.pg;
prev_wp = BX_CPU_THIS_PTR cr0.wp;
BX_CPU_THIS_PTR cr0.pe = val_32 & 0x01;
BX_CPU_THIS_PTR cr0.mp = (val_32 >> 1) & 0x01;
@@ -1115,6 +1119,8 @@ BX_CPU_C::SetCR0(Bit32u val_32)
enable_paging();
else if (prev_pg==1 && BX_CPU_THIS_PTR cr0.pg==0)
disable_paging();
if (prev_wp != BX_CPU_THIS_PTR cr0.wp)
pagingWPChanged();
}
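
Why a full TLB flush on a CR0.WP change is sufficient (my reading of
the SetCR0() / pagingWPChanged() interaction, not committed text):

  // accessBits are computed while one CR0.WP value is in effect; after
  // WP flips 0->1, a stale entry could still claim "write/sys OK" for a
  // read-only page.  TLB_clear() forces re-walks, so every refilled
  // entry is consistent with the new WP value.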