Implemented Supervisor Mode Execution Protection (SMEP)

This commit is contained in:
Stanislav Shwartsman 2011-05-29 16:28:26 +00:00
parent 6ace540891
commit ee3f9e36cb
16 changed files with 138 additions and 68 deletions

View File

@ -191,6 +191,10 @@ cpu: count=1, ips=50000000, reset_on_triple_fault=1, ignore_bad_msrs=1, msrs="ms
# Enable FS/GS BASE access instructions support in long mode.
# This option exists only if Bochs compiled with x86-64 support.
#
# SMEP:
# Enable Supervisor Mode Execution Protection (SMEP) support.
# This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6.
#
# MWAIT:
# Select MONITOR/MWAIT instructions support.
# This option exists only if Bochs compiled with --enable-monitor-mwait.

View File

@ -3,6 +3,8 @@ Changes after 2.4.6 release:
Bochs repository moved to the SVN version control !
- CPU
- Implemented Supervisor Mode Execution Protection (SMEP), the feature can
be enabled using .bochsrc CPUID option.
- Added support for XSAVEOPT instruction, the instruction can be enabled
using .bochsrc CPUID option.
- Added support for AVX instruction set emulation, to enable configure with

View File

@ -44,6 +44,7 @@ cpuid
1g_pages
pcid
fsgsbase
smep
mwait
mwait_is_nop

View File

@ -913,7 +913,8 @@ void bx_dbg_info_control_regs_command(void)
dbg_printf(" PWT=page-level write-through=%d\n", (cr3>>3) & 1);
#if BX_CPU_LEVEL >= 4
Bit32u cr4 = SIM->get_param_num("CR4", dbg_cpu_list)->get();
dbg_printf("CR4=0x%08x: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n", cr4,
dbg_printf("CR4=0x%08x: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n", cr4,
(cr4 & (1<<20)) ? "SMEP" : "smep",
(cr4 & (1<<18)) ? "OSXSAVE" : "osxsave",
(cr4 & (1<<17)) ? "PCID" : "pcid",
(cr4 & (1<<16)) ? "FSGSBASE" : "fsgsbase",

View File

@ -322,7 +322,7 @@ void bx_init_options()
// cpuid subtree
#if BX_CPU_LEVEL >= 4
bx_list_c *cpuid_param = new bx_list_c(root_param, "cpuid", "CPUID Options", 20);
bx_list_c *cpuid_param = new bx_list_c(root_param, "cpuid", "CPUID Options", 21);
new bx_param_bool_c(cpuid_param,
"cpuid_limit_winnt", "Limit max CPUID function to 3",
@ -434,6 +434,10 @@ void bx_init_options()
"FS/GS BASE access instructions support in long mode",
0);
#endif
new bx_param_bool_c(cpuid_param,
"smep", "Supervisor Mode Execution Protection support",
"Supervisor Mode Execution Protection support",
0);
#if BX_SUPPORT_MONITOR_MWAIT
new bx_param_bool_c(cpuid_param,
"mwait", "MONITOR/MWAIT instructions support",
@ -2700,6 +2704,10 @@ static int parse_line_formatted(const char *context, int num_params, char *param
PARSE_ERR(("%s: cpuid directive malformed.", context));
}
#endif
} else if (!strncmp(params[i], "smep=", 5)) {
if (parse_param_bool(params[i], 5, BXPN_CPUID_SMEP) < 0) {
PARSE_ERR(("%s: cpuid directive malformed.", context));
}
#if BX_SUPPORT_MONITOR_MWAIT
} else if (!strncmp(params[i], "mwait=", 6)) {
if (parse_param_bool(params[i], 6, BXPN_CPUID_MWAIT) < 0) {
@ -3900,13 +3908,14 @@ int bx_write_configuration(const char *rc, int overwrite)
SIM->get_param_enum(BXPN_CPUID_APIC)->get_selected());
#endif
#if BX_CPU_LEVEL >= 6
fprintf(fp, ", sse=%s, sep=%d, aes=%d, xsave=%d, xsaveopt=%d, movbe=%d",
fprintf(fp, ", sse=%s, sep=%d, aes=%d, xsave=%d, xsaveopt=%d, movbe=%d, smep=%d",
SIM->get_param_enum(BXPN_CPUID_SSE)->get_selected(),
SIM->get_param_bool(BXPN_CPUID_SEP)->get(),
SIM->get_param_bool(BXPN_CPUID_AES)->get(),
SIM->get_param_bool(BXPN_CPUID_XSAVE)->get(),
SIM->get_param_bool(BXPN_CPUID_XSAVEOPT)->get(),
SIM->get_param_bool(BXPN_CPUID_MOVBE)->get());
SIM->get_param_bool(BXPN_CPUID_MOVBE)->get(),
SIM->get_param_bool(BXPN_CPUID_SMEP)->get());
#if BX_SUPPORT_AVX
fprintf(fp, ", avx=%d", SIM->get_param_bool(BXPN_CPUID_AVX)->get());
#endif

View File

@ -722,7 +722,7 @@ void BX_CPU_C::prefetch(void)
fetchPtr = (Bit8u*) tlbEntry->hostPageAddr;
}
else {
bx_phy_address pAddr = translate_linear(laddr, CPL, BX_EXECUTE);
bx_phy_address pAddr = translate_linear(laddr, USER_PL, BX_EXECUTE);
BX_CPU_THIS_PTR pAddrPage = PPFOf(pAddr);
}

View File

@ -647,6 +647,7 @@ typedef struct
#define BX_CPU_X2APIC (1 << 8) /* X2APIC support */
#define BX_CPU_1G_PAGES (1 << 9) /* 1Gb pages support */
#define BX_CPU_PCID (1 << 10) /* PCID pages support */
#define BX_CPU_SMEP (1 << 11) /* SMEP support */
#include "cpuid.h"
#include "crregs.h"
@ -3306,21 +3307,17 @@ public: // for now...
BX_SMF bx_hostpageaddr_t getHostMemAddr(bx_phy_address addr, unsigned rw);
// linear address for translate_linear expected to be canonical !
BX_SMF bx_phy_address translate_linear(bx_address laddr, unsigned curr_pl, unsigned rw);
BX_SMF bx_phy_address translate_linear(bx_address laddr, unsigned user, unsigned rw);
#if BX_CPU_LEVEL >= 6
BX_SMF bx_phy_address translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw);
BX_SMF bx_phy_address translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw);
BX_SMF int check_entry_PAE(const char *s, Bit64u entry, Bit64u reserved, unsigned rw, bx_bool *nx_fault);
#endif
#if BX_SUPPORT_X86_64
BX_SMF bx_phy_address translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw);
BX_SMF bx_phy_address translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw);
#endif
#if BX_SUPPORT_VMX >= 2
BX_SMF bx_phy_address translate_guest_physical(bx_phy_address guest_paddr, bx_address guest_laddr, bx_bool guest_laddr_valid, bx_bool is_page_walk, unsigned rw);
#endif
BX_SMF BX_CPP_INLINE bx_phy_address dtranslate_linear(bx_address laddr, unsigned curr_pl, unsigned rw)
{
return translate_linear(laddr, curr_pl, rw);
}
#if BX_CPU_LEVEL >= 6
BX_SMF void TLB_flushNonGlobal(void);
@ -3501,6 +3498,7 @@ public: // for now...
BX_SMF BX_CPP_INLINE int bx_cpuid_support_pcid(void);
BX_SMF BX_CPP_INLINE int bx_cpuid_support_xsave(void);
BX_SMF BX_CPP_INLINE int bx_cpuid_support_fsgsbase(void);
BX_SMF BX_CPP_INLINE int bx_cpuid_support_smep(void);
BX_SMF BX_CPP_INLINE int bx_cpuid_support_x2apic(void);
BX_SMF BX_CPP_INLINE unsigned which_cpu(void) { return BX_CPU_THIS_PTR bx_cpuid; }
@ -3947,12 +3945,17 @@ BX_CPP_INLINE int BX_CPU_C::bx_cpuid_support_pcid(void)
BX_CPP_INLINE int BX_CPU_C::bx_cpuid_support_fsgsbase(void)
{
#if BX_SUPPORT_X86_64
return BX_CPU_THIS_PTR cpuid_std_function[7].ebx & 0x1;
return BX_CPU_THIS_PTR cpuid_std_function[7].ebx & BX_CPUID_EXT3_FSGSBASE;
#else
return 0;
#endif
}
// Report whether the emulated CPU advertises SMEP (Supervisor Mode
// Execution Protection): tests the BX_CPUID_EXT3_SMEP feature bit in
// the cached CPUID leaf-7 EBX value. Nonzero means SMEP is supported
// (and thus CR4.SMEP may be set by the guest).
BX_CPP_INLINE int BX_CPU_C::bx_cpuid_support_smep(void)
{
return BX_CPU_THIS_PTR cpuid_std_function[7].ebx & BX_CPUID_EXT3_SMEP;
}
BX_CPP_INLINE int BX_CPU_C::bx_cpuid_support_vme(void)
{
return (BX_CPU_THIS_PTR cpuid_std_function[1].edx & BX_CPUID_STD_VME);

View File

@ -152,7 +152,6 @@ Bit32u BX_CPU_C::get_extended_cpuid_features(void)
features |= BX_CPUID_EXT_XSAVE | BX_CPUID_EXT_OSXSAVE;
#if BX_SUPPORT_AVX
// support AVX extensions
if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_CPU_AVX))
features |= BX_CPUID_EXT_AVX;
#endif
@ -174,6 +173,9 @@ Bit32u BX_CPU_C::get_ext3_cpuid_features(void)
if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_CPU_FSGSBASE))
features |= BX_CPUID_EXT3_FSGSBASE;
if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_CPU_SMEP))
features |= BX_CPUID_EXT3_SMEP;
return features;
}
#endif
@ -1202,14 +1204,16 @@ void BX_CPU_C::init_cpu_features_bitmask(void)
features_bitmask |= BX_CPU_VME;
features_bitmask |= BX_CPU_DEBUG_EXTENSIONS;
features_bitmask |= BX_CPU_PSE;
#endif
#if BX_CPU_LEVEL >= 6
features_bitmask |= BX_CPU_PAE;
features_bitmask |= BX_CPU_PGE;
features_bitmask |= BX_CPU_PSE36;
features_bitmask |= BX_CPU_PAT_MTRR;
#endif
static bx_bool smep_enabled = SIM->get_param_bool(BXPN_CPUID_SMEP)->get();
if (smep_enabled)
features_bitmask |= BX_CPU_SMEP;
#if BX_SUPPORT_X86_64
static bx_bool pcid_enabled = SIM->get_param_bool(BXPN_CPUID_PCID)->get();
@ -1217,5 +1221,9 @@ void BX_CPU_C::init_cpu_features_bitmask(void)
features_bitmask |= BX_CPU_PCID;
#endif
#endif // CPU_LEVEL >= 6
#endif // CPU_LEVEL >= 5
BX_CPU_THIS_PTR cpu_extensions_bitmask = features_bitmask;
}

View File

@ -982,7 +982,9 @@ Bit32u BX_CPU_C::get_cr4_allow_mask(void)
Bit32u allowMask = 0;
// CR4 bits definitions:
// [31-19] Reserved, Must be Zero
// [31-21] Reserved, Must be Zero
// [20] SMEP: Supervisor Mode Execution Protection R/W
// [19] Reserved, Must be Zero
// [18] OSXSAVE: Operating System XSAVE Support R/W
// [17] PCIDE: PCID Support R/W
// [16] FSGSBASE: FS/GS BASE access R/W
@ -1061,6 +1063,9 @@ Bit32u BX_CPU_C::get_cr4_allow_mask(void)
/* OSXSAVE */
if (bx_cpuid_support_xsave())
allowMask |= BX_CR4_OSXSAVE_MASK;
if (bx_cpuid_support_smep())
allowMask |= BX_CR4_SMEP_MASK;
#endif
return allowMask;
@ -1117,9 +1122,9 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::SetCR4(bx_address val)
if (! check_CR4(val)) return 0;
#if BX_CPU_LEVEL >= 6
// Modification of PGE,PAE,PSE,PCIDE flushes TLB cache according to docs.
// Modification of PGE,PAE,PSE,PCIDE,SMEP flushes TLB cache according to docs.
if ((val & BX_CR4_FLUSH_TLB_MASK) != (BX_CPU_THIS_PTR cr4.val32 & BX_CR4_FLUSH_TLB_MASK)) {
// reload PDPTR if PGE,PAE or PSE changed
// reload PDPTR if needed
if (BX_CPU_THIS_PTR cr0.get_PG() && (val & BX_CR4_PAE_MASK) != 0 && !long_mode()) {
if (! CheckPDPTR(BX_CPU_THIS_PTR cr3)) {
BX_ERROR(("SetCR4(): PDPTR check failed !"));

View File

@ -124,7 +124,7 @@ struct bx_cr4_t {
};
#define BX_CR4_FLUSH_TLB_MASK \
(BX_CR4_PSE_MASK | BX_CR4_PAE_MASK | BX_CR4_PGE_MASK | BX_CR4_PCIDE_MASK)
(BX_CR4_PSE_MASK | BX_CR4_PAE_MASK | BX_CR4_PGE_MASK | BX_CR4_PCIDE_MASK | BX_CR4_SMEP_MASK)
#endif // #if BX_CPU_LEVEL >= 4

View File

@ -496,9 +496,16 @@ void BX_CPU_C::page_fault(unsigned fault, bx_address laddr, unsigned user, unsig
unsigned isWrite = rw & 1;
error_code |= (user << 2) | (isWrite << 1);
#if BX_CPU_LEVEL >= 6
if (rw == BX_EXECUTE) {
if (BX_CPU_THIS_PTR cr4.get_SMEP())
error_code |= ERROR_CODE_ACCESS; // I/D = 1
#if BX_SUPPORT_X86_64
if (BX_CPU_THIS_PTR cr4.get_PAE() && BX_CPU_THIS_PTR efer.get_NXE() && rw == BX_EXECUTE)
error_code |= ERROR_CODE_ACCESS; // I/D = 1
if (BX_CPU_THIS_PTR cr4.get_PAE() && BX_CPU_THIS_PTR efer.get_NXE())
error_code |= ERROR_CODE_ACCESS;
#endif
}
#endif
#if BX_SUPPORT_VMX
@ -647,13 +654,12 @@ int BX_CPU_C::check_entry_PAE(const char *s, Bit64u entry, Bit64u reserved, unsi
#if BX_SUPPORT_X86_64
// Translate a linear address to a physical address in long mode
bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw)
bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw)
{
bx_phy_address entry_addr[4];
bx_phy_address ppf = BX_CPU_THIS_PTR cr3 & BX_CR3_PAGING_MASK;
Bit64u entry[4];
bx_bool nx_fault = 0;
unsigned pl = (curr_pl == 3);
int leaf = BX_LEVEL_PTE;
combined_access = 0x06;
@ -671,7 +677,7 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
Bit64u curr_entry = entry[leaf];
int fault = check_entry_PAE(bx_paging_level[leaf], curr_entry, PAGING_PAE_RESERVED_BITS, rw, &nx_fault);
if (fault >= 0)
page_fault(fault, laddr, pl, rw);
page_fault(fault, laddr, user, rw);
combined_access &= curr_entry & 0x06; // U/S and R/W
ppf = curr_entry & BX_CONST64(0x000ffffffffff000);
@ -681,13 +687,13 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
if (curr_entry & 0x80) {
if (leaf > (BX_LEVEL_PDE + !!bx_cpuid_support_1g_paging())) {
BX_DEBUG(("%s: PS bit set !"));
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
if (leaf == BX_LEVEL_PDPE) {
if (curr_entry & PAGING_PAE_PDPTE1G_RESERVED_BITS) {
BX_DEBUG(("PAE PDPE1G: reserved bit is set: PDPE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
// Make up the physical page frame address.
@ -699,7 +705,7 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
if (leaf == BX_LEVEL_PDE) {
if (curr_entry & PAGING_PAE_PDE2M_RESERVED_BITS) {
BX_DEBUG(("PAE PDE2M: reserved bit is set PDE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
// Make up the physical page frame address.
@ -713,11 +719,16 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
bx_bool isWrite = (rw & 1); // write or r-m-w
unsigned priv_index = (BX_CPU_THIS_PTR cr0.get_WP() << 4) | // bit 4
(pl<<3) | // bit 3
(user<<3) | // bit 3
(combined_access | isWrite); // bit 2,1,0
if (!priv_check[priv_index] || nx_fault)
page_fault(ERROR_PROTECTION, laddr, pl, rw);
page_fault(ERROR_PROTECTION, laddr, user, rw);
if (BX_CPU_THIS_PTR cr4.get_SMEP() && rw == BX_EXECUTE && !user) {
if (combined_access & 0x4) // User page
page_fault(ERROR_PROTECTION, laddr, user, rw);
}
if (BX_CPU_THIS_PTR cr4.get_PGE())
combined_access |= (entry[leaf] & 0x100); // G
@ -808,18 +819,17 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::CheckPDPTR(Bit64u *pdptr)
#endif
// Translate a linear address to a physical address in PAE paging mode
bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw)
bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw)
{
bx_phy_address entry_addr[3], ppf;
Bit64u entry[3];
bx_bool nx_fault = 0;
unsigned pl = (curr_pl == 3);
int leaf = BX_LEVEL_PTE;
combined_access = 0x06;
#if BX_SUPPORT_X86_64
if (long_mode()) {
return translate_linear_long_mode(laddr, lpf_mask, combined_access, curr_pl, rw);
return translate_linear_long_mode(laddr, lpf_mask, combined_access, user, rw);
}
#endif
@ -835,7 +845,7 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
int fault = check_entry_PAE("PDPE", entry[BX_LEVEL_PDPE], PAGING_PAE_PDPTE_RESERVED_BITS, rw, &nx_fault);
if (fault >= 0)
page_fault(fault, laddr, pl, rw);
page_fault(fault, laddr, user, rw);
entry_addr[BX_LEVEL_PDE] = (bx_phy_address)((entry[BX_LEVEL_PDPE] & BX_CONST64(0x000ffffffffff000))
| ((laddr & 0x3fe00000) >> 18));
@ -850,7 +860,7 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
fault = check_entry_PAE("PDE", entry[BX_LEVEL_PDE], PAGING_PAE_RESERVED_BITS, rw, &nx_fault);
if (fault >= 0)
page_fault(fault, laddr, pl, rw);
page_fault(fault, laddr, user, rw);
combined_access &= entry[BX_LEVEL_PDE] & 0x06; // U/S and R/W
@ -858,7 +868,7 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
if (entry[BX_LEVEL_PDE] & 0x80) {
if (entry[BX_LEVEL_PDE] & PAGING_PAE_PDE2M_RESERVED_BITS) {
BX_DEBUG(("PAE PDE2M: reserved bit is set PDE=%08x:%08x", GET32H(entry[BX_LEVEL_PDE]), GET32L(entry[BX_LEVEL_PDE])));
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
ppf = (bx_phy_address)((entry[BX_LEVEL_PDE] & BX_CONST64(0x000fffffffe00000)) | (laddr & 0x001ff000));
@ -880,7 +890,7 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
fault = check_entry_PAE("PTE", entry[BX_LEVEL_PTE], PAGING_PAE_RESERVED_BITS, rw, &nx_fault);
if (fault >= 0)
page_fault(fault, laddr, pl, rw);
page_fault(fault, laddr, user, rw);
combined_access &= entry[BX_LEVEL_PTE] & 0x06; // U/S and R/W
@ -892,11 +902,16 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
bx_bool isWrite = (rw & 1); // write or r-m-w
unsigned priv_index = (BX_CPU_THIS_PTR cr0.get_WP() << 4) | // bit 4
(pl<<3) | // bit 3
(user<<3) | // bit 3
(combined_access | isWrite); // bit 2,1,0
if (!priv_check[priv_index] || nx_fault)
page_fault(ERROR_PROTECTION, laddr, pl, rw);
page_fault(ERROR_PROTECTION, laddr, user, rw);
if (BX_CPU_THIS_PTR cr4.get_SMEP() && rw == BX_EXECUTE && !user) {
if (combined_access & 0x4) // User page
page_fault(ERROR_PROTECTION, laddr, user, rw);
}
if (BX_CPU_THIS_PTR cr4.get_PGE())
combined_access |= (entry[leaf] & 0x100); // G
@ -946,17 +961,20 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
(((1 << (41-BX_PHY_ADDRESS_WIDTH))-1) << (13 + BX_PHY_ADDRESS_WIDTH - 32))
// Translate a linear address to a physical address
bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, unsigned rw)
bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned user, unsigned rw)
{
Bit32u combined_access = 0x06;
Bit32u lpf_mask = 0xfff; // 4K pages
unsigned priv_index;
#if BX_SUPPORT_X86_64
if (! long_mode()) laddr &= 0xffffffff;
#endif
// note - we assume physical memory < 4gig so for brevity & speed, we'll use
// 32 bit entries although cr3 is expanded to 64 bits.
bx_phy_address paddress, ppf, poffset = PAGE_OFFSET(laddr);
bx_bool isWrite = rw & 1; // write or r-m-w
unsigned pl = (curr_pl == 3);
InstrTLB_Increment(tlbLookups);
InstrTLB_Stats();
@ -971,7 +989,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
paddress = tlbEntry->ppf | poffset;
bx_bool isExecute = (rw == BX_EXECUTE);
if (! (tlbEntry->accessBits & ((isExecute<<2) | (isWrite<<1) | pl)))
if (! (tlbEntry->accessBits & ((isExecute<<2) | (isWrite<<1) | user)))
return paddress;
// The current access does not have permission according to the info
@ -988,7 +1006,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
#if BX_CPU_LEVEL >= 6
if (BX_CPU_THIS_PTR cr4.get_PAE()) {
ppf = translate_linear_PAE(laddr, lpf_mask, combined_access, curr_pl, rw);
ppf = translate_linear_PAE(laddr, lpf_mask, combined_access, user, rw);
}
else
#endif
@ -1008,7 +1026,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
if (!(pde & 0x1)) {
BX_DEBUG(("PDE: entry not present"));
page_fault(ERROR_NOT_PRESENT, laddr, pl, rw);
page_fault(ERROR_NOT_PRESENT, laddr, user, rw);
}
#if BX_CPU_LEVEL >= 5
@ -1016,7 +1034,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
// 4M paging, only if CR4.PSE enabled, ignore PDE.PS otherwise
if (pde & PAGING_PDE4M_RESERVED_BITS) {
BX_DEBUG(("PSE PDE4M: reserved bit is set: PDE=0x%08x", pde));
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
// Combined access is just access from the pde (no pte involved).
@ -1024,13 +1042,18 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
priv_index =
(BX_CPU_THIS_PTR cr0.get_WP() << 4) | // bit 4
(pl<<3) | // bit 3
(user<<3) | // bit 3
(combined_access | isWrite); // bit 2,1,0
if (!priv_check[priv_index])
page_fault(ERROR_PROTECTION, laddr, pl, rw);
page_fault(ERROR_PROTECTION, laddr, user, rw);
#if BX_CPU_LEVEL >= 6
if (BX_CPU_THIS_PTR cr4.get_SMEP() && rw == BX_EXECUTE && !user) {
if (combined_access & 0x4) // User page
page_fault(ERROR_PROTECTION, laddr, user, rw);
}
if (BX_CPU_THIS_PTR cr4.get_PGE())
combined_access |= pde & 0x100; // G
#endif
@ -1065,7 +1088,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
if (!(pte & 0x1)) {
BX_DEBUG(("PTE: entry not present"));
page_fault(ERROR_NOT_PRESENT, laddr, pl, rw);
page_fault(ERROR_NOT_PRESENT, laddr, user, rw);
}
// 386 and 486+ have different behaviour for combining
@ -1081,13 +1104,18 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.get_WP() << 4) | // bit 4
#endif
(pl<<3) | // bit 3
(user<<3) | // bit 3
(combined_access | isWrite); // bit 2,1,0
if (!priv_check[priv_index])
page_fault(ERROR_PROTECTION, laddr, pl, rw);
page_fault(ERROR_PROTECTION, laddr, user, rw);
#if BX_CPU_LEVEL >= 6
if (BX_CPU_THIS_PTR cr4.get_SMEP() && rw == BX_EXECUTE && !user) {
if (combined_access & 0x4) // User page
page_fault(ERROR_PROTECTION, laddr, user, rw);
}
if (BX_CPU_THIS_PTR cr4.get_PGE())
combined_access |= (pte & 0x100); // G
#endif
@ -1571,7 +1599,7 @@ void BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr
/* check for reference across multiple pages */
if ((pageOffset + len) <= 4096) {
// Access within single page.
BX_CPU_THIS_PTR address_xlation.paddress1 = dtranslate_linear(laddr, curr_pl, BX_WRITE);
BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(laddr, (curr_pl==3), BX_WRITE);
BX_CPU_THIS_PTR address_xlation.pages = 1;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, BX_CPU_THIS_PTR address_xlation.paddress1, len, BX_WRITE);
@ -1582,8 +1610,7 @@ void BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr
}
else {
// access across 2 pages
BX_CPU_THIS_PTR address_xlation.paddress1 =
dtranslate_linear(laddr, curr_pl, BX_WRITE);
BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(laddr, (curr_pl == 3), BX_WRITE);
BX_CPU_THIS_PTR address_xlation.len1 = 4096 - pageOffset;
BX_CPU_THIS_PTR address_xlation.len2 = len - BX_CPU_THIS_PTR address_xlation.len1;
BX_CPU_THIS_PTR address_xlation.pages = 2;
@ -1591,7 +1618,7 @@ void BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr
#if BX_SUPPORT_X86_64
if (! long64_mode()) laddr2 &= 0xffffffff; /* handle linear address wrap in legacy mode */
#endif
BX_CPU_THIS_PTR address_xlation.paddress2 = dtranslate_linear(laddr2, curr_pl, BX_WRITE);
BX_CPU_THIS_PTR address_xlation.paddress2 = translate_linear(laddr2, (curr_pl == 3), BX_WRITE);
#ifdef BX_LITTLE_ENDIAN
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr,
@ -1646,7 +1673,7 @@ void BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_
/* check for reference across multiple pages */
if ((pageOffset + len) <= 4096) {
// Access within single page.
BX_CPU_THIS_PTR address_xlation.paddress1 = dtranslate_linear(laddr, curr_pl, xlate_rw);
BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(laddr, (curr_pl == 3), xlate_rw);
BX_CPU_THIS_PTR address_xlation.pages = 1;
access_read_physical(BX_CPU_THIS_PTR address_xlation.paddress1, len, data);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr,
@ -1657,8 +1684,7 @@ void BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_
}
else {
// access across 2 pages
BX_CPU_THIS_PTR address_xlation.paddress1 =
dtranslate_linear(laddr, curr_pl, xlate_rw);
BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(laddr, (curr_pl == 3), xlate_rw);
BX_CPU_THIS_PTR address_xlation.len1 = 4096 - pageOffset;
BX_CPU_THIS_PTR address_xlation.len2 = len - BX_CPU_THIS_PTR address_xlation.len1;
BX_CPU_THIS_PTR address_xlation.pages = 2;
@ -1666,7 +1692,7 @@ void BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_
#if BX_SUPPORT_X86_64
if (! long64_mode()) laddr2 &= 0xffffffff; /* handle linear address wrap in legacy mode */
#endif
BX_CPU_THIS_PTR address_xlation.paddress2 = dtranslate_linear(laddr2, curr_pl, xlate_rw);
BX_CPU_THIS_PTR address_xlation.paddress2 = translate_linear(laddr2, (curr_pl == 3), xlate_rw);
#ifdef BX_LITTLE_ENDIAN
access_read_physical(BX_CPU_THIS_PTR address_xlation.paddress1,

View File

@ -209,7 +209,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLFLUSH(bxInstruction_c *i)
#if BX_INSTRUMENTATION
bx_phy_address paddr =
#endif
A20ADDR(dtranslate_linear(laddr, CPL, BX_READ));
A20ADDR(translate_linear(laddr, USER_PL, BX_READ));
BX_INSTR_CLFLUSH(BX_CPU_ID, laddr, paddr);
@ -533,7 +533,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i)
}
}
bx_phy_address paddr = A20ADDR(dtranslate_linear(laddr, CPL, BX_READ));
bx_phy_address paddr = A20ADDR(translate_linear(laddr, USER_PL, BX_READ));
// Set the monitor immediately. If monitor is still armed when we MWAIT,
// the processor will stall.

View File

@ -188,8 +188,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
// used in the task switch are paged in.
if (BX_CPU_THIS_PTR cr0.get_PG())
{
dtranslate_linear(nbase32, 0, BX_READ); // old TSS
dtranslate_linear(nbase32 + new_TSS_max, 0, BX_READ);
translate_linear(nbase32, 0, BX_READ); // old TSS
translate_linear(nbase32 + new_TSS_max, 0, BX_READ);
// ??? Humm, we check the new TSS region with READ above,
// but sometimes we need to write the link field in that
@ -200,8 +200,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
if (source == BX_TASK_FROM_CALL || source == BX_TASK_FROM_INT)
{
dtranslate_linear(nbase32, 0, BX_WRITE);
dtranslate_linear(nbase32 + 1, 0, BX_WRITE);
translate_linear(nbase32, 0, BX_WRITE);
translate_linear(nbase32 + 1, 0, BX_WRITE);
}
}
@ -242,8 +242,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
if (BX_CPU_THIS_PTR tr.cache.type <= 3) {
// check that we won't page fault while writing
if (BX_CPU_THIS_PTR cr0.get_PG()) {
dtranslate_linear(Bit32u(obase32 + 14), 0, BX_WRITE);
dtranslate_linear(Bit32u(obase32 + 41), 0, BX_WRITE);
translate_linear(Bit32u(obase32 + 14), 0, BX_WRITE);
translate_linear(Bit32u(obase32 + 41), 0, BX_WRITE);
}
system_write_word(Bit32u(obase32 + 14), IP);
@ -269,8 +269,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
else {
// check that we won't page fault while writing
if (BX_CPU_THIS_PTR cr0.get_PG()) {
dtranslate_linear(Bit32u(obase32 + 0x20), 0, BX_WRITE);
dtranslate_linear(Bit32u(obase32 + 0x5d), 0, BX_WRITE);
translate_linear(Bit32u(obase32 + 0x20), 0, BX_WRITE);
translate_linear(Bit32u(obase32 + 0x5d), 0, BX_WRITE);
}
system_write_dword(Bit32u(obase32 + 0x20), EIP);

View File

@ -3119,6 +3119,11 @@ This option exists only if Bochs compiled with x86-64 support.
Enable Process-Context Identifiers (PCID) support in long mode.
This option exists only if Bochs compiled with x86-64 support.
</para>
<para><command>smep</command></para>
<para>
Enable Supervisor Mode Execution Protection (SMEP) support.
This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6.
</para>
<para><command>mwait</command></para>
<para>
Select MONITOR/MWAIT instructions support.

View File

@ -244,6 +244,11 @@ pcid:
Enable Process-Context Identifiers (PCID) support in long mode.
This option exists only if Bochs compiled with x86-64 support.
smep:
Enable Supervisor Mode Execution Protection (SMEP) support.
This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6.
mwait:
Select MONITOR/MWAIT instructions support.

View File

@ -60,6 +60,7 @@
#define BXPN_CPUID_1G_PAGES "cpuid.1g_pages"
#define BXPN_CPUID_PCID "cpuid.pcid"
#define BXPN_CPUID_FSGSBASE "cpuid.fsgsbase"
#define BXPN_CPUID_SMEP "cpuid.smep"
#define BXPN_MEM_SIZE "memory.standard.ram.size"
#define BXPN_HOST_MEM_SIZE "memory.standard.ram.host_size"
#define BXPN_ROM_PATH "memory.standard.rom.path"