diff --git a/bochs/.bochsrc b/bochs/.bochsrc
index 0eb9f2742..5f5199ec5 100644
--- a/bochs/.bochsrc
+++ b/bochs/.bochsrc
@@ -191,6 +191,10 @@ cpu: count=1, ips=50000000, reset_on_triple_fault=1, ignore_bad_msrs=1, msrs="ms
# Enable FS/GS BASE access instructions support in long mode.
# This option exists only if Bochs compiled with x86-64 support.
#
+# SMEP:
+# Enable Supervisor Mode Execution Protection (SMEP) support.
+# This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6.
+#
# MWAIT:
# Select MONITOR/MWAIT instructions support.
# This option exists only if Bochs compiled with --enable-monitor-mwait.
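For orientation only (not part of the patch itself): together with the config.cc parser change below, SMEP would be switched on from the cpuid directive of a .bochsrc, along the lines of

    # illustrative only - the option defaults to 0 (disabled)
    cpuid: smep=1

bx_write_configuration then emits it next to the other cpuid flags such as sep, xsave and movbe.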
diff --git a/bochs/CHANGES b/bochs/CHANGES
index 84f59786c..4a4be6bc5 100644
--- a/bochs/CHANGES
+++ b/bochs/CHANGES
@@ -3,6 +3,8 @@ Changes after 2.4.6 release:
Bochs repository moved to the SVN version control !
- CPU
+ - Implemented Supervisor Mode Execution Protection (SMEP), the feature can
+ be enabled using .bochsrc CPUID option.
- Added support for XSAVEOPT instruction, the instruction can be enabled
using .bochsrc CPUID option.
- Added support for AVX instruction set emulation, to enable configure with
diff --git a/bochs/PARAM_TREE.txt b/bochs/PARAM_TREE.txt
index cdc46d55b..8e33cec4a 100644
--- a/bochs/PARAM_TREE.txt
+++ b/bochs/PARAM_TREE.txt
@@ -44,6 +44,7 @@ cpuid
1g_pages
pcid
fsgsbase
+ smep
mwait
mwait_is_nop
diff --git a/bochs/bx_debug/dbg_main.cc b/bochs/bx_debug/dbg_main.cc
index 93cf4ad1a..11f1eaceb 100644
--- a/bochs/bx_debug/dbg_main.cc
+++ b/bochs/bx_debug/dbg_main.cc
@@ -913,7 +913,8 @@ void bx_dbg_info_control_regs_command(void)
dbg_printf(" PWT=page-level write-through=%d\n", (cr3>>3) & 1);
#if BX_CPU_LEVEL >= 4
Bit32u cr4 = SIM->get_param_num("CR4", dbg_cpu_list)->get();
- dbg_printf("CR4=0x%08x: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n", cr4,
+ dbg_printf("CR4=0x%08x: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n", cr4,
+ (cr4 & (1<<20)) ? "SMEP" : "smep",
(cr4 & (1<<18)) ? "OSXSAVE" : "osxsave",
(cr4 & (1<<17)) ? "PCID" : "pcid",
(cr4 & (1<<16)) ? "FSGSBASE" : "fsgsbase",
diff --git a/bochs/config.cc b/bochs/config.cc
index 1b7313f10..cbda7ede9 100644
--- a/bochs/config.cc
+++ b/bochs/config.cc
@@ -322,7 +322,7 @@ void bx_init_options()
// cpuid subtree
#if BX_CPU_LEVEL >= 4
- bx_list_c *cpuid_param = new bx_list_c(root_param, "cpuid", "CPUID Options", 20);
+ bx_list_c *cpuid_param = new bx_list_c(root_param, "cpuid", "CPUID Options", 21);
new bx_param_bool_c(cpuid_param,
"cpuid_limit_winnt", "Limit max CPUID function to 3",
@@ -434,6 +434,10 @@ void bx_init_options()
"FS/GS BASE access instructions support in long mode",
0);
#endif
+ new bx_param_bool_c(cpuid_param,
+ "smep", "Supervisor Mode Execution Protection support",
+ "Supervisor Mode Execution Protection support",
+ 0);
#if BX_SUPPORT_MONITOR_MWAIT
new bx_param_bool_c(cpuid_param,
"mwait", "MONITOR/MWAIT instructions support",
@@ -2700,6 +2704,10 @@ static int parse_line_formatted(const char *context, int num_params, char *param
PARSE_ERR(("%s: cpuid directive malformed.", context));
}
#endif
+ } else if (!strncmp(params[i], "smep=", 5)) {
+ if (parse_param_bool(params[i], 5, BXPN_CPUID_SMEP) < 0) {
+ PARSE_ERR(("%s: cpuid directive malformed.", context));
+ }
#if BX_SUPPORT_MONITOR_MWAIT
} else if (!strncmp(params[i], "mwait=", 6)) {
if (parse_param_bool(params[i], 6, BXPN_CPUID_MWAIT) < 0) {
@@ -3900,13 +3908,14 @@ int bx_write_configuration(const char *rc, int overwrite)
SIM->get_param_enum(BXPN_CPUID_APIC)->get_selected());
#endif
#if BX_CPU_LEVEL >= 6
- fprintf(fp, ", sse=%s, sep=%d, aes=%d, xsave=%d, xsaveopt=%d, movbe=%d",
+ fprintf(fp, ", sse=%s, sep=%d, aes=%d, xsave=%d, xsaveopt=%d, movbe=%d, smep=%d",
SIM->get_param_enum(BXPN_CPUID_SSE)->get_selected(),
SIM->get_param_bool(BXPN_CPUID_SEP)->get(),
SIM->get_param_bool(BXPN_CPUID_AES)->get(),
SIM->get_param_bool(BXPN_CPUID_XSAVE)->get(),
SIM->get_param_bool(BXPN_CPUID_XSAVEOPT)->get(),
- SIM->get_param_bool(BXPN_CPUID_MOVBE)->get());
+ SIM->get_param_bool(BXPN_CPUID_MOVBE)->get(),
+ SIM->get_param_bool(BXPN_CPUID_SMEP)->get());
#if BX_SUPPORT_AVX
fprintf(fp, ", avx=%d", SIM->get_param_bool(BXPN_CPUID_AVX)->get());
#endif
diff --git a/bochs/cpu/cpu.cc b/bochs/cpu/cpu.cc
index 3d7d8f38e..f801c9cd8 100644
--- a/bochs/cpu/cpu.cc
+++ b/bochs/cpu/cpu.cc
@@ -722,7 +722,7 @@ void BX_CPU_C::prefetch(void)
fetchPtr = (Bit8u*) tlbEntry->hostPageAddr;
}
else {
- bx_phy_address pAddr = translate_linear(laddr, CPL, BX_EXECUTE);
+ bx_phy_address pAddr = translate_linear(laddr, USER_PL, BX_EXECUTE);
BX_CPU_THIS_PTR pAddrPage = PPFOf(pAddr);
}
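A sketch for orientation: prefetch() now passes a 0/1 user flag instead of the raw privilege level. Assuming USER_PL expands to a CPL == 3 test, consistent with the explicit (curr_pl == 3) conversions at the data-access call sites later in this patch, the call is roughly equivalent to

    bx_phy_address pAddr = translate_linear(laddr, (CPL == 3) /* USER_PL, assumed */, BX_EXECUTE);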
diff --git a/bochs/cpu/cpu.h b/bochs/cpu/cpu.h
index fbb4637db..f90521a8f 100644
--- a/bochs/cpu/cpu.h
+++ b/bochs/cpu/cpu.h
@@ -647,6 +647,7 @@ typedef struct
#define BX_CPU_X2APIC (1 << 8) /* X2APIC support */
#define BX_CPU_1G_PAGES (1 << 9) /* 1Gb pages support */
#define BX_CPU_PCID (1 << 10) /* PCID pages support */
+#define BX_CPU_SMEP (1 << 11) /* SMEP support */
#include "cpuid.h"
#include "crregs.h"
@@ -3306,21 +3307,17 @@ public: // for now...
BX_SMF bx_hostpageaddr_t getHostMemAddr(bx_phy_address addr, unsigned rw);
// linear address for translate_linear expected to be canonical !
- BX_SMF bx_phy_address translate_linear(bx_address laddr, unsigned curr_pl, unsigned rw);
+ BX_SMF bx_phy_address translate_linear(bx_address laddr, unsigned user, unsigned rw);
#if BX_CPU_LEVEL >= 6
- BX_SMF bx_phy_address translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw);
+ BX_SMF bx_phy_address translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw);
BX_SMF int check_entry_PAE(const char *s, Bit64u entry, Bit64u reserved, unsigned rw, bx_bool *nx_fault);
#endif
#if BX_SUPPORT_X86_64
- BX_SMF bx_phy_address translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw);
+ BX_SMF bx_phy_address translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw);
#endif
#if BX_SUPPORT_VMX >= 2
BX_SMF bx_phy_address translate_guest_physical(bx_phy_address guest_paddr, bx_address guest_laddr, bx_bool guest_laddr_valid, bx_bool is_page_walk, unsigned rw);
#endif
- BX_SMF BX_CPP_INLINE bx_phy_address dtranslate_linear(bx_address laddr, unsigned curr_pl, unsigned rw)
- {
- return translate_linear(laddr, curr_pl, rw);
- }
#if BX_CPU_LEVEL >= 6
BX_SMF void TLB_flushNonGlobal(void);
@@ -3501,6 +3498,7 @@ public: // for now...
BX_SMF BX_CPP_INLINE int bx_cpuid_support_pcid(void);
BX_SMF BX_CPP_INLINE int bx_cpuid_support_xsave(void);
BX_SMF BX_CPP_INLINE int bx_cpuid_support_fsgsbase(void);
+ BX_SMF BX_CPP_INLINE int bx_cpuid_support_smep(void);
BX_SMF BX_CPP_INLINE int bx_cpuid_support_x2apic(void);
BX_SMF BX_CPP_INLINE unsigned which_cpu(void) { return BX_CPU_THIS_PTR bx_cpuid; }
@@ -3947,12 +3945,17 @@ BX_CPP_INLINE int BX_CPU_C::bx_cpuid_support_pcid(void)
BX_CPP_INLINE int BX_CPU_C::bx_cpuid_support_fsgsbase(void)
{
#if BX_SUPPORT_X86_64
- return BX_CPU_THIS_PTR cpuid_std_function[7].ebx & 0x1;
+ return BX_CPU_THIS_PTR cpuid_std_function[7].ebx & BX_CPUID_EXT3_FSGSBASE;
#else
return 0;
#endif
}
+BX_CPP_INLINE int BX_CPU_C::bx_cpuid_support_smep(void)
+{
+ return BX_CPU_THIS_PTR cpuid_std_function[7].ebx & BX_CPUID_EXT3_SMEP;
+}
+
BX_CPP_INLINE int BX_CPU_C::bx_cpuid_support_vme(void)
{
return (BX_CPU_THIS_PTR cpuid_std_function[1].edx & BX_CPUID_STD_VME);
diff --git a/bochs/cpu/cpuid.cc b/bochs/cpu/cpuid.cc
index 9069c728b..cad3fa72e 100644
--- a/bochs/cpu/cpuid.cc
+++ b/bochs/cpu/cpuid.cc
@@ -152,7 +152,6 @@ Bit32u BX_CPU_C::get_extended_cpuid_features(void)
features |= BX_CPUID_EXT_XSAVE | BX_CPUID_EXT_OSXSAVE;
#if BX_SUPPORT_AVX
- // support AVX extensions
if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_CPU_AVX))
features |= BX_CPUID_EXT_AVX;
#endif
@@ -174,6 +173,9 @@ Bit32u BX_CPU_C::get_ext3_cpuid_features(void)
if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_CPU_FSGSBASE))
features |= BX_CPUID_EXT3_FSGSBASE;
+ if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_CPU_SMEP))
+ features |= BX_CPUID_EXT3_SMEP;
+
return features;
}
#endif
@@ -1202,14 +1204,16 @@ void BX_CPU_C::init_cpu_features_bitmask(void)
features_bitmask |= BX_CPU_VME;
features_bitmask |= BX_CPU_DEBUG_EXTENSIONS;
features_bitmask |= BX_CPU_PSE;
-#endif
#if BX_CPU_LEVEL >= 6
features_bitmask |= BX_CPU_PAE;
features_bitmask |= BX_CPU_PGE;
features_bitmask |= BX_CPU_PSE36;
features_bitmask |= BX_CPU_PAT_MTRR;
-#endif
+
+ static bx_bool smep_enabled = SIM->get_param_bool(BXPN_CPUID_SMEP)->get();
+ if (smep_enabled)
+ features_bitmask |= BX_CPU_SMEP;
#if BX_SUPPORT_X86_64
static bx_bool pcid_enabled = SIM->get_param_bool(BXPN_CPUID_PCID)->get();
@@ -1217,5 +1221,9 @@ void BX_CPU_C::init_cpu_features_bitmask(void)
features_bitmask |= BX_CPU_PCID;
#endif
+#endif // CPU_LEVEL >= 6
+
+#endif // CPU_LEVEL >= 5
+
BX_CPU_THIS_PTR cpu_extensions_bitmask = features_bitmask;
}
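The BX_CPUID_EXT3_* names are used here as given. Architecturally, FSGSBASE and SMEP are reported in CPUID.(EAX=07H,ECX=0):EBX bits 0 and 7, so the cpuid.h definitions are presumably along the lines of this sketch (not copied from the tree):

    /* assumed values, matching the architectural CPUID.7:EBX layout */
    #define BX_CPUID_EXT3_FSGSBASE (1 << 0)
    #define BX_CPUID_EXT3_SMEP     (1 << 7)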
diff --git a/bochs/cpu/crregs.cc b/bochs/cpu/crregs.cc
index e48828081..a92c4884b 100644
--- a/bochs/cpu/crregs.cc
+++ b/bochs/cpu/crregs.cc
@@ -982,7 +982,9 @@ Bit32u BX_CPU_C::get_cr4_allow_mask(void)
Bit32u allowMask = 0;
// CR4 bits definitions:
- // [31-19] Reserved, Must be Zero
+ // [31-21] Reserved, Must be Zero
+ // [20] SMEP: Supervisor Mode Execution Protection R/W
+ // [19] Reserved, Must be Zero
// [18] OSXSAVE: Operating System XSAVE Support R/W
// [17] PCIDE: PCID Support R/W
// [16] FSGSBASE: FS/GS BASE access R/W
@@ -1061,6 +1063,9 @@ Bit32u BX_CPU_C::get_cr4_allow_mask(void)
/* OSXSAVE */
if (bx_cpuid_support_xsave())
allowMask |= BX_CR4_OSXSAVE_MASK;
+
+ if (bx_cpuid_support_smep())
+ allowMask |= BX_CR4_SMEP_MASK;
#endif
return allowMask;
@@ -1117,9 +1122,9 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::SetCR4(bx_address val)
if (! check_CR4(val)) return 0;
#if BX_CPU_LEVEL >= 6
- // Modification of PGE,PAE,PSE,PCIDE flushes TLB cache according to docs.
+ // Modification of PGE,PAE,PSE,PCIDE,SMEP flushes TLB cache according to docs.
if ((val & BX_CR4_FLUSH_TLB_MASK) != (BX_CPU_THIS_PTR cr4.val32 & BX_CR4_FLUSH_TLB_MASK)) {
- // reload PDPTR if PGE,PAE or PSE changed
+ // reload PDPTR if needed
if (BX_CPU_THIS_PTR cr0.get_PG() && (val & BX_CR4_PAE_MASK) != 0 && !long_mode()) {
if (! CheckPDPTR(BX_CPU_THIS_PTR cr3)) {
BX_ERROR(("SetCR4(): PDPTR check failed !"));
diff --git a/bochs/cpu/crregs.h b/bochs/cpu/crregs.h
index ec6d2b08b..1a19c5633 100644
--- a/bochs/cpu/crregs.h
+++ b/bochs/cpu/crregs.h
@@ -124,7 +124,7 @@ struct bx_cr4_t {
};
#define BX_CR4_FLUSH_TLB_MASK \
- (BX_CR4_PSE_MASK | BX_CR4_PAE_MASK | BX_CR4_PGE_MASK | BX_CR4_PCIDE_MASK)
+ (BX_CR4_PSE_MASK | BX_CR4_PAE_MASK | BX_CR4_PGE_MASK | BX_CR4_PCIDE_MASK | BX_CR4_SMEP_MASK)
#endif // #if BX_CPU_LEVEL >= 4
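BX_CR4_SMEP_MASK is referenced but not defined in this excerpt; given the CR4 bit layout documented in crregs.cc above (SMEP is CR4 bit 20), it presumably amounts to

    #define BX_CR4_SMEP_MASK (1 << 20)   /* assumed, matches the "[20] SMEP" comment above */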
diff --git a/bochs/cpu/paging.cc b/bochs/cpu/paging.cc
index 263bade56..c5e28f6a8 100644
--- a/bochs/cpu/paging.cc
+++ b/bochs/cpu/paging.cc
@@ -496,9 +496,16 @@ void BX_CPU_C::page_fault(unsigned fault, bx_address laddr, unsigned user, unsig
unsigned isWrite = rw & 1;
error_code |= (user << 2) | (isWrite << 1);
+
+#if BX_CPU_LEVEL >= 6
+ if (rw == BX_EXECUTE) {
+ if (BX_CPU_THIS_PTR cr4.get_SMEP())
+ error_code |= ERROR_CODE_ACCESS; // I/D = 1
#if BX_SUPPORT_X86_64
- if (BX_CPU_THIS_PTR cr4.get_PAE() && BX_CPU_THIS_PTR efer.get_NXE() && rw == BX_EXECUTE)
- error_code |= ERROR_CODE_ACCESS; // I/D = 1
+ if (BX_CPU_THIS_PTR cr4.get_PAE() && BX_CPU_THIS_PTR efer.get_NXE())
+ error_code |= ERROR_CODE_ACCESS;
+#endif
+ }
#endif
#if BX_SUPPORT_VMX
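The error-code constants are taken as given. For reference, the architectural #PF error code carries P in bit 0, W/R in bit 1, U/S in bit 2, RSVD in bit 3 and I/D in bit 4, so ERROR_CODE_ACCESS presumably denotes the I/D bit (0x10); with SMEP enabled it is now set on any faulting instruction fetch, not only when NX paging is active. A sketch of the assumed mapping:

    /* assumed, architectural #PF error-code bits                   */
    /* ERROR_NOT_PRESENT  = 0x00, ERROR_PROTECTION = 0x01 (P)       */
    /* write access      -> bit 1 (W/R), user access -> bit 2 (U/S) */
    /* ERROR_RESERVED    -> bit 3 (RSVD)                            */
    /* ERROR_CODE_ACCESS -> bit 4 (I/D, instruction fetch)          */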
@@ -647,13 +654,12 @@ int BX_CPU_C::check_entry_PAE(const char *s, Bit64u entry, Bit64u reserved, unsi
#if BX_SUPPORT_X86_64
// Translate a linear address to a physical address in long mode
-bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw)
+bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw)
{
bx_phy_address entry_addr[4];
bx_phy_address ppf = BX_CPU_THIS_PTR cr3 & BX_CR3_PAGING_MASK;
Bit64u entry[4];
bx_bool nx_fault = 0;
- unsigned pl = (curr_pl == 3);
int leaf = BX_LEVEL_PTE;
combined_access = 0x06;
@@ -671,7 +677,7 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
Bit64u curr_entry = entry[leaf];
int fault = check_entry_PAE(bx_paging_level[leaf], curr_entry, PAGING_PAE_RESERVED_BITS, rw, &nx_fault);
if (fault >= 0)
- page_fault(fault, laddr, pl, rw);
+ page_fault(fault, laddr, user, rw);
combined_access &= curr_entry & 0x06; // U/S and R/W
ppf = curr_entry & BX_CONST64(0x000ffffffffff000);
@@ -681,13 +687,13 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
if (curr_entry & 0x80) {
if (leaf > (BX_LEVEL_PDE + !!bx_cpuid_support_1g_paging())) {
        BX_DEBUG(("%s: PS bit set !", bx_paging_level[leaf]));
- page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
+ page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
if (leaf == BX_LEVEL_PDPE) {
if (curr_entry & PAGING_PAE_PDPTE1G_RESERVED_BITS) {
BX_DEBUG(("PAE PDPE1G: reserved bit is set: PDPE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
- page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
+ page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
// Make up the physical page frame address.
@@ -699,7 +705,7 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
if (leaf == BX_LEVEL_PDE) {
if (curr_entry & PAGING_PAE_PDE2M_RESERVED_BITS) {
BX_DEBUG(("PAE PDE2M: reserved bit is set PDE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
- page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
+ page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
// Make up the physical page frame address.
@@ -713,11 +719,16 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
bx_bool isWrite = (rw & 1); // write or r-m-w
unsigned priv_index = (BX_CPU_THIS_PTR cr0.get_WP() << 4) | // bit 4
- (pl<<3) | // bit 3
+ (user<<3) | // bit 3
(combined_access | isWrite); // bit 2,1,0
if (!priv_check[priv_index] || nx_fault)
- page_fault(ERROR_PROTECTION, laddr, pl, rw);
+ page_fault(ERROR_PROTECTION, laddr, user, rw);
+
+ if (BX_CPU_THIS_PTR cr4.get_SMEP() && rw == BX_EXECUTE && !user) {
+ if (combined_access & 0x4) // User page
+ page_fault(ERROR_PROTECTION, laddr, user, rw);
+ }
if (BX_CPU_THIS_PTR cr4.get_PGE())
combined_access |= (entry[leaf] & 0x100); // G
@@ -808,18 +819,17 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::CheckPDPTR(Bit64u *pdptr)
#endif
// Translate a linear address to a physical address in PAE paging mode
-bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw)
+bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw)
{
bx_phy_address entry_addr[3], ppf;
Bit64u entry[3];
bx_bool nx_fault = 0;
- unsigned pl = (curr_pl == 3);
int leaf = BX_LEVEL_PTE;
combined_access = 0x06;
#if BX_SUPPORT_X86_64
if (long_mode()) {
- return translate_linear_long_mode(laddr, lpf_mask, combined_access, curr_pl, rw);
+ return translate_linear_long_mode(laddr, lpf_mask, combined_access, user, rw);
}
#endif
@@ -835,7 +845,7 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
int fault = check_entry_PAE("PDPE", entry[BX_LEVEL_PDPE], PAGING_PAE_PDPTE_RESERVED_BITS, rw, &nx_fault);
if (fault >= 0)
- page_fault(fault, laddr, pl, rw);
+ page_fault(fault, laddr, user, rw);
entry_addr[BX_LEVEL_PDE] = (bx_phy_address)((entry[BX_LEVEL_PDPE] & BX_CONST64(0x000ffffffffff000))
| ((laddr & 0x3fe00000) >> 18));
@@ -850,7 +860,7 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
fault = check_entry_PAE("PDE", entry[BX_LEVEL_PDE], PAGING_PAE_RESERVED_BITS, rw, &nx_fault);
if (fault >= 0)
- page_fault(fault, laddr, pl, rw);
+ page_fault(fault, laddr, user, rw);
combined_access &= entry[BX_LEVEL_PDE] & 0x06; // U/S and R/W
@@ -858,7 +868,7 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
if (entry[BX_LEVEL_PDE] & 0x80) {
if (entry[BX_LEVEL_PDE] & PAGING_PAE_PDE2M_RESERVED_BITS) {
BX_DEBUG(("PAE PDE2M: reserved bit is set PDE=%08x:%08x", GET32H(entry[BX_LEVEL_PDE]), GET32L(entry[BX_LEVEL_PDE])));
- page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
+ page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
ppf = (bx_phy_address)((entry[BX_LEVEL_PDE] & BX_CONST64(0x000fffffffe00000)) | (laddr & 0x001ff000));
@@ -880,7 +890,7 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
fault = check_entry_PAE("PTE", entry[BX_LEVEL_PTE], PAGING_PAE_RESERVED_BITS, rw, &nx_fault);
if (fault >= 0)
- page_fault(fault, laddr, pl, rw);
+ page_fault(fault, laddr, user, rw);
combined_access &= entry[BX_LEVEL_PTE] & 0x06; // U/S and R/W
@@ -892,11 +902,16 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
bx_bool isWrite = (rw & 1); // write or r-m-w
unsigned priv_index = (BX_CPU_THIS_PTR cr0.get_WP() << 4) | // bit 4
- (pl<<3) | // bit 3
+ (user<<3) | // bit 3
(combined_access | isWrite); // bit 2,1,0
if (!priv_check[priv_index] || nx_fault)
- page_fault(ERROR_PROTECTION, laddr, pl, rw);
+ page_fault(ERROR_PROTECTION, laddr, user, rw);
+
+ if (BX_CPU_THIS_PTR cr4.get_SMEP() && rw == BX_EXECUTE && !user) {
+ if (combined_access & 0x4) // User page
+ page_fault(ERROR_PROTECTION, laddr, user, rw);
+ }
if (BX_CPU_THIS_PTR cr4.get_PGE())
combined_access |= (entry[leaf] & 0x100); // G
@@ -946,17 +961,20 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
(((1 << (41-BX_PHY_ADDRESS_WIDTH))-1) << (13 + BX_PHY_ADDRESS_WIDTH - 32))
// Translate a linear address to a physical address
-bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, unsigned rw)
+bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned user, unsigned rw)
{
Bit32u combined_access = 0x06;
Bit32u lpf_mask = 0xfff; // 4K pages
unsigned priv_index;
+#if BX_SUPPORT_X86_64
+ if (! long_mode()) laddr &= 0xffffffff;
+#endif
+
// note - we assume physical memory < 4gig so for brevity & speed, we'll use
// 32 bit entries although cr3 is expanded to 64 bits.
bx_phy_address paddress, ppf, poffset = PAGE_OFFSET(laddr);
bx_bool isWrite = rw & 1; // write or r-m-w
- unsigned pl = (curr_pl == 3);
InstrTLB_Increment(tlbLookups);
InstrTLB_Stats();
@@ -971,7 +989,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
paddress = tlbEntry->ppf | poffset;
bx_bool isExecute = (rw == BX_EXECUTE);
- if (! (tlbEntry->accessBits & ((isExecute<<2) | (isWrite<<1) | pl)))
+ if (! (tlbEntry->accessBits & ((isExecute<<2) | (isWrite<<1) | user)))
return paddress;
// The current access does not have permission according to the info
@@ -988,7 +1006,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
#if BX_CPU_LEVEL >= 6
if (BX_CPU_THIS_PTR cr4.get_PAE()) {
- ppf = translate_linear_PAE(laddr, lpf_mask, combined_access, curr_pl, rw);
+ ppf = translate_linear_PAE(laddr, lpf_mask, combined_access, user, rw);
}
else
#endif
@@ -1008,7 +1026,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
if (!(pde & 0x1)) {
BX_DEBUG(("PDE: entry not present"));
- page_fault(ERROR_NOT_PRESENT, laddr, pl, rw);
+ page_fault(ERROR_NOT_PRESENT, laddr, user, rw);
}
#if BX_CPU_LEVEL >= 5
@@ -1016,7 +1034,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
// 4M paging, only if CR4.PSE enabled, ignore PDE.PS otherwise
if (pde & PAGING_PDE4M_RESERVED_BITS) {
BX_DEBUG(("PSE PDE4M: reserved bit is set: PDE=0x%08x", pde));
- page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, rw);
+ page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
// Combined access is just access from the pde (no pte involved).
@@ -1024,13 +1042,18 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
priv_index =
(BX_CPU_THIS_PTR cr0.get_WP() << 4) | // bit 4
- (pl<<3) | // bit 3
+ (user<<3) | // bit 3
(combined_access | isWrite); // bit 2,1,0
if (!priv_check[priv_index])
- page_fault(ERROR_PROTECTION, laddr, pl, rw);
+ page_fault(ERROR_PROTECTION, laddr, user, rw);
#if BX_CPU_LEVEL >= 6
+ if (BX_CPU_THIS_PTR cr4.get_SMEP() && rw == BX_EXECUTE && !user) {
+ if (combined_access & 0x4) // User page
+ page_fault(ERROR_PROTECTION, laddr, user, rw);
+ }
+
if (BX_CPU_THIS_PTR cr4.get_PGE())
combined_access |= pde & 0x100; // G
#endif
@@ -1065,7 +1088,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
if (!(pte & 0x1)) {
BX_DEBUG(("PTE: entry not present"));
- page_fault(ERROR_NOT_PRESENT, laddr, pl, rw);
+ page_fault(ERROR_NOT_PRESENT, laddr, user, rw);
}
// 386 and 486+ have different behaviour for combining
@@ -1081,13 +1104,18 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
#if BX_CPU_LEVEL >= 4
(BX_CPU_THIS_PTR cr0.get_WP() << 4) | // bit 4
#endif
- (pl<<3) | // bit 3
+ (user<<3) | // bit 3
(combined_access | isWrite); // bit 2,1,0
if (!priv_check[priv_index])
- page_fault(ERROR_PROTECTION, laddr, pl, rw);
+ page_fault(ERROR_PROTECTION, laddr, user, rw);
#if BX_CPU_LEVEL >= 6
+ if (BX_CPU_THIS_PTR cr4.get_SMEP() && rw == BX_EXECUTE && !user) {
+ if (combined_access & 0x4) // User page
+ page_fault(ERROR_PROTECTION, laddr, user, rw);
+ }
+
if (BX_CPU_THIS_PTR cr4.get_PGE())
combined_access |= (pte & 0x100); // G
#endif
@@ -1571,7 +1599,7 @@ void BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr
/* check for reference across multiple pages */
if ((pageOffset + len) <= 4096) {
// Access within single page.
- BX_CPU_THIS_PTR address_xlation.paddress1 = dtranslate_linear(laddr, curr_pl, BX_WRITE);
+ BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(laddr, (curr_pl==3), BX_WRITE);
BX_CPU_THIS_PTR address_xlation.pages = 1;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, BX_CPU_THIS_PTR address_xlation.paddress1, len, BX_WRITE);
@@ -1582,8 +1610,7 @@ void BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr
}
else {
// access across 2 pages
- BX_CPU_THIS_PTR address_xlation.paddress1 =
- dtranslate_linear(laddr, curr_pl, BX_WRITE);
+ BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(laddr, (curr_pl == 3), BX_WRITE);
BX_CPU_THIS_PTR address_xlation.len1 = 4096 - pageOffset;
BX_CPU_THIS_PTR address_xlation.len2 = len - BX_CPU_THIS_PTR address_xlation.len1;
BX_CPU_THIS_PTR address_xlation.pages = 2;
@@ -1591,7 +1618,7 @@ void BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr
#if BX_SUPPORT_X86_64
if (! long64_mode()) laddr2 &= 0xffffffff; /* handle linear address wrap in legacy mode */
#endif
- BX_CPU_THIS_PTR address_xlation.paddress2 = dtranslate_linear(laddr2, curr_pl, BX_WRITE);
+ BX_CPU_THIS_PTR address_xlation.paddress2 = translate_linear(laddr2, (curr_pl == 3), BX_WRITE);
#ifdef BX_LITTLE_ENDIAN
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr,
@@ -1646,7 +1673,7 @@ void BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_
/* check for reference across multiple pages */
if ((pageOffset + len) <= 4096) {
// Access within single page.
- BX_CPU_THIS_PTR address_xlation.paddress1 = dtranslate_linear(laddr, curr_pl, xlate_rw);
+ BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(laddr, (curr_pl == 3), xlate_rw);
BX_CPU_THIS_PTR address_xlation.pages = 1;
access_read_physical(BX_CPU_THIS_PTR address_xlation.paddress1, len, data);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr,
@@ -1657,8 +1684,7 @@ void BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_
}
else {
// access across 2 pages
- BX_CPU_THIS_PTR address_xlation.paddress1 =
- dtranslate_linear(laddr, curr_pl, xlate_rw);
+ BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(laddr, (curr_pl == 3), xlate_rw);
BX_CPU_THIS_PTR address_xlation.len1 = 4096 - pageOffset;
BX_CPU_THIS_PTR address_xlation.len2 = len - BX_CPU_THIS_PTR address_xlation.len1;
BX_CPU_THIS_PTR address_xlation.pages = 2;
@@ -1666,7 +1692,7 @@ void BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_
#if BX_SUPPORT_X86_64
if (! long64_mode()) laddr2 &= 0xffffffff; /* handle linear address wrap in legacy mode */
#endif
- BX_CPU_THIS_PTR address_xlation.paddress2 = dtranslate_linear(laddr2, curr_pl, xlate_rw);
+ BX_CPU_THIS_PTR address_xlation.paddress2 = translate_linear(laddr2, (curr_pl == 3), xlate_rw);
#ifdef BX_LITTLE_ENDIAN
access_read_physical(BX_CPU_THIS_PTR address_xlation.paddress1,
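The same supervisor-fetch check is inserted into each of the three page walks (legacy, PAE and long mode), always after the ordinary priv_check test. Consolidated for readability (identifiers as in the patch, grouping illustrative only):

    /* a supervisor instruction fetch from a user-accessible page faults when CR4.SMEP=1 */
    if (BX_CPU_THIS_PTR cr4.get_SMEP() && rw == BX_EXECUTE && !user) {
      if (combined_access & 0x4)   /* U/S=1 at every level of the walk => user page */
        page_fault(ERROR_PROTECTION, laddr, user, rw);
    }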
diff --git a/bochs/cpu/proc_ctrl.cc b/bochs/cpu/proc_ctrl.cc
index f1f49bd46..462fb8e93 100644
--- a/bochs/cpu/proc_ctrl.cc
+++ b/bochs/cpu/proc_ctrl.cc
@@ -209,7 +209,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLFLUSH(bxInstruction_c *i)
#if BX_INSTRUMENTATION
bx_phy_address paddr =
#endif
- A20ADDR(dtranslate_linear(laddr, CPL, BX_READ));
+ A20ADDR(translate_linear(laddr, USER_PL, BX_READ));
BX_INSTR_CLFLUSH(BX_CPU_ID, laddr, paddr);
@@ -533,7 +533,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i)
}
}
- bx_phy_address paddr = A20ADDR(dtranslate_linear(laddr, CPL, BX_READ));
+ bx_phy_address paddr = A20ADDR(translate_linear(laddr, USER_PL, BX_READ));
// Set the monitor immediately. If monitor is still armed when we MWAIT,
// the processor will stall.
diff --git a/bochs/cpu/tasking.cc b/bochs/cpu/tasking.cc
index 81ad3bc73..88f25eaed 100644
--- a/bochs/cpu/tasking.cc
+++ b/bochs/cpu/tasking.cc
@@ -188,8 +188,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
// used in the task switch are paged in.
if (BX_CPU_THIS_PTR cr0.get_PG())
{
- dtranslate_linear(nbase32, 0, BX_READ); // old TSS
- dtranslate_linear(nbase32 + new_TSS_max, 0, BX_READ);
+ translate_linear(nbase32, 0, BX_READ); // old TSS
+ translate_linear(nbase32 + new_TSS_max, 0, BX_READ);
// ??? Humm, we check the new TSS region with READ above,
// but sometimes we need to write the link field in that
@@ -200,8 +200,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
if (source == BX_TASK_FROM_CALL || source == BX_TASK_FROM_INT)
{
- dtranslate_linear(nbase32, 0, BX_WRITE);
- dtranslate_linear(nbase32 + 1, 0, BX_WRITE);
+ translate_linear(nbase32, 0, BX_WRITE);
+ translate_linear(nbase32 + 1, 0, BX_WRITE);
}
}
@@ -242,8 +242,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
if (BX_CPU_THIS_PTR tr.cache.type <= 3) {
// check that we won't page fault while writing
if (BX_CPU_THIS_PTR cr0.get_PG()) {
- dtranslate_linear(Bit32u(obase32 + 14), 0, BX_WRITE);
- dtranslate_linear(Bit32u(obase32 + 41), 0, BX_WRITE);
+ translate_linear(Bit32u(obase32 + 14), 0, BX_WRITE);
+ translate_linear(Bit32u(obase32 + 41), 0, BX_WRITE);
}
system_write_word(Bit32u(obase32 + 14), IP);
@@ -269,8 +269,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
else {
// check that we won't page fault while writing
if (BX_CPU_THIS_PTR cr0.get_PG()) {
- dtranslate_linear(Bit32u(obase32 + 0x20), 0, BX_WRITE);
- dtranslate_linear(Bit32u(obase32 + 0x5d), 0, BX_WRITE);
+ translate_linear(Bit32u(obase32 + 0x20), 0, BX_WRITE);
+ translate_linear(Bit32u(obase32 + 0x5d), 0, BX_WRITE);
}
system_write_dword(Bit32u(obase32 + 0x20), EIP);
diff --git a/bochs/doc/docbook/user/user.dbk b/bochs/doc/docbook/user/user.dbk
index 7f22ff905..dcf45b88a 100644
--- a/bochs/doc/docbook/user/user.dbk
+++ b/bochs/doc/docbook/user/user.dbk
@@ -3119,6 +3119,11 @@ This option exists only if Bochs compiled with x86-64 support.
Enable Process-Context Identifiers (PCID) support in long mode.
This option exists only if Bochs compiled with x86-64 support.
+smep
+
+Enable Supervisor Mode Execution Protection (SMEP) support.
+This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6.
+
mwait
Select MONITOR/MWAIT instructions support.
diff --git a/bochs/doc/man/bochsrc.5 b/bochs/doc/man/bochsrc.5
index 2133cb8b1..d32307777 100644
--- a/bochs/doc/man/bochsrc.5
+++ b/bochs/doc/man/bochsrc.5
@@ -244,6 +244,11 @@ pcid:
Enable Process-Context Identifiers (PCID) support in long mode.
This option exists only if Bochs compiled with x86-64 support.
+smep:
+
+Enable Supervisor Mode Execution Protection (SMEP) support.
+This option exists only if Bochs compiled with BX_CPU_LEVEL >= 6.
+
mwait:
Select MONITOR/MWAIT instructions support.
diff --git a/bochs/param_names.h b/bochs/param_names.h
index f18dd4fc1..1c5ae03ba 100644
--- a/bochs/param_names.h
+++ b/bochs/param_names.h
@@ -60,6 +60,7 @@
#define BXPN_CPUID_1G_PAGES "cpuid.1g_pages"
#define BXPN_CPUID_PCID "cpuid.pcid"
#define BXPN_CPUID_FSGSBASE "cpuid.fsgsbase"
+#define BXPN_CPUID_SMEP "cpuid.smep"
#define BXPN_MEM_SIZE "memory.standard.ram.size"
#define BXPN_HOST_MEM_SIZE "memory.standard.ram.host_size"
#define BXPN_ROM_PATH "memory.standard.rom.path"