diff --git a/bochs/cpu/access.cc b/bochs/cpu/access.cc
index 92ab24179..e1e829d79 100644
--- a/bochs/cpu/access.cc
+++ b/bochs/cpu/access.cc
@@ -292,16 +292,30 @@ int BX_CPU_C::int_number(unsigned s)
 }
 
 #if BX_SUPPORT_X86_64
-bool BX_CPP_AttrRegparmN(2) BX_CPU_C::IsCanonicalAccess(bx_address laddr, bool user)
+bool BX_CPP_AttrRegparmN(3) BX_CPU_C::IsCanonicalAccess(bx_address laddr, unsigned rw, bool user)
 {
   if (! IsCanonical(laddr)) {
     return false;
   }
 
-  if (long64_mode()) {
-    if (BX_CPU_THIS_PTR cr4.get_LASS()) {
-      // laddr[63] == 0 user, laddr[63] == 1 supervisor
-      if ((laddr >> 63) == user) {
+  if (long64_mode() && BX_CPU_THIS_PTR cr4.get_LASS()) {
+    // laddr[63] == 0 user, laddr[63] == 1 supervisor
+    bool access_user_space = (laddr >> 63) == 0;
+    if (user) {
+      // When LASS is enabled, linear user accesses to supervisor space are blocked
+      if (! access_user_space) {
+        BX_ERROR(("User access LASS canonical violation for address 0x" FMT_LL "x rw=%d", laddr, rw));
+        return false;
+      }
+      return true;
+    }
+
+    // A supervisor-mode instruction fetch causes a LASS violation if it would access a linear address[63] == 0
+    // A supervisor-mode data access causes a LASS violation only if supervisor-mode access protection is enabled
+    // (CR4.SMAP = 1) and RFLAGS.AC = 0 or the access implicitly accesses a system data structure.
+    if (rw == BX_EXECUTE || (BX_CPU_THIS_PTR cr4.get_SMAP() && ! BX_CPU_THIS_PTR get_AC())) {
+      if (access_user_space) {
+        BX_ERROR(("Supervisor access LASS canonical violation for address 0x" FMT_LL "x rw=%d", laddr, rw));
         return false;
       }
     }
@@ -322,7 +336,7 @@ int BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_p
   bool user = (curr_pl == 3);
 
 #if BX_SUPPORT_X86_64
-  if (! IsCanonicalAccess(laddr, user)) {
+  if (! IsCanonicalAccess(laddr, xlate_rw, user)) {
     BX_ERROR(("access_read_linear(): canonical failure"));
     return -1;
   }
@@ -365,7 +379,7 @@ int BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_p
 #if BX_SUPPORT_X86_64
   if (! long64_mode()) laddr2 &= 0xffffffff; /* handle linear address wrap in legacy mode */
   else {
-    if (! IsCanonicalAccess(laddr2, user)) {
+    if (! IsCanonicalAccess(laddr2, xlate_rw, user)) {
       BX_ERROR(("access_read_linear(): canonical failure for second half of page split access"));
       return -1;
     }
@@ -427,7 +441,7 @@ int BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr_
   bool user = (curr_pl == 3);
 
 #if BX_SUPPORT_X86_64
-  if (! IsCanonicalAccess(laddr, user)) {
+  if (! IsCanonicalAccess(laddr, xlate_rw, user)) {
     BX_ERROR(("access_write_linear(): canonical failure"));
     return -1;
   }
@@ -473,7 +487,7 @@ int BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr_
 #if BX_SUPPORT_X86_64
   if (! long64_mode()) laddr2 &= 0xffffffff; /* handle linear address wrap in legacy mode */
   else {
-    if (! IsCanonicalAccess(laddr2, user)) {
+    if (! IsCanonicalAccess(laddr2, xlate_rw, user)) {
       BX_ERROR(("access_write_linear(): canonical failure for second half of page split access"));
       return -1;
     }
diff --git a/bochs/cpu/access2.cc b/bochs/cpu/access2.cc
index a74363648..51b053ecc 100644
--- a/bochs/cpu/access2.cc
+++ b/bochs/cpu/access2.cc
@@ -315,7 +315,7 @@ BX_CPU_C::tickle_read_linear(unsigned s, bx_address laddr)
   }
 
 #if BX_SUPPORT_X86_64
-  if (! IsCanonicalAccess(laddr, USER_PL)) {
+  if (! IsCanonicalAccess(laddr, BX_READ, USER_PL)) {
     BX_ERROR(("tickle_read_linear(): canonical failure"));
     exception(int_number(s), 0);
   }
diff --git a/bochs/cpu/cpu.cc b/bochs/cpu/cpu.cc
index 457ba80b3..3805946f3 100644
--- a/bochs/cpu/cpu.cc
+++ b/bochs/cpu/cpu.cc
@@ -571,7 +571,7 @@ void BX_CPU_C::prefetch(void)
 
 #if BX_SUPPORT_X86_64
   if (long64_mode()) {
-    if (! IsCanonicalAccess(RIP, USER_PL)) {
+    if (! IsCanonicalAccess(RIP, BX_EXECUTE, USER_PL)) {
       BX_ERROR(("prefetch: #GP(0): RIP crossed canonical boundary"));
       exception(BX_GP_EXCEPTION, 0);
     }
diff --git a/bochs/cpu/cpu.h b/bochs/cpu/cpu.h
index 8d30cca48..d2789e0d6 100644
--- a/bochs/cpu/cpu.h
+++ b/bochs/cpu/cpu.h
@@ -4349,7 +4349,7 @@ public: // for now...
 
 #if BX_SUPPORT_X86_64
   BX_SMF BX_CPP_INLINE bool IsCanonical(bx_address addr) { return IsCanonicalToWidth(addr, BX_CPU_THIS_PTR linaddr_width); }
-  BX_SMF bool IsCanonicalAccess(bx_address addr, bool user) BX_CPP_AttrRegparmN(2);
+  BX_SMF bool IsCanonicalAccess(bx_address addr, unsigned rw, bool user) BX_CPP_AttrRegparmN(3);
 #endif
 
   BX_SMF bool write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len, bool align = false) BX_CPP_AttrRegparmN(4);
diff --git a/bochs/cpu/paging.cc b/bochs/cpu/paging.cc
index 0e9a20b30..26d557d95 100644
--- a/bochs/cpu/paging.cc
+++ b/bochs/cpu/paging.cc
@@ -1457,10 +1457,14 @@ bx_phy_address BX_CPU_C::translate_linear(bx_TLB_entry *tlbEntry, bx_address lad
 
 #if BX_SUPPORT_X86_64
     if (long64_mode() && BX_CPU_THIS_PTR cr4.get_LASS()) {
-      if (lpf >> 63) // supervisor, cannot access user pages
-        tlbEntry->accessBits &= ~(TLB_UserReadOK | TLB_UserWriteOK | TLB_UserReadShadowStackOK | TLB_UserWriteShadowStackOK);
-      else // user, cannot access supervisor pages
-        tlbEntry->accessBits &= ~(TLB_SysReadOK | TLB_SysWriteOK | TLB_SysReadShadowStackOK | TLB_SysWriteShadowStackOK);
+      if (lpf >> 63) { // supervisor, cannot be accessed by user
+        tlbEntry->accessBits &= ~(TLB_UserReadOK | TLB_UserWriteOK | TLB_UserReadShadowStackOK | TLB_UserWriteShadowStackOK | TLB_UserExecuteOK);
+      }
+      else { // user, cannot be executed by supervisor, cannot be accessed by supervisor if CR4.SMAP=1
+        tlbEntry->accessBits &= ~(TLB_SysExecuteOK);
+        if (BX_CPU_THIS_PTR cr4.get_SMAP())
+          tlbEntry->accessBits &= ~(TLB_SysReadOK | TLB_SysWriteOK | TLB_SysReadShadowStackOK | TLB_SysWriteShadowStackOK);
+      }
     }
 #endif
 }