Push an error code when a segment violation occurs while pushing arguments onto a new stack

Stanislav Shwartsman 2008-06-25 02:28:31 +00:00
parent 42b74da7f5
commit c1f308d80d
6 changed files with 97 additions and 74 deletions
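In outline, the change works like this: write_virtual_checks(), read_virtual_checks() and execute_virtual_checks() no longer raise the exception themselves but return a bx_bool status, and each caller raises #GP or #SS on its own; the paths that push arguments onto a new stack can therefore supply the new stack selector as the #SS error code. Below is a minimal self-contained sketch of that calling convention; it is not Bochs code, and every name carrying a _sketch suffix is hypothetical.

// Sketch only: illustrates the check-then-raise pattern this commit introduces.
#include <cstdint>
#include <cstdio>

enum { GP_VECTOR_SKETCH = 13, SS_VECTOR_SKETCH = 12 };

struct seg_sketch {
  uint16_t selector;       // segment selector value
  uint32_t limit_scaled;   // scaled segment limit
};

// After this commit the check helper only reports success or failure;
// previously it raised the exception itself and returned void.
static bool write_checks_sketch(const seg_sketch &seg, uint32_t offset, unsigned len)
{
  if (len - 1 > seg.limit_scaled || offset > seg.limit_scaled - (len - 1))
    return false;          // caller decides which exception to raise
  return true;
}

static void exception_sketch(unsigned vector, uint16_t error_code)
{
  std::printf("raise vector %u, error code 0x%04x\n", vector, error_code);
}

// Ordinary write path: a failed check still becomes #GP (or #SS for
// SS-relative accesses) with a zero error code, as before.
static void write_data_sketch(const seg_sketch &seg, uint32_t offset)
{
  if (!write_checks_sketch(seg, offset, 2))
    exception_sketch(GP_VECTOR_SKETCH, 0);
}

// Push onto a new stack (e.g. during an inter-privilege-level call):
// the same failed check now raises #SS with the new stack selector,
// low two bits cleared, as the error code.
static void push_new_stack_sketch(const seg_sketch &ss, uint32_t offset)
{
  if (!write_checks_sketch(ss, offset, 2))
    exception_sketch(SS_VECTOR_SKETCH, ss.selector & 0xfffc);
}

int main()
{
  seg_sketch ss = { 0x23, 0x0fff };
  write_data_sketch(ss, 0x0ffe);      // word write within limit: no exception
  push_new_stack_sketch(ss, 0x0fff);  // word write crosses limit: #SS, error code 0x0020
  return 0;
}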

File: access.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access.cc,v 1.111 2008-06-12 19:14:39 sshwarts Exp $
// $Id: access.cc,v 1.112 2008-06-25 02:28:31 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -31,7 +31,7 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
void BX_CPP_AttrRegparmN(3)
bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned length)
{
Bit32u upper_limit;
@ -40,18 +40,18 @@ BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned le
if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
// Mark cache as being OK type for succeeding reads/writes
seg->cache.valid |= SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
return;
return 1;
}
#endif
if (seg->cache.valid==0) {
BX_DEBUG(("write_virtual_checks(): segment descriptor not valid"));
exception(int_number(seg), 0, 0);
return 0;
}
if (seg->cache.p == 0) { /* not present */
BX_ERROR(("write_virtual_checks(): segment not present"));
exception(int_number(seg), 0, 0);
return 0;
}
switch (seg->cache.type) {
@ -62,14 +62,14 @@ BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned le
case 12: case 13: // execute only, conforming
case 14: case 15: // execute/read-only, conforming
BX_ERROR(("write_virtual_checks(): no write access to seg"));
exception(int_number(seg), 0, 0);
return 0;
case 2: case 3: /* read/write */
if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
|| (length-1 > seg->cache.u.segment.limit_scaled))
{
BX_ERROR(("write_virtual_checks(): write beyond limit, r/w"));
exception(int_number(seg), 0, 0);
return 0;
}
if (seg->cache.u.segment.limit_scaled >= 7) {
// Mark cache as being OK type for succeeding read/writes. The limit
@ -95,16 +95,18 @@ BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned le
(offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
{
BX_ERROR(("write_virtual_checks(): write beyond limit, r/w ED"));
exception(int_number(seg), 0, 0);
return 0;
}
break;
default:
BX_PANIC(("write_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
}
return 1;
}
void BX_CPP_AttrRegparmN(3)
bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned length)
{
Bit32u upper_limit;
@ -113,18 +115,18 @@ BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len
if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
// Mark cache as being OK type for succeeding reads/writes
seg->cache.valid |= SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
return;
return 1;
}
#endif
if (seg->cache.valid==0) {
BX_DEBUG(("read_virtual_checks(): segment descriptor not valid"));
exception(int_number(seg), 0, 0);
return 0;
}
if (seg->cache.p == 0) { /* not present */
BX_ERROR(("read_virtual_checks(): segment not present"));
exception(int_number(seg), 0, 0);
return 0;
}
switch (seg->cache.type) {
@ -136,7 +138,7 @@ BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len
|| (length-1 > seg->cache.u.segment.limit_scaled))
{
BX_ERROR(("read_virtual_checks(): read beyond limit"));
exception(int_number(seg), 0, 0);
return 0;
}
if (seg->cache.u.segment.limit_scaled >= 7) {
// Mark cache as being OK type for succeeding reads. See notes for
@ -157,7 +159,7 @@ BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len
(offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
{
BX_ERROR(("read_virtual_checks(): read beyond limit ED"));
exception(int_number(seg), 0, 0);
return 0;
}
break;
@ -165,14 +167,16 @@ BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len
case 12: case 13: /* execute only, conforming */
/* can't read or write an execute-only segment */
BX_ERROR(("read_virtual_checks(): execute only"));
exception(int_number(seg), 0, 0);
return 0;
default:
BX_PANIC(("read_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
}
return 1;
}
void BX_CPP_AttrRegparmN(3)
bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned length)
{
Bit32u upper_limit;
@ -181,18 +185,18 @@ BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned
if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
// Mark cache as being OK type for succeeding reads/writes
seg->cache.valid |= SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
return;
return 1;
}
#endif
if (seg->cache.valid==0) {
BX_DEBUG(("execute_virtual_checks(): segment descriptor not valid"));
exception(int_number(seg), 0, 0);
return 0;
}
if (seg->cache.p == 0) { /* not present */
BX_ERROR(("execute_virtual_checks(): segment not present"));
exception(int_number(seg), 0, 0);
return 0;
}
switch (seg->cache.type) {
@ -204,7 +208,7 @@ BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned
|| (length-1 > seg->cache.u.segment.limit_scaled))
{
BX_ERROR(("execute_virtual_checks(): read beyond limit"));
exception(int_number(seg), 0, 0);
return 0;
}
if (seg->cache.u.segment.limit_scaled >= 7) {
// Mark cache as being OK type for succeeding reads. See notes for
@ -221,7 +225,7 @@ BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned
|| (length-1 > seg->cache.u.segment.limit_scaled))
{
BX_ERROR(("execute_virtual_checks(): read beyond limit execute only"));
exception(int_number(seg), 0, 0);
return 0;
}
break;
@ -235,13 +239,15 @@ BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned
(offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
{
BX_ERROR(("execute_virtual_checks(): read beyond limit ED"));
exception(int_number(seg), 0, 0);
return 0;
}
break;
default:
BX_PANIC(("execute_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
}
return 1;
}
const char *BX_CPU_C::strseg(bx_segment_reg_t *seg)
@ -258,19 +264,6 @@ const char *BX_CPU_C::strseg(bx_segment_reg_t *seg)
}
}
int BX_CPU_C::int_number(bx_segment_reg_t *seg)
{
if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES]) return BX_GP_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS]) return BX_GP_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS]) return BX_SS_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS]) return BX_GP_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS]) return BX_GP_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS]) return BX_GP_EXCEPTION;
// undefined segment, this must be a new stack segment
return BX_SS_EXCEPTION;
}
int BX_CPU_C::int_number(unsigned s)
{
if (s == BX_SEG_REG_SS)

File: access32.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access32.cc,v 1.2 2008-06-12 20:41:48 sshwarts Exp $
// $Id: access32.cc,v 1.3 2008-06-25 02:28:31 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008 Stanislav Shwartsman
@ -79,7 +79,9 @@ accessOK:
if (offset <= seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 1);
if (!write_virtual_checks(seg, offset, 1))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -133,7 +135,9 @@ accessOK:
if (offset < seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 2);
if (!write_virtual_checks(seg, offset, 2))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -187,7 +191,9 @@ accessOK:
if (offset < (seg->cache.u.segment.limit_scaled-2))
goto accessOK;
}
write_virtual_checks(seg, offset, 4);
if (!write_virtual_checks(seg, offset, 4))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -241,7 +247,9 @@ accessOK:
if (offset <= (seg->cache.u.segment.limit_scaled-7))
goto accessOK;
}
write_virtual_checks(seg, offset, 8);
if (!write_virtual_checks(seg, offset, 8))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -285,7 +293,9 @@ accessOK:
if (offset <= seg->cache.u.segment.limit_scaled)
goto accessOK;
}
read_virtual_checks(seg, offset, 1);
if (!read_virtual_checks(seg, offset, 1))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -337,7 +347,9 @@ accessOK:
if (offset < seg->cache.u.segment.limit_scaled)
goto accessOK;
}
read_virtual_checks(seg, offset, 2);
if (!read_virtual_checks(seg, offset, 2))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -389,7 +401,9 @@ accessOK:
if (offset < (seg->cache.u.segment.limit_scaled-2))
goto accessOK;
}
read_virtual_checks(seg, offset, 4);
if (!read_virtual_checks(seg, offset, 4))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -441,7 +455,9 @@ accessOK:
if (offset <= (seg->cache.u.segment.limit_scaled-7))
goto accessOK;
}
read_virtual_checks(seg, offset, 8);
if (!read_virtual_checks(seg, offset, 8))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -494,7 +510,9 @@ accessOK:
if (offset <= seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 1);
if (!write_virtual_checks(seg, offset, 1))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -550,7 +568,9 @@ accessOK:
if (offset < seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 2);
if (!write_virtual_checks(seg, offset, 2))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -606,7 +626,9 @@ accessOK:
if (offset < (seg->cache.u.segment.limit_scaled-2))
goto accessOK;
}
write_virtual_checks(seg, offset, 4);
if (!write_virtual_checks(seg, offset, 4))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -662,7 +684,9 @@ accessOK:
if (offset <= (seg->cache.u.segment.limit_scaled-7))
goto accessOK;
}
write_virtual_checks(seg, offset, 8);
if (!write_virtual_checks(seg, offset, 8))
exception(int_number(s), 0, 0);
goto accessOK;
}
@ -882,7 +906,10 @@ accessOK:
if (offset < seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 2);
// add error code when segment violation occurs when pushing into new stack
if (!write_virtual_checks(seg, offset, 2))
exception(BX_SS_EXCEPTION, seg->selector.value & 0xfffc, 0);
goto accessOK;
}
@ -934,6 +961,9 @@ accessOK:
if (offset < (seg->cache.u.segment.limit_scaled-2))
goto accessOK;
}
write_virtual_checks(seg, offset, 4);
// add error code when segment violation occurs when pushing into new stack
if (!write_virtual_checks(seg, offset, 4))
exception(BX_SS_EXCEPTION, seg->selector.value & 0xfffc, 0);
goto accessOK;
}

File: access64.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access64.cc,v 1.5 2008-06-12 19:14:39 sshwarts Exp $
// $Id: access64.cc,v 1.6 2008-06-25 02:28:31 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008 Stanislav Shwartsman
@ -73,7 +73,7 @@ BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
if (! IsCanonical(laddr)) {
BX_ERROR(("write_virtual_byte_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
access_write_linear(laddr, 1, CPL, (void *) &data);
@ -113,7 +113,7 @@ BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data)
if (! IsCanonical(laddr)) {
BX_ERROR(("write_virtual_word_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
@ -162,7 +162,7 @@ BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data)
if (! IsCanonical(laddr)) {
BX_ERROR(("write_virtual_dword_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
@ -211,7 +211,7 @@ BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data)
if (! IsCanonical(laddr)) {
BX_ERROR(("write_virtual_qword_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
@ -258,7 +258,7 @@ BX_CPU_C::read_virtual_byte_64(unsigned s, Bit64u offset)
if (! IsCanonical(laddr)) {
BX_ERROR(("read_virtual_byte_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
@ -297,7 +297,7 @@ BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset)
if (! IsCanonical(laddr)) {
BX_ERROR(("read_virtual_word_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
@ -345,7 +345,7 @@ BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset)
if (! IsCanonical(laddr)) {
BX_ERROR(("read_virtual_dword_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
@ -393,7 +393,7 @@ BX_CPU_C::read_virtual_qword_64(unsigned s, Bit64u offset)
if (! IsCanonical(laddr)) {
BX_ERROR(("read_virtual_qword_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
@ -449,7 +449,7 @@ BX_CPU_C::read_RMW_virtual_byte_64(unsigned s, Bit64u offset)
if (! IsCanonical(laddr)) {
BX_ERROR(("read_RMW_virtual_byte_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
@ -492,7 +492,7 @@ BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset)
if (! IsCanonical(laddr)) {
BX_ERROR(("read_RMW_virtual_word_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
@ -544,7 +544,7 @@ BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset)
if (! IsCanonical(laddr)) {
BX_ERROR(("read_RMW_virtual_dword_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
@ -596,7 +596,7 @@ BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
if (! IsCanonical(laddr)) {
BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
exception(int_number(seg), 0, 0);
exception(int_number(s), 0, 0);
}
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK

File: cpu.h

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.490 2008-06-23 02:56:31 sshwarts Exp $
// $Id: cpu.h,v 1.491 2008-06-25 02:28:31 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -2829,9 +2829,9 @@ public: // for now...
BX_CPU_THIS_PTR eipPageWindowSize = 0;
}
BX_SMF void write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF void execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF bx_bool write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF bx_bool read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF bx_bool execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF Bit8u read_virtual_byte_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u read_virtual_word_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
@ -3073,7 +3073,6 @@ public: // for now...
BX_CPP_AttrNoReturn();
BX_SMF void smram_save_state(Bit32u *smm_saved_state);
BX_SMF bx_bool smram_restore_state(const Bit32u *smm_saved_state);
BX_SMF int int_number(bx_segment_reg_t *seg);
BX_SMF int int_number(unsigned s);
BX_SMF void SetCR0(Bit32u val_32) BX_CPP_AttrRegparmN(1);
BX_SMF void SetCR3(bx_address value) BX_CPP_AttrRegparmN(1);

File: paging.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: paging.cc,v 1.141 2008-06-14 16:55:45 sshwarts Exp $
// $Id: paging.cc,v 1.142 2008-06-25 02:28:31 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -524,10 +524,10 @@ void BX_CPU_C::TLB_flush(bx_bool invalidateGlobal)
InstrTLB_Increment(tlbNonGlobalFlushes);
#endif
for (unsigned i=0; i<BX_TLB_SIZE; i++) {
for (unsigned n=0; n<BX_TLB_SIZE; n++) {
// To be conscious of the native cache line usage, only
// write to (invalidate) entries which need it.
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[i];
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[n];
if (tlbEntry->lpf != BX_INVALID_TLB_ENTRY) {
#if BX_SUPPORT_GLOBAL_PAGES
if (invalidateGlobal || !(tlbEntry->accessBits & TLB_GlobalPage))

File: proc_ctrl.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: proc_ctrl.cc,v 1.244 2008-06-23 15:58:22 sshwarts Exp $
// $Id: proc_ctrl.cc,v 1.245 2008-06-25 02:28:31 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -192,14 +192,15 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLFLUSH(bxInstruction_c *i)
// check if we could access the memory segment
if (!(seg->cache.valid & SegAccessROK4G)) {
execute_virtual_checks(seg, RMAddr(i), 1);
if (! execute_virtual_checks(seg, RMAddr(i), 1))
exception(int_number(i->seg()), 0, 0);
}
bx_address laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), RMAddr(i));
#if BX_SUPPORT_X86_64
if (! IsCanonical(laddr)) {
BX_ERROR(("CLFLUSH: non-canonical access !"));
exception(int_number(seg), 0, 0);
exception(int_number(i->seg()), 0, 0);
}
#endif