Fixed except_chk issue in a cleaner way - added 3 new methods for pushing to the new, not yet loaded stack
parent 4ec7f5df39
commit 0fc32d3c81
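The three helpers added in this commit (write_new_stack_word, write_new_stack_dword, write_new_stack_qword) let call_protected(), call_gate64() and long_mode_int() write the old SS:SP and the return CS:IP onto the target stack before SS and CS are actually loaded, so a page fault during those pushes no longer needs the except_chk/except_cs/except_ss rollback. A minimal standalone sketch of the push pattern, using a simplified stand-in type instead of the real bx_segment_reg_t and the Bochs memory API:

// Illustration only: NewStack and the values below are simplified
// stand-ins, not the real Bochs structures or calls.
#include <cstdint>
#include <cstring>
#include <cstdio>

struct NewStack {
    uint8_t  mem[64];   // backing store standing in for guest stack memory
    uint32_t base = 0;  // segment base (flat for the demo)
};

// Analogue of write_new_stack_dword(): store through the not-yet-loaded
// stack segment instead of pushing through the current SS.
static void write_new_stack_dword(NewStack &s, uint32_t offset, uint32_t data)
{
    std::memcpy(s.mem + s.base + offset, &data, sizeof(data));
}

int main()
{
    NewStack new_stack;
    uint32_t temp_ESP   = sizeof(new_stack.mem);  // top of the new stack
    uint32_t return_SS  = 0x23;                   // example old stack pointer
    uint32_t return_ESP = 0x1000;

    // push pointer of old stack onto the new stack, then commit the new ESP,
    // mirroring the sequence used in call_far.cc
    write_new_stack_dword(new_stack, temp_ESP - 4, return_SS);
    write_new_stack_dword(new_stack, temp_ESP - 8, return_ESP);
    temp_ESP -= 8;

    std::printf("new ESP = %u\n", temp_ESP);
    return 0;
}

In the real code the store goes through v2h_write_word/dword/qword() or access_linear() and can raise #AC, #SS or #GP; the sketch only mirrors the stack-pointer bookkeeping.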
@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access.cc,v 1.72 2007-10-17 18:09:42 sshwarts Exp $
// $Id: access.cc,v 1.73 2007-10-19 10:14:33 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -38,8 +38,7 @@
#endif

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
unsigned length)
BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned length)
{
Bit32u upper_limit;

@@ -127,8 +126,7 @@ BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
unsigned length)
BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned length)
{
Bit32u upper_limit;

@@ -211,8 +209,7 @@ BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
unsigned length)
BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned length)
{
Bit32u upper_limit;

@@ -276,7 +273,7 @@ BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, bx_address offset,
if ((offset <= seg->cache.u.segment.limit_scaled) ||
(offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
{
BX_ERROR(("read_virtual_checks(): read beyond limit ED"));
BX_ERROR(("execute_virtual_checks(): read beyond limit ED"));
exception(int_number(seg), 0, 0);
}
break;
@@ -315,10 +312,15 @@ BX_CPU_C::strseg(bx_segment_reg_t *seg)

int BX_CPU_C::int_number(bx_segment_reg_t *seg)
{
if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS])
return(BX_SS_EXCEPTION);
else
return(BX_GP_EXCEPTION);
if (seg == &BX_CPU_THIS_PTR sregs[0]) return BX_GP_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[1]) return BX_GP_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[2]) return BX_SS_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[3]) return BX_GP_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[4]) return BX_GP_EXCEPTION;
if (seg == &BX_CPU_THIS_PTR sregs[5]) return BX_GP_EXCEPTION;
// undefined segment, this must be the new stack segment
return BX_SS_EXCEPTION;
}

#if BX_SupportGuest2HostTLB
@@ -1316,3 +1318,109 @@ BX_CPU_C::write_virtual_tword(unsigned s, bx_address offset, floatx80 *data)
}

#endif

//
// Write data to new stack, these methods are required for emulation
// correctness but not performance critical.
//

// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_word(bx_segment_reg_t *seg, bx_address offset, bx_bool user, Bit16u data)
{
bx_address laddr;

BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

if (seg->cache.valid & SegAccessWOK) {
if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_WRITE);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (user && BX_CPU_THIS_PTR alignment_check) {
if (laddr & 1) {
BX_ERROR(("write_new_stack_word(): misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
#if BX_SupportGuest2HostTLB
Bit16u *hostAddr = v2h_write_word(laddr, user);
if (hostAddr) {
WriteHostWordToLittleEndian(hostAddr, data);
return;
}
#endif
access_linear(laddr, 2, user, BX_WRITE, (void *) &data);
return;
}
}
write_virtual_checks(seg, offset, 2);
goto accessOK;
}

// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_dword(bx_segment_reg_t *seg, bx_address offset, bx_bool user, Bit32u data)
{
bx_address laddr;

BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

if (seg->cache.valid & SegAccessWOK) {
if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_WRITE);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (user && BX_CPU_THIS_PTR alignment_check) {
if (laddr & 3) {
BX_ERROR(("write_new_stack_dword(): misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
#if BX_SupportGuest2HostTLB
Bit32u *hostAddr = v2h_write_dword(laddr, user);
if (hostAddr) {
WriteHostDWordToLittleEndian(hostAddr, data);
return;
}
#endif
access_linear(laddr, 4, user, BX_WRITE, (void *) &data);
return;
}
}
write_virtual_checks(seg, offset, 4);
goto accessOK;
}

// assuming the write happens in 64-bit mode
void BX_CPU_C::write_new_stack_qword(bx_address offset, Bit64u data)
{
bx_address laddr = offset;

if (IsCanonical(offset)) {
unsigned pl = (CPL==3);
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_WRITE);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (pl && BX_CPU_THIS_PTR alignment_check) {
if (laddr & 7) {
BX_ERROR(("write_new_stack_qword(): misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
#if BX_SupportGuest2HostTLB
Bit64u *hostAddr = v2h_write_qword(laddr, pl);
if (hostAddr) {
WriteHostQWordToLittleEndian(hostAddr, data);
return;
}
#endif
access_linear(laddr, 8, pl, BX_WRITE, (void *) &data);
}
else {
BX_ERROR(("write_new_stack_qword(): canonical failure 0x%08x:%08x", GET32H(laddr), GET32L(laddr)));
exception(BX_SS_EXCEPTION, 0, 0);
}
}

@@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: call_far.cc,v 1.17 2007-10-18 22:44:38 sshwarts Exp $
// $Id: call_far.cc,v 1.18 2007-10-19 10:14:33 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -368,19 +368,58 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
}
}

// The follwoing push instructions might have a page fault which cannot
// be detected at this stage
BX_CPU_THIS_PTR except_chk = 1;
BX_CPU_THIS_PTR except_cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
BX_CPU_THIS_PTR except_ss = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];
// Prepare new stack segment
bx_segment_reg_t new_stack;
new_stack.selector = ss_selector;
new_stack.cache = ss_descriptor;
new_stack.selector.rpl = ss_descriptor.dpl;
// add cpl to the selector value
new_stack.selector.value = (0xfffc & new_stack.selector.value) |
new_stack.selector.rpl;
bx_bool user = (cs_descriptor.dpl == 3);

if (ss_descriptor.u.segment.d_b)
temp_ESP = ESP_for_cpl_x;
else
temp_ESP = (Bit16u) ESP_for_cpl_x;

// push pointer of old stack onto new stack
if (gate_descriptor.type==BX_386_CALL_GATE) {
write_new_stack_dword(&new_stack, temp_ESP-4, user, return_SS);
write_new_stack_dword(&new_stack, temp_ESP-8, user, return_ESP);
temp_ESP -= 8;

for (unsigned i=param_count; i>0; i--) {
temp_ESP -= 4;
write_new_stack_dword(&new_stack, temp_ESP, user, parameter_dword[i-1]);
}
// push return address onto new stack
write_new_stack_dword(&new_stack, temp_ESP-4, user, return_CS);
write_new_stack_dword(&new_stack, temp_ESP-8, user, return_EIP);
temp_ESP -= 8;
}
else {
write_new_stack_word(&new_stack, temp_ESP-2, user, return_SS);
write_new_stack_word(&new_stack, temp_ESP-4, user, (Bit16u) return_ESP);
temp_ESP -= 4;

for (unsigned i=param_count; i>0; i--) {
temp_ESP -= 2;
write_new_stack_word(&new_stack, temp_ESP, user, parameter_word[i-1]);
}
// push return address onto new stack
write_new_stack_word(&new_stack, temp_ESP-2, user, return_CS);
write_new_stack_word(&new_stack, temp_ESP-4, user, (Bit16u) return_EIP);
temp_ESP -= 4;
}

/* load new SS:SP value from TSS */
/* load SS descriptor */
load_ss(&ss_selector, &ss_descriptor, ss_descriptor.dpl);
if (ss_descriptor.u.segment.d_b)
ESP = ESP_for_cpl_x;
ESP = temp_ESP;
else
SP = (Bit16u) ESP_for_cpl_x;
SP = (Bit16u) temp_ESP;

/* load new CS:IP value from gate */
/* load CS descriptor */
@@ -388,43 +427,6 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
/* set RPL of CS to CPL */
load_cs(&cs_selector, &cs_descriptor, cs_descriptor.dpl);
EIP = new_EIP;

// push pointer of old stack onto new stack
if (gate_descriptor.type==BX_386_CALL_GATE) {
push_32(return_SS);
push_32(return_ESP);
}
else {
push_16(return_SS);
push_16((Bit16u) return_ESP);
}

/* get word count from call gate, mask to 5 bits */
/* copy parameters from old stack onto new stack */
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
temp_ESP = ESP;
else
temp_ESP = SP;

if (gate_descriptor.type==BX_286_CALL_GATE) {
for (unsigned i=param_count; i>0; i--) {
push_16(parameter_word[i-1]);
}
// push return address onto new stack
push_16(return_CS);
push_16((Bit16u) return_EIP);
}
else {
for (unsigned i=param_count; i>0; i--) {
push_32(parameter_dword[i-1]);
}
// push return address onto new stack
push_32(return_CS);
push_32(return_EIP);
}

// Help for OS/2
BX_CPU_THIS_PTR except_chk = 0;
}
else // CALL GATE TO SAME PRIVILEGE
{
@@ -513,7 +515,7 @@ BX_CPU_C::call_gate64(bx_selector_t *gate_selector)
exception(BX_NP_EXCEPTION, dest_selector & 0xfffc, 0);
}

Bit16u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
Bit64u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
Bit64u old_RIP = RIP;

// CALL GATE TO MORE PRIVILEGE
@@ -533,9 +535,18 @@ BX_CPU_C::call_gate64(bx_selector_t *gate_selector)
exception(BX_GP_EXCEPTION, 0, 0);
}

Bit16u old_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
Bit64u old_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
Bit64u old_RSP = RSP;

// push old stack long pointer onto new stack
write_new_stack_qword(RSP_for_cpl_x - 8, old_SS);
write_new_stack_qword(RSP_for_cpl_x - 16, old_RSP);
// push long pointer to return address onto new stack
write_new_stack_qword(RSP_for_cpl_x - 24, old_CS);
write_new_stack_qword(RSP_for_cpl_x - 32, old_RIP);
RSP_for_cpl_x -= 32;

// prepare new stack null SS selector
bx_selector_t ss_selector;
bx_descriptor_t ss_descriptor;

@@ -543,27 +554,12 @@ BX_CPU_C::call_gate64(bx_selector_t *gate_selector)
parse_selector(0, &ss_selector);
parse_descriptor(0, 0, &ss_descriptor);

// The following push instructions might have a page fault which cannot
// be detected at this stage
BX_CPU_THIS_PTR except_chk = 1;
BX_CPU_THIS_PTR except_cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
BX_CPU_THIS_PTR except_ss = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];

// load CS:RIP (guaranteed to be in 64 bit mode)
branch_far64(&cs_selector, &cs_descriptor, new_RIP, cs_descriptor.dpl);

// set up null SS descriptor
load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);
RSP = RSP_for_cpl_x;

// push old stack long pointer onto new stack
push_64(old_SS);
push_64(old_RSP);
// push long pointer to return address onto new stack
push_64(old_CS);
push_64(old_RIP);

BX_CPU_THIS_PTR except_chk = 0;
}
else
{
@@ -576,20 +572,12 @@ BX_CPU_C::call_gate64(bx_selector_t *gate_selector)
exception(BX_GP_EXCEPTION, 0, 0);
}

// The following push instructions might have a page fault which cannot
// be detected at this stage
BX_CPU_THIS_PTR except_chk = 1;
BX_CPU_THIS_PTR except_cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
BX_CPU_THIS_PTR except_ss = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];

// load CS:RIP (guaranteed to be in 64 bit mode)
branch_far64(&cs_selector, &cs_descriptor, new_RIP, CPL);

// push return address onto stack
push_64(old_CS);
push_64(old_RIP);

BX_CPU_THIS_PTR except_chk = 0;
// load CS:RIP (guaranteed to be in 64 bit mode)
branch_far64(&cs_selector, &cs_descriptor, new_RIP, CPL);
}
}
#endif

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.336 2007-10-18 22:44:38 sshwarts Exp $
// $Id: cpu.h,v 1.337 2007-10-19 10:14:33 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -1177,10 +1177,6 @@ public: // for now...
bx_segment_reg_t save_ss;
bx_address save_eip;
bx_address save_esp;
// This help for OS/2
bx_bool except_chk;
bx_segment_reg_t except_cs;
bx_segment_reg_t except_ss;

// Boundaries of current page, based on EIP
bx_address eipPageBias;
@@ -2827,6 +2823,12 @@ public: // for now...
#if BX_SUPPORT_FPU
BX_SMF void read_virtual_tword(unsigned seg, bx_address offset, floatx80 *data) BX_CPP_AttrRegparmN(3);
#endif
// write of word/dword to new stack could happen only in legacy mode
BX_SMF void write_new_stack_word(bx_segment_reg_t *seg, bx_address offset, bx_bool user, Bit16u data);
BX_SMF void write_new_stack_dword(bx_segment_reg_t *seg, bx_address offset, bx_bool user, Bit32u data);
// write of qword to new stack could happen only in 64-bit mode
// (so stack segment is not relevant)
BX_SMF void write_new_stack_qword(bx_address offset, Bit64u data);

#if BX_SUPPORT_MISALIGNED_SSE

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: exception.cc,v 1.91 2007-10-18 21:27:56 sshwarts Exp $
// $Id: exception.cc,v 1.92 2007-10-19 10:14:33 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -197,22 +197,13 @@ void BX_CPU_C::long_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code
parse_selector(0, &ss_selector);
parse_descriptor(0, 0, &ss_descriptor);

// The follwoing push instructions might have a page fault which cannot
// be detected at this stage
BX_CPU_THIS_PTR except_chk = 1;
BX_CPU_THIS_PTR except_cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
BX_CPU_THIS_PTR except_ss = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];

// load CS:RIP (guaranteed to be in 64 bit mode)
branch_far64(&cs_selector, &cs_descriptor, gate_dest_offset, cs_descriptor.dpl);

// set up null SS descriptor
load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);

RSP = RSP_for_cpl_x;

/* the size of the gate controls the size of the stack pushes */

// push old stack long pointer onto new stack
push_64(old_SS);
push_64(old_RSP);
@@ -223,8 +214,6 @@ void BX_CPU_C::long_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code
if (is_error_code)
push_64(error_code);

BX_CPU_THIS_PTR except_chk = 0;

// if INTERRUPT GATE set IF to 0
if (!(gate_descriptor.type & 1)) // even is int-gate
BX_CPU_THIS_PTR clear_IF();
@@ -277,8 +266,7 @@ void BX_CPU_C::long_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code
}

// else #GP(CS selector + ext)
BX_ERROR(("interrupt(long mode): bad descriptor"));
BX_ERROR(("type=%u, descriptor.dpl=%u, CPL=%u",
BX_ERROR(("interrupt(long mode): bad descriptor type=%u, descriptor.dpl=%u, CPL=%u",
(unsigned) cs_descriptor.type, (unsigned) cs_descriptor.dpl, (unsigned) CPL));
BX_ERROR(("cs.segment = %u", (unsigned) cs_descriptor.segment));
exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc, 0);
@@ -990,13 +978,6 @@ void BX_CPU_C::exception(unsigned vector, Bit16u error_code, bx_bool is_INT)
RIP = BX_CPU_THIS_PTR prev_eip;
RSP = BX_CPU_THIS_PTR prev_esp;

if (BX_CPU_THIS_PTR except_chk) // FIXME: Help with OS/2
{
BX_CPU_THIS_PTR except_chk = 0;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS] = BX_CPU_THIS_PTR except_cs;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS] = BX_CPU_THIS_PTR except_ss;
}

if (vector != BX_DB_EXCEPTION) BX_CPU_THIS_PTR assert_RF();
}