Fully implemented jump_far and ret_far in 64-bit mode.

Note that I am not sure about 100% correctness; I am just coding to the Intel specifications ...
Code review and extensive testing are still required.
This commit is contained in:
Stanislav Shwartsman 2005-08-02 18:44:20 +00:00
parent 26f0662199
commit d8ab4e3424
7 changed files with 441 additions and 419 deletions

View File

@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: call_far.cc,v 1.2 2005-08-01 22:18:40 sshwarts Exp $
// $Id: call_far.cc,v 1.3 2005-08-02 18:44:15 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -63,7 +63,7 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
if (cs_descriptor.segment) // normal segment
{
check_cs(&cs_descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw));
check_cs(&cs_descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw), CPL);
#if BX_SUPPORT_X86_64
if (i->os64L()) {
@ -107,27 +107,30 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
// descriptor DPL must be >= CPL else #GP(gate selector)
if (gate_descriptor.dpl < CPL) {
BX_ERROR(("jump_protected: descriptor.dpl < CPL"));
BX_ERROR(("call_protected: descriptor.dpl < CPL"));
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
// descriptor DPL must be >= gate selector RPL else #GP(gate selector)
if (gate_descriptor.dpl < gate_selector.rpl) {
BX_ERROR(("jump_protected: descriptor.dpl < selector.rpl"));
BX_ERROR(("call_protected: descriptor.dpl < selector.rpl"));
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
// gate descriptor must be present else #NP(gate selector)
if (! IS_PRESENT(gate_descriptor)) {
BX_ERROR(("call_protected: gate.p == 0"));
exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
}
switch (gate_descriptor.type) {
case BX_SYS_SEGMENT_AVAIL_286_TSS:
case BX_SYS_SEGMENT_AVAIL_386_TSS:
//if (gate_descriptor.type==1)
// BX_INFO(("call_protected: 16bit available TSS"));
//else
// BX_INFO(("call_protected: 32bit available TSS"));
// Task State Seg must be present, else #NP(TSS selector)
// checked in task_switch()
if (gate_descriptor.type==BX_SYS_SEGMENT_AVAIL_286_TSS)
BX_DEBUG(("call_protected: 16bit available TSS"));
else
BX_DEBUG(("call_protected: 32bit available TSS"));
// SWITCH_TASKS _without_ nesting to TSS
task_switch(&gate_selector, &gate_descriptor,
@ -141,15 +144,8 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
return;
case BX_TASK_GATE:
// task gate must be present else #NP(gate selector)
if (! IS_PRESENT(gate_descriptor)) {
BX_ERROR(("call_protected: task gate.p == 0"));
exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
}
// examine selector to TSS, given in Task Gate descriptor
// must specify global in the local/global bit else #TS(TSS selector)
raw_tss_selector = gate_descriptor.u.taskgate.tss_selector;
parse_selector(raw_tss_selector, &tss_selector);
if (tss_selector.ti) {
@ -163,6 +159,7 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
// descriptor AR byte must specify available TSS
// else #TS(TSS selector)
parse_descriptor(dword1, dword2, &tss_descriptor);
if (tss_descriptor.valid==0 || tss_descriptor.segment) {
BX_ERROR(("call_protected: TSS selector points to bad TSS"));
exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
@ -197,20 +194,13 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
case BX_286_CALL_GATE:
case BX_386_CALL_GATE:
//if (gate_descriptor.type==BX_286_CALL_GATE)
// BX_INFO(("CALL: 16bit call gate"));
//else
// BX_INFO(("CALL: 32bit call gate"));
// call gate must be present, else #NP(call gate selector)
if (! IS_PRESENT(gate_descriptor)) {
BX_ERROR(("call_protected: not present"));
exception(BX_NP_EXCEPTION, gate_selector.value & 0xfffc, 0);
}
if (gate_descriptor.type == BX_286_CALL_GATE)
BX_DEBUG(("CALL: 16bit call gate"));
else
BX_DEBUG(("CALL: 32bit call gate"));
// examine code segment selector in call gate descriptor
if (gate_descriptor.type==BX_286_CALL_GATE) {
if (gate_descriptor.type == BX_286_CALL_GATE) {
dest_selector = gate_descriptor.u.gate286.dest_selector;
new_EIP = gate_descriptor.u.gate286.dest_offset;
}
@ -257,12 +247,11 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
Bit16u return_SS, return_CS;
Bit32u return_ESP, return_EIP;
Bit32u return_ss_base;
unsigned i;
Bit16u parameter_word[32];
Bit32u parameter_dword[32];
Bit32u temp_ESP;
// BX_INFO(("CALL: Call Gate: to more priviliged level"));
BX_DEBUG(("CALL GATE TO MORE PRIVILEGE LEVEL"));
// Help for OS/2
BX_CPU_THIS_PTR except_chk = 1;
@ -367,13 +356,13 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
return_EIP = IP;
if (gate_descriptor.type==BX_286_CALL_GATE) {
for (i=0; i<param_count; i++) {
for (unsigned i=0; i<param_count; i++) {
access_linear(return_ss_base + return_ESP + i*2,
2, 0, BX_READ, &parameter_word[i]);
}
}
else {
for (i=0; i<param_count; i++) {
for (unsigned i=0; i<param_count; i++) {
access_linear(return_ss_base + return_ESP + i*4,
4, 0, BX_READ, &parameter_dword[i]);
}
@ -395,14 +384,14 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
EIP = new_EIP;
// push pointer of old stack onto new stack
if (gate_descriptor.type==BX_286_CALL_GATE) {
push_16(return_SS);
push_16((Bit16u) return_ESP);
}
else {
if (gate_descriptor.type==BX_386_CALL_GATE) {
push_32(return_SS);
push_32(return_ESP);
}
else {
push_16(return_SS);
push_16((Bit16u) return_ESP);
}
/* get word count from call gate, mask to 5 bits */
/* copy parameters from old stack onto new stack */
@ -412,12 +401,12 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
temp_ESP = SP;
if (gate_descriptor.type==BX_286_CALL_GATE) {
for (i=param_count; i>0; i--) {
for (unsigned i=param_count; i>0; i--) {
push_16(parameter_word[i-1]);
}
}
else {
for (i=param_count; i>0; i--) {
for (unsigned i=param_count; i>0; i--) {
push_32(parameter_dword[i-1]);
}
}

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.229 2005-08-01 21:40:10 sshwarts Exp $
// $Id: cpu.h,v 1.230 2005-08-02 18:44:15 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -2736,6 +2736,12 @@ public: // for now...
BX_SMF void reset(unsigned source);
BX_SMF void jump_protected(bxInstruction_c *, Bit16u cs, bx_address disp) BX_CPP_AttrRegparmN(3);
BX_SMF void jmp_task_gate(bx_descriptor_t *gate_descriptor) BX_CPP_AttrRegparmN(1);
BX_SMF void jmp_call_gate16(bx_descriptor_t *gate_descriptor) BX_CPP_AttrRegparmN(1);
BX_SMF void jmp_call_gate32(bx_descriptor_t *gate_descriptor) BX_CPP_AttrRegparmN(1);
#if BX_SUPPORT_X86_64
BX_SMF void jmp_call_gate64(bx_descriptor_t *gate_descriptor) BX_CPP_AttrRegparmN(1);
#endif
BX_SMF void call_protected(bxInstruction_c *, Bit16u cs, bx_address disp) BX_CPP_AttrRegparmN(3);
BX_SMF void return_protected(bxInstruction_c *, Bit16u pop_bytes) BX_CPP_AttrRegparmN(2);
BX_SMF void iret_protected(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
@ -2779,19 +2785,21 @@ public: // for now...
BX_SMF void parse_selector(Bit16u raw_selector, bx_selector_t *selector) BX_CPP_AttrRegparmN(2);
BX_SMF void parse_descriptor(Bit32u dword1, Bit32u dword2, bx_descriptor_t *temp) BX_CPP_AttrRegparmN(3);
BX_SMF void load_ldtr(bx_selector_t *selector, bx_descriptor_t *descriptor);
BX_SMF void check_cs(bx_descriptor_t *descriptor, Bit16u cs_raw, Bit8u check_rpl) BX_CPP_AttrRegparmN(3);
BX_SMF void check_cs(bx_descriptor_t *descriptor, Bit16u cs_raw, Bit8u check_rpl, Bit8u check_cpl);
BX_SMF void load_cs(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cpl) BX_CPP_AttrRegparmN(3);
BX_SMF void load_ss(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cpl) BX_CPP_AttrRegparmN(3);
BX_SMF void fetch_raw_descriptor(bx_selector_t *selector,
Bit32u *dword1, Bit32u *dword2, Bit8u exception) BX_CPP_AttrRegparmN(3);
Bit32u *dword1, Bit32u *dword2, Bit8u exception) BX_CPP_AttrRegparmN(3);
BX_SMF bx_bool fetch_raw_descriptor2(bx_selector_t *selector,
Bit32u *dword1, Bit32u *dword2) BX_CPP_AttrRegparmN(3);
BX_SMF void load_seg_reg(bx_segment_reg_t *seg, Bit16u new_value) BX_CPP_AttrRegparmN(2);
#if BX_SUPPORT_X86_64
BX_SMF void fetch_raw_descriptor64(bx_selector_t *selector,
Bit32u *dword1, Bit32u *dword2, Bit32u *dword3, Bit8u exception_no);
BX_SMF void load_ss64(Bit8u cpl) BX_CPP_AttrRegparmN(1);
BX_SMF void loadSRegLMNominal(unsigned seg, unsigned selector,
bx_address base, unsigned dpl);
#endif
BX_SMF bx_bool fetch_raw_descriptor2(bx_selector_t *selector,
Bit32u *dword1, Bit32u *dword2) BX_CPP_AttrRegparmN(3);
BX_SMF void push_16(Bit16u value16) BX_CPP_AttrRegparmN(1);
BX_SMF void push_32(Bit32u value32);
BX_SMF void push_64(Bit64u value64);

View File

@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: ctrl_xfer_pro.cc,v 1.49 2005-08-01 22:06:18 sshwarts Exp $
// $Id: ctrl_xfer_pro.cc,v 1.50 2005-08-02 18:44:16 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -38,9 +38,16 @@
/* pass zero in check_rpl if no needed selector RPL checking for
non-conforming segments */
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::check_cs(bx_descriptor_t *descriptor, Bit16u cs_raw, Bit8u check_rpl)
void BX_CPU_C::check_cs(bx_descriptor_t *descriptor, Bit16u cs_raw, Bit8u check_rpl, Bit8u check_cpl)
{
// descriptor AR byte must indicate code segment else #GP(selector)
if ((descriptor->valid==0) || (descriptor->segment==0) ||
(descriptor->u.segment.executable==0))
{
BX_ERROR(("check_cs: not a valid code segment !"));
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
#if BX_SUPPORT_X86_64
if (descriptor->u.segment.l)
{
@ -55,31 +62,23 @@ BX_CPU_C::check_cs(bx_descriptor_t *descriptor, Bit16u cs_raw, Bit8u check_rpl)
}
#endif
// descriptor AR byte must indicate code segment else #GP(selector)
if ((descriptor->valid==0) || (descriptor->segment==0) ||
(descriptor->u.segment.executable==0))
{
BX_ERROR(("check_cs: not a valid code segment !"));
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
// if non-conforming, code segment descriptor DPL must = CPL else #GP(selector)
if (descriptor->u.segment.c_ed==0) {
if (descriptor->dpl != CPL) {
BX_ERROR(("check_cs: non-conforming code seg descriptor DPL != CPL"));
if (descriptor->dpl != check_cpl) {
BX_ERROR(("check_cs: non-conforming code seg descriptor dpl != cpl"));
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
/* RPL of destination selector must be <= CPL else #GP(selector) */
if (check_rpl > CPL) {
BX_ERROR(("check_cs: non-conforming code seg selector rpl > CPL"));
if (check_rpl > check_cpl) {
BX_ERROR(("check_cs: non-conforming code seg selector rpl > cpl"));
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
}
// if conforming, then code segment descriptor DPL must <= CPL else #GP(selector)
else {
if (descriptor->dpl > CPL) {
BX_ERROR(("check_cs: conforming code seg descriptor DPL > CPL"));
if (descriptor->dpl > check_cpl) {
BX_ERROR(("check_cs: conforming code seg descriptor dpl > cpl"));
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
}
@ -139,6 +138,23 @@ BX_CPU_C::branch_near32(Bit32u new_EIP)
revalidate_prefetch_q();
}
void BX_CPU_C::branch_far32(bx_selector_t *selector,
       bx_descriptor_t *descriptor, Bit32u eip, Bit8u cpl)
{
  // The target instruction pointer must lie inside the destination code
  // segment's scaled limit, else #GP(0) — checked before any state changes.
  const Bit32u cs_limit = descriptor->u.segment.limit_scaled;
  if (eip > cs_limit) {
    BX_ERROR(("branch_far: EIP > limit"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Commit the far branch: load the CS cache with the new descriptor
  // (privilege level `cpl`), then update the instruction pointer.
  load_cs(selector, descriptor, cpl);
  EIP = eip;
}
#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::branch_near64(bxInstruction_c *i)
@ -160,23 +176,6 @@ BX_CPU_C::branch_near64(bxInstruction_c *i)
}
#endif
void BX_CPU_C::branch_far32(bx_selector_t *selector,
       bx_descriptor_t *descriptor, Bit32u eip, Bit8u cpl)
{
  /* Destination offset must not exceed the code segment's scaled limit,
   * otherwise raise #GP(0) before touching any processor state. */
  if (eip > descriptor->u.segment.limit_scaled) {
    BX_ERROR(("branch_far: EIP > limit"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  /* Load the CS descriptor cache from the destination pointer and
   * transfer control to the new offset. */
  load_cs(selector, descriptor, cpl);
  RIP = eip;
}
void BX_CPU_C::branch_far64(bx_selector_t *selector,
bx_descriptor_t *descriptor, bx_address rip, Bit8u cpl)
{

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: exception.cc,v 1.58 2005-07-10 20:32:31 sshwarts Exp $
// $Id: exception.cc,v 1.59 2005-08-02 18:44:16 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -98,7 +98,6 @@ void BX_CPU_C::long_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code
BX_ERROR(("interrupt(long mode): gate.type(%u) != {5,6,7,14,15}",
(unsigned) gate_descriptor.type));
exception(BX_GP_EXCEPTION, vector*8 + 2, 0);
return;
}
// if software interrupt, then gate descriptor DPL must be >= CPL,
@ -107,7 +106,6 @@ void BX_CPU_C::long_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code
{
BX_ERROR(("interrupt(long mode): is_INT && (dpl < CPL)"));
exception(BX_GP_EXCEPTION, vector*8 + 2, 0);
return;
}
// Gate must be present, else #NP(vector * 8 + 2 + EXT)
@ -340,7 +338,6 @@ void BX_CPU_C::protected_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error
BX_DEBUG(("interrupt(): gate.type(%u) != {5,6,7,14,15}",
(unsigned) gate_descriptor.type));
exception(BX_GP_EXCEPTION, vector*8 + 2, 0);
return;
}
// if software interrupt, then gate descriptor DPL must be >= CPL,
@ -370,7 +367,6 @@ void BX_CPU_C::protected_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error
if (tss_selector.ti) {
BX_PANIC(("interrupt: tss_selector.ti=1"));
exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
return;
}
// index must be within GDT limits, else #TS(TSS selector)
@ -383,16 +379,17 @@ void BX_CPU_C::protected_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error
if (tss_descriptor.valid==0 || tss_descriptor.segment) {
BX_PANIC(("exception: TSS selector points to bad TSS"));
exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
return;
}
if (tss_descriptor.type!=9 && tss_descriptor.type!=1) {
BX_INFO(("exception: TSS selector points to bad TSS"));
exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
return;
}
// TSS must be present, else #NP(TSS selector)
// done in task_switch()
if (! IS_PRESENT(tss_descriptor)) {
BX_ERROR(("exception: TSS descriptor.p == 0"));
exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
}
// switch tasks with nesting to TSS
task_switch(&tss_selector, &tss_descriptor,
@ -402,9 +399,7 @@ void BX_CPU_C::protected_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error
// stack limits must allow push of 2 more bytes, else #SS(0)
// push error code onto stack
//??? push_16 vs push_32
if ( is_error_code ) {
//if (tss_descriptor.type==9)
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
push_32(error_code);
else

View File

@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: jmp_far.cc,v 1.2 2005-08-01 22:18:40 sshwarts Exp $
// $Id: jmp_far.cc,v 1.3 2005-08-02 18:44:20 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -35,7 +35,6 @@
#define RIP EIP
#endif
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::jump_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
{
@ -59,26 +58,11 @@ BX_CPU_C::jump_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
parse_descriptor(dword1, dword2, &descriptor);
if ( descriptor.segment ) {
check_cs(&descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw));
check_cs(&descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw), CPL);
branch_far64(&selector, &descriptor, disp, CPL);
return;
}
else {
Bit16u raw_tss_selector;
bx_selector_t tss_selector, gate_cs_selector;
bx_descriptor_t tss_descriptor, gate_cs_descriptor;
Bit16u gate_cs_raw;
Bit32u temp_eIP;
#if BX_SUPPORT_X86_64
if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
if (descriptor.type != BX_386_CALL_GATE) {
BX_ERROR(("jump_protected: gate type %u unsupported in long mode", (unsigned) descriptor.type));
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
}
#endif
// call gate DPL must be >= CPL else #GP(gate selector)
if (descriptor.dpl < CPL) {
BX_ERROR(("jump_protected: call gate.dpl < CPL"));
@ -91,17 +75,33 @@ BX_CPU_C::jump_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
// task gate must be present else #NP(gate selector)
if (! IS_PRESENT(descriptor)) {
BX_ERROR(("jump_protected: call gate.p == 0"));
exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
}
#if BX_SUPPORT_X86_64
if (IsLongMode()) {
if (descriptor.type != BX_386_CALL_GATE) {
BX_ERROR(("jump_protected: gate type %u unsupported in long mode", (unsigned) descriptor.type));
exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
}
else {
jmp_call_gate64(&descriptor);
return;
}
}
#endif
switch ( descriptor.type ) {
case BX_SYS_SEGMENT_AVAIL_286_TSS:
case BX_SYS_SEGMENT_AVAIL_386_TSS:
//if ( descriptor.type==BX_SYS_SEGMENT_AVAIL_286_TSS )
// BX_INFO(("jump to 286 TSS"));
//else
// BX_INFO(("jump to 386 TSS"));
// Task State Seg must be present, else #NP(TSS selector)
// checked in task_switch()
if ( descriptor.type==BX_SYS_SEGMENT_AVAIL_286_TSS )
BX_DEBUG(("jump to 286 TSS"));
else
BX_DEBUG(("jump to 386 TSS"));
// SWITCH_TASKS _without_ nesting to TSS
task_switch(&selector, &descriptor, BX_TASK_FROM_JUMP, dword1, dword2);
@ -113,166 +113,16 @@ BX_CPU_C::jump_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
}
return;
case BX_TASK_GATE:
// task gate must be present else #NP(gate selector)
if (! IS_PRESENT(descriptor)) {
BX_ERROR(("jump_protected: task gate.p == 0"));
exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
}
// examine selector to TSS, given in Task Gate descriptor
// must specify global in the local/global bit else #GP(TSS selector)
raw_tss_selector = descriptor.u.taskgate.tss_selector;
parse_selector(raw_tss_selector, &tss_selector);
if (tss_selector.ti) {
BX_ERROR(("jump_protected: tss_selector.ti=1"));
exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
}
// index must be within GDT limits else #GP(TSS selector)
fetch_raw_descriptor(&tss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
// descriptor AR byte must specify available TSS
// else #GP(TSS selector)
parse_descriptor(dword1, dword2, &tss_descriptor);
if (tss_descriptor.valid==0 || tss_descriptor.segment) {
BX_ERROR(("jump_protected: TSS selector points to bad TSS"));
exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
}
if (tss_descriptor.type!=9 && tss_descriptor.type!=1) {
BX_ERROR(("jump_protected: TSS selector points to bad TSS"));
exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
}
// task state segment must be present, else #NP(tss selector)
if (! IS_PRESENT(tss_descriptor)) {
BX_ERROR(("jump_protected: task descriptor.p == 0"));
exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
}
// SWITCH_TASKS _without_ nesting to TSS
task_switch(&tss_selector, &tss_descriptor,
BX_TASK_FROM_JUMP, dword1, dword2);
// EIP must be within code segment limit, else #GP(0)
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
temp_eIP = EIP;
else
temp_eIP = IP;
if (temp_eIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
BX_ERROR(("jump_protected: EIP > CS.limit"));
exception(BX_GP_EXCEPTION, 0, 0);
}
break;
case BX_286_CALL_GATE:
BX_DEBUG(("jump_protected: JUMP TO 286 CALL GATE"));
jmp_call_gate16(&descriptor);
return;
// gate must be present else #NP(gate selector)
if (! IS_PRESENT(descriptor)) {
BX_ERROR(("jump_protected: call gate not present !"));
exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
}
// examine selector to code segment given in call gate descriptor
// selector must not be null, else #GP(0)
gate_cs_raw = descriptor.u.gate286.dest_selector;
if ((gate_cs_raw & 0xfffc) == 0) {
BX_ERROR(("jump_protected: CS selector null"));
exception(BX_GP_EXCEPTION, 0, 0);
}
parse_selector(gate_cs_raw, &gate_cs_selector);
// selector must be within its descriptor table limits else #GP(CS selector)
fetch_raw_descriptor(&gate_cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
parse_descriptor(dword1, dword2, &gate_cs_descriptor);
// check code-segment descriptor, no need to check selector RPL
// for non-conforming segments
check_cs(&gate_cs_descriptor, gate_cs_raw, 0);
branch_far32(&gate_cs_selector, &gate_cs_descriptor,
descriptor.u.gate286.dest_offset, CPL);
case BX_TASK_GATE:
jmp_task_gate(&descriptor);
return;
case BX_386_CALL_GATE:
#if BX_SUPPORT_X86_64
if (BX_CPU_THIS_PTR msr.lma) { // LONG MODE
BX_PANIC(("jump to CALL_GATE64 still not implemented"));
}
#endif
// gate must be present else #NP(gate selector)
if (! IS_PRESENT(descriptor)) {
BX_ERROR(("jump_protected: task gate.p == 0"));
exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
return;
}
// examine selector to code segment given in call gate descriptor
// selector must not be null, else #GP(0)
gate_cs_raw = descriptor.u.gate386.dest_selector;
if ((gate_cs_raw & 0xfffc) == 0) {
BX_ERROR(("jump_protected: CS selector null"));
exception(BX_GP_EXCEPTION, 0, 0);
}
parse_selector(gate_cs_raw, &gate_cs_selector);
// selector must be within its descriptor table limits else #GP(CS selector)
fetch_raw_descriptor(&gate_cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
parse_descriptor(dword1, dword2, &gate_cs_descriptor);
/*
STILL NOT IMPLEMENTED ...
IF (LONG_MODE)
{
// in long mode, we need to read the 2nd half of a 16-byte call-gate
// from the gdt/ldt to get the upper 32 bits of the target RIP
temp_upper = READ_MEM.q [temp_sel+8]
// Make sure the extended attribute bits are all zero.
IF (temp_upper's extended attribute bits != 0)
EXCEPTION [#GP(temp_sel)]
// concatenate both halves of RIP
temp_RIP = tempRIP + (temp_upper SHL 32)
}
// set up new CS base, attr, limits
CS = READ_DESCRIPTOR (temp_desc.segment, clg_chk)
IF ((64BIT_MODE) && (temp_RIP is non-canonical)
|| (!64BIT_MODE) && (temp_RIP > CS.limit))
{
EXCEPTION [#GP(0)]
}
RIP = temp_RIP
*/
// check code-segment descriptor, no need to check selector RPL
// for non-conforming segments
check_cs(&gate_cs_descriptor, gate_cs_raw, 0);
// IP must be in code segment limit else #GP(0)
if ( descriptor.u.gate386.dest_offset >
gate_cs_descriptor.u.segment.limit_scaled )
{
BX_ERROR(("jump_protected: EIP > limit"));
exception(BX_GP_EXCEPTION, 0, 0);
}
// load CS:IP from call gate
// load CS cache with new code segment
// set rpl of CS to CPL
load_cs(&gate_cs_selector, &gate_cs_descriptor, CPL);
EIP = descriptor.u.gate386.dest_offset;
jmp_call_gate32(&descriptor);
return;
default:
@ -281,3 +131,164 @@ BX_CPU_C::jump_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
}
}
}
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::jmp_task_gate(bx_descriptor_t *gate_descriptor)
{
  bx_selector_t tss_selector;
  bx_descriptor_t tss_descriptor;
  Bit32u dword1, dword2;

  // The task gate carries the selector of the target TSS; it must
  // refer to the GDT (TI bit clear), else #GP(TSS selector).
  Bit16u raw_tss_selector = gate_descriptor->u.taskgate.tss_selector;
  parse_selector(raw_tss_selector, &tss_selector);
  if (tss_selector.ti) {
    BX_ERROR(("jump_protected: tss_selector.ti=1"));
    exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
  }

  // Selector index must be within GDT limits, else #GP(TSS selector).
  fetch_raw_descriptor(&tss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &tss_descriptor);

  // The referenced descriptor must be a valid system-segment TSS
  // descriptor — 16-bit (type 1) or 32-bit (type 9) available TSS —
  // else #GP(TSS selector).
  if (tss_descriptor.valid==0 || tss_descriptor.segment) {
    BX_ERROR(("jump_protected: TSS selector points to bad TSS"));
    exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
  }
  if (tss_descriptor.type!=9 && tss_descriptor.type!=1) {
    BX_ERROR(("jump_protected: TSS selector points to bad TSS"));
    exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
  }

  // The TSS itself must be marked present, else #NP(TSS selector).
  if (! IS_PRESENT(tss_descriptor)) {
    BX_ERROR(("jump_protected: TSS descriptor.p == 0"));
    exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
  }

  // Switch tasks *without* nesting — this is a JMP, not a CALL.
  task_switch(&tss_selector, &tss_descriptor, BX_TASK_FROM_JUMP, dword1, dword2);

  // After the switch the new instruction pointer must be inside the new
  // CS limit, else #GP(0); use IP when the new code segment is 16-bit.
  Bit32u dest_eIP;
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
    dest_eIP = EIP;
  else
    dest_eIP = IP;

  if (dest_eIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("jump_protected: EIP > CS.limit"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }
}
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::jmp_call_gate16(bx_descriptor_t *gate_descriptor)
{
  bx_selector_t dest_selector;
  bx_descriptor_t dest_descriptor;
  Bit32u dword1, dword2;

  BX_DEBUG(("jump_protected: JUMP TO 286 CALL GATE"));

  // The code-segment selector stored in the 286 gate must not be null,
  // else #GP(0).
  Bit16u dest_raw = gate_descriptor->u.gate286.dest_selector;
  if ((dest_raw & 0xfffc) == 0) {
    BX_ERROR(("jump_protected: CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }
  parse_selector(dest_raw, &dest_selector);

  // The selector must index a descriptor inside its table's limit,
  // else #GP(CS selector).
  fetch_raw_descriptor(&dest_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &dest_descriptor);

  // Validate the target as a code segment at the current CPL;
  // the RPL check is skipped (0) for gate transfers.
  check_cs(&dest_descriptor, dest_raw, 0, CPL);

  // Branch to the 16-bit offset taken from the gate.
  Bit16u dest_IP = gate_descriptor->u.gate286.dest_offset;
  branch_far32(&dest_selector, &dest_descriptor, dest_IP, CPL);
}
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::jmp_call_gate32(bx_descriptor_t *gate_descriptor)
{
  bx_selector_t dest_selector;
  bx_descriptor_t dest_descriptor;
  Bit32u dword1, dword2;

  BX_DEBUG(("jump_protected: JUMP TO 386 CALL GATE"));

  // The code-segment selector stored in the 386 gate must not be null,
  // else #GP(0).
  Bit16u dest_raw = gate_descriptor->u.gate386.dest_selector;
  if ((dest_raw & 0xfffc) == 0) {
    BX_ERROR(("jump_protected: CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }
  parse_selector(dest_raw, &dest_selector);

  // The selector must index a descriptor inside its table's limit,
  // else #GP(CS selector).
  fetch_raw_descriptor(&dest_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &dest_descriptor);

  // Validate the target as a code segment at the current CPL;
  // the RPL check is skipped (0) for gate transfers.
  check_cs(&dest_descriptor, dest_raw, 0, CPL);

  // Branch to the 32-bit offset taken from the gate.
  Bit32u dest_EIP = gate_descriptor->u.gate386.dest_offset;
  branch_far32(&dest_selector, &dest_descriptor, dest_EIP, CPL);
}
#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::jmp_call_gate64(bx_descriptor_t *gate_descriptor)
{
  bx_selector_t dest_selector;
  bx_descriptor_t dest_descriptor;
  Bit32u dword1, dword2, dword3;

  BX_DEBUG(("jump_protected: jump to CALL GATE 64"));

  // The code-segment selector held in the gate must not be null,
  // else #GP(0).
  Bit16u dest_raw = gate_descriptor->u.gate386.dest_selector;
  if ((dest_raw & 0xfffc) == 0) {
    BX_ERROR(("jump_protected: CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }
  parse_selector(dest_raw, &dest_selector);

  // Long-mode descriptors are fetched as three dwords; the selector
  // must be within its descriptor table's limit, else #GP(CS selector).
  fetch_raw_descriptor64(&dest_selector, &dword1, &dword2, &dword3, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &dest_descriptor);

  // In long mode a 64-bit call gate must point at a 64-bit code segment
  // (L bit set, D bit clear), else #GP(selector).
  if (! IS_LONG64_SEGMENT(dest_descriptor) || dest_descriptor.u.segment.d_b)
  {
    BX_ERROR(("jump_protected: not 64-bit code segment in call gate 64"));
    exception(BX_GP_EXCEPTION, dest_raw & 0xfffc, 0);
  }

  // Validate the target as a code segment at the current CPL;
  // the RPL check is skipped (0) for gate transfers.
  check_cs(&dest_descriptor, dest_raw, 0, CPL);

  // Assemble the 64-bit target RIP: low 32 bits from the gate's offset
  // field, high 32 bits from the third descriptor dword.
  Bit64u dest_RIP = gate_descriptor->u.gate386.dest_offset;
  dest_RIP |= ((Bit64u)dword3 << 32);

  branch_far64(&dest_selector, &dest_descriptor, dest_RIP, CPL);
}
#endif

View File

@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: ret_far.cc,v 1.2 2005-08-01 22:18:40 sshwarts Exp $
// $Id: ret_far.cc,v 1.3 2005-08-02 18:44:20 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -43,7 +43,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
Bit16u raw_cs_selector, raw_ss_selector;
bx_selector_t cs_selector, ss_selector;
bx_descriptor_t cs_descriptor, ss_descriptor;
Bit32u stack_cs_offset, stack_param_offset;
Bit32u stack_param_offset;
bx_address return_RIP, return_RSP, temp_RSP;
Bit32u dword1, dword2;
@ -56,40 +56,6 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
/* + 2: CS | + 4: CS | + 8: CS */
/* + 0: IP | + 0: EIP | + 0: RIP */
#if BX_SUPPORT_X86_64
if ( i->os64L() ) {
/* operand size=64: 2nd qword on stack must be within stack limits,
* else #SS(0); */
if (!can_pop(16)) {
BX_ERROR(("return_protected: 2rd qword not in stack limits"));
exception(BX_SS_EXCEPTION, 0, 0);
}
stack_cs_offset = 8;
stack_param_offset = 16;
}
else
#endif
if ( i->os32L() ) {
/* operand size=32: 2nd dword on stack must be within stack limits,
* else #SS(0); */
if (!can_pop(8)) {
BX_ERROR(("return_protected: 2rd dword not in stack limits"));
exception(BX_SS_EXCEPTION, 0, 0);
}
stack_cs_offset = 4;
stack_param_offset = 8;
}
else {
/* operand size=16: second word on stack must be within stack limits,
* else #SS(0); */
if ( !can_pop(4) ) {
BX_ERROR(("return_protected: 2nd word not in stack limits"));
exception(BX_SS_EXCEPTION, 0, 0);
}
stack_cs_offset = 2;
stack_param_offset = 4;
}
#if BX_SUPPORT_X86_64
if (StackAddrSize64()) temp_RSP = RSP;
else
@ -99,70 +65,103 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
else temp_RSP = SP;
}
// return selector RPL must be >= CPL, else #GP(return selector)
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP +
stack_cs_offset, 2, CPL==3, BX_READ, &raw_cs_selector);
#if BX_SUPPORT_X86_64
if ( i->os64L() ) {
/* operand size=64: 2nd qword on stack must be within stack limits,
* else #SS(0); */
if (!can_pop(16))
{
BX_ERROR(("return_protected: 2rd qword not in stack limits"));
exception(BX_SS_EXCEPTION, 0, 0);
}
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 8,
2, CPL==3, BX_READ, &raw_cs_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
8, CPL==3, BX_READ, &return_RIP);
stack_param_offset = 16;
}
else
#endif
if ( i->os32L() ) {
/* operand size=32: 2nd dword on stack must be within stack limits,
* else #SS(0); */
if (!can_pop(8))
{
BX_ERROR(("return_protected: 2rd dword not in stack limits"));
exception(BX_SS_EXCEPTION, 0, 0);
}
Bit32u return_EIP = 0;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 4,
2, CPL==3, BX_READ, &raw_cs_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
4, CPL==3, BX_READ, &return_EIP);
return_RIP = return_EIP;
stack_param_offset = 8;
}
else {
/* operand size=16: second word on stack must be within stack limits,
* else #SS(0); */
if ( !can_pop(4) )
{
BX_ERROR(("return_protected: 2nd word not in stack limits"));
exception(BX_SS_EXCEPTION, 0, 0);
}
Bit16u return_IP = 0;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 2,
2, CPL==3, BX_READ, &raw_cs_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
2, CPL==3, BX_READ, &return_IP);
return_RIP = return_IP;
stack_param_offset = 4;
}
// selector must be non-null else #GP(0)
if ( (raw_cs_selector & 0xfffc) == 0 ) {
BX_INFO(("return_protected: CS selector null"));
exception(BX_GP_EXCEPTION, 0, 0);
}
parse_selector(raw_cs_selector, &cs_selector);
// selector index must be within its descriptor table limits,
// else #GP(selector)
fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
// descriptor AR byte must indicate code segment, else #GP(selector)
parse_descriptor(dword1, dword2, &cs_descriptor);
// return selector RPL must be >= CPL, else #GP(return selector)
if (cs_selector.rpl < CPL) {
BX_ERROR(("return_protected: CS.rpl < CPL"));
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
}
// check code-segment descriptor
check_cs(&cs_descriptor, raw_cs_selector, 0, cs_selector.rpl);
// if return selector RPL == CPL then
// RETURN TO SAME PRIVILEGE LEVEL
if (cs_selector.rpl == CPL)
{
// BX_INFO(("return: to same level %04x:%08x",
// BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
// BX_CPU_THIS_PTR prev_eip));
// return selector must be non-null, else #GP(0) (???)
if ((raw_cs_selector & 0xfffc) == 0) {
BX_INFO(("return_protected: CS null"));
exception(BX_GP_EXCEPTION, 0, 0);
}
// selector index must be within its descriptor table limits,
// else #GP(selector)
fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
// descriptor AR byte must indicate code segment, else #GP(selector)
parse_descriptor(dword1, dword2, &cs_descriptor);
check_cs(&cs_descriptor, raw_cs_selector, 0);
BX_DEBUG(("return_protected: return to SAME PRIVILEGE LEVEL"));
// top word on stack must be within stack limits, else #SS(0)
if ( !can_pop(stack_param_offset + pop_bytes) ) {
if (! can_pop(stack_param_offset + pop_bytes)) {
BX_ERROR(("return_protected: top word not in stack limits"));
exception(BX_SS_EXCEPTION, 0, 0); /* #SS(0) */
}
#if BX_SUPPORT_X86_64
if (i->os64L()) {
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP,
8, CPL==3, BX_READ, &return_RIP);
}
else
#endif
if (i->os32L()) {
Bit32u return_EIP;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP,
4, CPL==3, BX_READ, &return_EIP);
return_RIP = return_EIP;
}
else {
Bit16u return_IP;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP,
2, CPL==3, BX_READ, &return_IP);
return_RIP = return_IP;
exception(BX_SS_EXCEPTION, 0, 0);
}
branch_far64(&cs_selector, &cs_descriptor, return_RIP, CPL);
// increment eSP
#if BX_SUPPORT_X86_64
if (StackAddrSize64())
RSP += stack_param_offset + pop_bytes;
RSP += stack_param_offset + pop_bytes;
else
#endif
{
@ -171,7 +170,6 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
else
SP += stack_param_offset + pop_bytes;
}
return;
}
/* RETURN TO OUTER PRIVILEGE LEVEL */
@ -185,97 +183,75 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
/* + 2: CS | + 4: CS | + 8: CS */
/* + 0: IP | + 0: EIP | + 0: RIP */
//BX_INFO(("return: to outer level %04x:%08x",
// BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
// BX_CPU_THIS_PTR prev_eip));
#if BX_SUPPORT_X86_64
if (i->os64L()) {
BX_PANIC(("RETF64: return to outer priviledge level still not implemented !"));
}
#endif
/* top 32+immediate bytes on stack must be within stack limits, else #SS(0) */
if ( !can_pop(32 + pop_bytes) ) {
BX_ERROR(("return_protected: 32 bytes not within stack limits"));
exception(BX_SS_EXCEPTION, 0, 0);
}
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 24 + pop_bytes,
2, 0, BX_READ, &raw_ss_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 16 + pop_bytes,
8, 0, BX_READ, &return_RSP);
}
else
#endif
if (i->os32L()) {
/* top 16+immediate bytes on stack must be within stack limits, else #SS(0) */
if ( !can_pop(16 + pop_bytes) ) {
BX_ERROR(("return_protected: 16 bytes not within stack limits"));
exception(BX_SS_EXCEPTION, 0, 0); /* #SS(0) */
exception(BX_SS_EXCEPTION, 0, 0);
}
Bit32u return_ESP = 0;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 12 + pop_bytes,
2, 0, BX_READ, &raw_ss_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 8 + pop_bytes,
4, 0, BX_READ, &return_ESP);
return_RSP = return_ESP;
}
else {
/* top 8+immediate bytes on stack must be within stack limits, else #SS(0) */
if ( !can_pop(8 + pop_bytes) ) {
BX_ERROR(("return_protected: 8 bytes not within stack limits"));
exception(BX_SS_EXCEPTION, 0, 0); /* #SS(0) */
exception(BX_SS_EXCEPTION, 0, 0);
}
}
/* examine return CS selector and associated descriptor */
/* selector must be non-null else #GP(0) */
if ( (raw_cs_selector & 0xfffc) == 0 ) {
BX_INFO(("return_protected: CS selector null"));
exception(BX_GP_EXCEPTION, 0, 0);
}
/* selector index must be within its descriptor table limits,
* else #GP(selector) */
fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
parse_descriptor(dword1, dword2, &cs_descriptor);
check_cs(&cs_descriptor, raw_cs_selector, 0);
/* examine return SS selector and associated descriptor: */
#if BX_SUPPORT_X86_64
if (i->os64L()) {
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 24 + pop_bytes,
2, 0, BX_READ, &raw_ss_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 16 + pop_bytes,
8, 0, BX_READ, &return_RSP);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
8, 0, BX_READ, &return_RIP);
}
else
#endif
if (i->os32L()) {
Bit16u return_EIP, return_ESP;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 12 + pop_bytes,
2, 0, BX_READ, &raw_ss_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 8 + pop_bytes,
4, 0, BX_READ, &return_ESP);
return_RSP = return_ESP;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
4, 0, BX_READ, &return_EIP);
return_RIP = return_EIP;
}
else {
Bit16u return_SP, return_IP;
Bit16u return_SP = 0;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 6 + pop_bytes,
2, 0, BX_READ, &raw_ss_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 4 + pop_bytes,
2, 0, BX_READ, &return_SP);
return_RSP = return_SP;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
2, 0, BX_READ, &return_IP);
return_RIP = return_IP;
}
/* selector must be non-null else #GP(0) */
if ( (raw_ss_selector & 0xfffc) == 0 ) {
BX_INFO(("return_protected: SS selector null"));
exception(BX_GP_EXCEPTION, 0, 0);
}
/* selector index must be within its descriptor table limits,
* else #GP(selector) */
parse_selector(raw_ss_selector, &ss_selector);
if ((raw_ss_selector & 0xfffc) == 0) {
if (IsLongMode()) {
if (! IS_LONG64_SEGMENT(cs_descriptor) || (ss_selector.rpl == 3)) {
BX_ERROR(("return_protected: SS selector null"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
else // not in long or compatibility mode
{
BX_ERROR(("return_protected: SS selector null"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
parse_descriptor(dword1, dword2, &ss_descriptor);
/* selector RPL must = RPL of the return CS selector,
* else #GP(selector) */
if (ss_selector.rpl != cs_selector.rpl) {
BX_INFO(("return_protected: ss.rpl != cs.rpl"));
BX_ERROR(("return_protected: ss.rpl != cs.rpl"));
exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
}
@ -286,14 +262,14 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
ss_descriptor.u.segment.r_w==0)
{
BX_PANIC(("return_protected: SS.AR byte not writable data"));
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
}
/* descriptor dpl must = RPL of the return CS selector,
* else #GP(selector) (???) */
* else #GP(selector) */
if (ss_descriptor.dpl != cs_selector.rpl) {
BX_PANIC(("return_protected: SS.dpl != cs.rpl"));
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
BX_ERROR(("return_protected: SS.dpl != cs.rpl"));
exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
}
/* segment must be present else #SS(selector) */
@ -307,8 +283,13 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
/* load SS:SP from stack */
/* load SS-cache with return SS descriptor */
load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
#if BX_SUPPORT_X86_64
if (StackAddrSize64()) RSP = return_RSP + pop_bytes;
else
#endif
if (ss_descriptor.u.segment.d_b)
RSP = return_RSP + pop_bytes;
ESP = (Bit32u) return_RSP + pop_bytes;
else
SP = (Bit16u) return_RSP + pop_bytes;

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: segment_ctrl_pro.cc,v 1.44 2005-08-01 22:06:19 sshwarts Exp $
// $Id: segment_ctrl_pro.cc,v 1.45 2005-08-02 18:44:20 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -626,8 +626,6 @@ BX_CPU_C::fetch_raw_descriptor(bx_selector_t *selector,
}
}
#endif
bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::fetch_raw_descriptor2(bx_selector_t *selector,
Bit32u *dword1, Bit32u *dword2)
@ -655,3 +653,44 @@ BX_CPU_C::fetch_raw_descriptor2(bx_selector_t *selector,
return(1);
}
}
#if BX_SUPPORT_X86_64
// Fetch a 16-byte long-mode system descriptor from the GDT or LDT as
// three 32-bit words.  In IA-32e mode, system descriptors (LDT/TSS
// descriptors, call gates) are expanded to 16 bytes, hence the limit
// check covers index*8 .. index*8+15 and a third dword is returned.
//
//   selector     - selector whose ti bit and index locate the descriptor
//   dword1       - receives bits  31:0  of the descriptor
//   dword2       - receives bits  63:32 of the descriptor
//   dword3       - receives bits  95:64 of the descriptor (upper base)
//   exception_no - exception to raise, with the selector (RPL bits
//                  cleared) as error code, when the descriptor does not
//                  fit within the table limit
//
// Note: only the first 12 of the 16 descriptor bytes are read; the top
// dword (reserved/must-be-zero) is not returned to the caller.
void BX_CPU_C::fetch_raw_descriptor64(bx_selector_t *selector,
    Bit32u *dword1, Bit32u *dword2, Bit32u *dword3, Bit8u exception_no)
{
  if (selector->ti == 0) { /* GDT */
    // entire 16-byte descriptor must lie inside the GDT limit
    if ((selector->index*8 + 15) > BX_CPU_THIS_PTR gdtr.limit) {
      BX_ERROR(("fetch_raw_descriptor64: GDT: index (%x)%x > limit (%x)",
         (selector->index*8 + 15), selector->index,
         BX_CPU_THIS_PTR gdtr.limit));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    access_linear(BX_CPU_THIS_PTR gdtr.base + selector->index*8,     4, 0,
        BX_READ, dword1);
    access_linear(BX_CPU_THIS_PTR gdtr.base + selector->index*8 + 4, 4, 0,
        BX_READ, dword2);
    access_linear(BX_CPU_THIS_PTR gdtr.base + selector->index*8 + 8, 4, 0,
        BX_READ, dword3);
  }
  else { /* LDT */
    // an LDT reference with an invalid LDTR is an emulator-internal
    // inconsistency, not a guest-visible fault
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_PANIC(("fetch_raw_descriptor64: LDTR.valid=0"));
      debug(BX_CPU_THIS_PTR prev_eip);
    }
    // entire 16-byte descriptor must lie inside the LDT limit
    if ((selector->index*8 + 15) > BX_CPU_THIS_PTR ldtr.cache.u.ldt.limit) {
      BX_ERROR(("fetch_raw_descriptor64: LDT: index (%x)%x > limit (%x)",
         (selector->index*8 + 15), selector->index,
         BX_CPU_THIS_PTR ldtr.cache.u.ldt.limit));
      exception(exception_no, selector->value & 0xfffc, 0);
    }
    access_linear(BX_CPU_THIS_PTR ldtr.cache.u.ldt.base + selector->index*8,     4, 0,
        BX_READ, dword1);
    access_linear(BX_CPU_THIS_PTR ldtr.cache.u.ldt.base + selector->index*8 + 4, 4, 0,
        BX_READ, dword2);
    access_linear(BX_CPU_THIS_PTR ldtr.cache.u.ldt.base + selector->index*8 + 8, 4, 0,
        BX_READ, dword3);
  }
}
#endif
#endif