(cpu64) Merged a couple more files.

Kevin Lawton 2002-09-15 02:23:12 +00:00
parent 72f37c0fc5
commit b68c2b929a
3 changed files with 293 additions and 17 deletions

View File: Makefile.in

@@ -50,8 +50,6 @@ EXT_DEBUG_OBJS = @EXT_DEBUG_OBJS@
 # are not used for the x86-64 compile because they have not
 # been synced yet.
 OBJS32 = \
-  ctrl_xfer_pro.o \
-  segment_ctrl_pro.o \
   paging.o \
   bit.o \
   io.o \
@@ -104,6 +102,8 @@ OBJSXX = \
   flag_ctrl.o \
   mult32.o \
   arith32.o \
+  ctrl_xfer_pro.o \
+  segment_ctrl_pro.o \
 
 # Objects which are only used for x86-64 code, but which have been

View File: ctrl_xfer_pro.cc

@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: ctrl_xfer_pro.cc,v 1.13 2002-09-13 00:15:23 kevinlawton Exp $
+// $Id: ctrl_xfer_pro.cc,v 1.14 2002-09-15 02:23:12 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -32,11 +32,15 @@
 
+#if BX_SUPPORT_X86_64==0
+// Make life easier merging cpu64 & cpu code.
+#define RIP EIP
+#endif
 
 #if BX_CPU_LEVEL >= 2
   void
-BX_CPU_C::jump_protected(BxInstruction_t *i, Bit16u cs_raw, Bit32u disp32)
+BX_CPU_C::jump_protected(BxInstruction_t *i, Bit16u cs_raw, bx_address dispBig)
 {
   bx_descriptor_t descriptor;
   bx_selector_t   selector;
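
The `#define RIP EIP` block above is the main merge trick in this commit: when the x86-64 build is disabled, RIP is a plain alias for EIP, so routines like jump_protected can be written once against RIP and a widened bx_address displacement. A minimal sketch of how the two builds see the same assignment (the bx_address typedef here is an assumption about how Bochs sizes guest addresses, not part of this diff):

    #if BX_SUPPORT_X86_64
      typedef Bit64u bx_address;   // assumed: guest addresses widen to 64 bits
    #else
      typedef Bit32u bx_address;   // assumed: guest addresses stay 32 bits
      #define RIP EIP              // as added in the hunk above
    #endif

    // Shared code then compiles both ways: with BX_SUPPORT_X86_64==0,
    // "RIP = dispBig;" is textually "EIP = dispBig;" on a 32-bit value.
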
@@ -84,7 +88,7 @@ BX_CPU_C::jump_protected(BxInstruction_t *i, Bit16u cs_raw, Bit32u disp32)
       }
 
     /* instruction pointer must be in code segment limit else #GP(0) */
-    if (disp32 > descriptor.u.segment.limit_scaled) {
+    if (dispBig > descriptor.u.segment.limit_scaled) {
       BX_PANIC(("jump_protected: IP > limit"));
       exception(BX_GP_EXCEPTION, 0, 0);
       return;
@@ -94,7 +98,7 @@ BX_CPU_C::jump_protected(BxInstruction_t *i, Bit16u cs_raw, Bit32u disp32)
 
     /* Load CS-cache with new segment descriptor */
     /* CPL does not change for conforming code segment */
     load_cs(&selector, &descriptor, CPL);
-    EIP = disp32;
+    RIP = dispBig;
     return;
     }
@@ -122,7 +126,7 @@
       }
 
     /* IP must be in code segment limit else #GP(0) */
-    if (disp32 > descriptor.u.segment.limit_scaled) {
+    if (dispBig > descriptor.u.segment.limit_scaled) {
       BX_PANIC(("jump_protected: IP > limit"));
       exception(BX_GP_EXCEPTION, 0, 0);
       return;
@@ -132,7 +136,7 @@
     /* load CS-cache with new segment descriptor */
     /* set RPL field of CS register to CPL */
     load_cs(&selector, &descriptor, CPL);
-    EIP = disp32;
+    RIP = dispBig;
     return;
     }
   BX_PANIC(("jump_protected: segment=1"));
@@ -220,7 +224,7 @@ BX_CPU_C::jump_protected(BxInstruction_t *i, Bit16u cs_raw, Bit32u disp32)
       // examine selector to code segment given in call gate descriptor
       // selector must not be null, else #GP(0)
       gate_cs_raw = descriptor.u.gate286.dest_selector;
-      if ( (gate_cs_raw & 0xfffc) == 0 ) {
+      if ( (gate_cs_raw & 0xfffc) == 0) {
        BX_PANIC(("jump_protected: CS selector null"));
        exception(BX_GP_EXCEPTION, 0x0000, 0);
        }
@@ -382,7 +386,7 @@ BX_CPU_C::jump_protected(BxInstruction_t *i, Bit16u cs_raw, Bit32u disp32)
       // examine selector to code segment given in call gate descriptor
       // selector must not be null, else #GP(0)
       gate_cs_raw = descriptor.u.gate386.dest_selector;
-      if ( (gate_cs_raw & 0xfffc) == 0 ) {
+      if ( (gate_cs_raw & 0xfffc) == 0) {
        BX_PANIC(("jump_protected: CS selector null"));
        exception(BX_GP_EXCEPTION, 0x0000, 0);
        }
@@ -451,7 +455,7 @@
 
 #if BX_CPU_LEVEL >= 2
   void
-BX_CPU_C::call_protected(BxInstruction_t *i, Bit16u cs_raw, Bit32u disp32)
+BX_CPU_C::call_protected(BxInstruction_t *i, Bit16u cs_raw, bx_address dispBig)
 {
   bx_selector_t cs_selector;
   Bit32u dword1, dword2;
@@ -465,7 +469,7 @@ BX_CPU_C::call_protected(BxInstruction_t *i, Bit16u cs_raw, Bit32u disp32)
    */
 
   /* new cs selector must not be null, else #GP(0) */
-  if ( (cs_raw & 0xfffc) == 0 ) {
+  if ( (cs_raw & 0xfffc) == 0) {
     BX_PANIC(("call_protected: CS selector null"));
     exception(BX_GP_EXCEPTION, 0, 0);
     }
@@ -531,7 +535,7 @@ BX_CPU_C::call_protected(BxInstruction_t *i, Bit16u cs_raw, Bit32u disp32)
       }
 
     // IP must be in code seg limit, else #GP(0)
-    if (disp32 > cs_descriptor.u.segment.limit_scaled) {
+    if (dispBig > cs_descriptor.u.segment.limit_scaled) {
       BX_PANIC(("call_protected: IP not in code seg limit"));
       exception(BX_GP_EXCEPTION, 0, 0);
       }
@@ -547,7 +551,7 @@ BX_CPU_C::call_protected(BxInstruction_t *i, Bit16u cs_raw, Bit32u disp32)
       }
 
     // IP must be in code seg limit, else #GP(0)
-    if (disp32 > cs_descriptor.u.segment.limit_scaled) {
+    if (dispBig > cs_descriptor.u.segment.limit_scaled) {
       BX_PANIC(("call_protected: IP not in code seg limit"));
       exception(BX_GP_EXCEPTION, 0, 0);
       }
@@ -561,9 +565,15 @@
       // set RPL of CS to CPL
       // load eIP with new offset
       load_cs(&cs_selector, &cs_descriptor, CPL);
-      EIP = disp32;
+      RIP = dispBig;
+#if BX_SUPPORT_X86_64
+      // not sure about this ???? - fix to work in long mode
+      if (cs_descriptor.u.segment.d_b==0 && cs_descriptor.u.segment.l==0)
+        EIP &= 0x0000ffff;
+#else
       if (cs_descriptor.u.segment.d_b==0)
         EIP &= 0x0000ffff;
+#endif
       return;
       }
     else { // gate & special segment
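
The conditional masking above decides how much of the new instruction pointer survives a far call, and the in-line "not sure about this ????" note marks the long-mode case as an open question. Distilled into a helper for clarity (a sketch, not code from this commit; the explicit 32-bit mask in the last branch is an assumption the diff leaves implicit):

    static inline Bit64u far_branch_ip(Bit64u dispBig, bx_bool l, bx_bool d_b)
    {
      if (l)        return dispBig;                // CS.L=1: full 64-bit RIP
      if (d_b == 0) return dispBig & 0x0000ffff;   // 16-bit segment: keep IP
      return dispBig & 0xffffffff;                 // 32-bit segment (assumed)
    }
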
@@ -1446,7 +1456,244 @@ BX_CPU_C::iret_protected(BxInstruction_t *i)
     return;
     }
-  else { /* NT = 0: INTERRUPT RETURN ON STACK -or STACK_RETURN_TO_V86 */
+#if BX_SUPPORT_X86_64
+  else if (BX_CPU_THIS_PTR msr.lma) { // LONG MODE
+    //BX_DEBUG (("LONG mode IRET"));
+    /* NT = 0: INTERRUPT RETURN ON STACK -or STACK_RETURN_TO_V86 */
+    Bit16u top_nbytes_same, top_nbytes_outer;
+    Bit64u cs_offset, ss_offset;
+    Bit64u new_rip, new_rsp, temp_RSP, new_eflags;
+    Bit8u  prev_cpl;
+
+    /* 64bit opsize
+     * ============
+     *   SS     eSP+32
+     *   ESP    eSP+24
+     * -------------
+     *   EFLAGS eSP+16
+     *   CS     eSP+8
+     *   EIP    eSP+0
+     */
+
+    top_nbytes_same  = 24;
+    top_nbytes_outer = 40;
+    cs_offset = 8;
+    ss_offset = 32;
+
+    /* CS on stack must be within stack limits, else #SS(0) */
+    if ( !can_pop(top_nbytes_same) ) {
+      BX_PANIC(("iret: CS not within stack limits"));
+      exception(BX_SS_EXCEPTION, 0, 0);
+      return;
+      }
+
+    temp_RSP = RSP;
+
+    access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_RSP + cs_offset,
+      2, CPL==3, BX_READ, &raw_cs_selector);
+    access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_RSP + 0,
+      8, CPL==3, BX_READ, &new_rip);
+    access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_RSP + 16,
+      8, CPL==3, BX_READ, &new_eflags);
+
+    // if VM=1 in flags image on stack then STACK_RETURN_TO_V86
+    if (new_eflags & 0x00020000) {
+      BX_PANIC(("iret: no V86 mode in x86-64 LONG mode"));
+      return;
+      }
+
+    parse_selector(raw_cs_selector, &cs_selector);
+
+    // return CS selector must be non-null, else #GP(0)
+    if ( (raw_cs_selector & 0xfffc) == 0 ) {
+      BX_PANIC(("iret: return CS selector null"));
+      exception(BX_GP_EXCEPTION, 0, 0);
+      return;
+      }
+
+    // selector index must be within descriptor table limits,
+    // else #GP(return selector)
+    fetch_raw_descriptor(&cs_selector, &dword1, &dword2,
+      BX_GP_EXCEPTION);
+    parse_descriptor(dword1, dword2, &cs_descriptor);
+
+    // AR byte must indicate code segment else #GP(return selector)
+    if ( cs_descriptor.valid==0 ||
+         cs_descriptor.segment==0 ||
+         cs_descriptor.u.segment.executable==0 ) {
+      BX_PANIC(("iret: AR byte indicated non code segment"));
+      exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
+      return;
+      }
+
+    // return CS selector RPL must be >= CPL, else #GP(return selector)
+    if (cs_selector.rpl < CPL) {
+      BX_PANIC(("iret: return selector RPL < CPL"));
+      exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
+      return;
+      }
+
+    // if return code seg descriptor is conforming
+    //   and return code seg DPL > return code seg selector RPL
+    //     then #GP(return selector)
+    if ( cs_descriptor.u.segment.c_ed &&
+         cs_descriptor.dpl > cs_selector.rpl ) {
+      BX_PANIC(("iret: conforming, DPL > cs_selector.RPL"));
+      exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
+      return;
+      }
+
+    // if return code seg descriptor is non-conforming
+    //   and return code seg DPL != return code seg selector RPL
+    //     then #GP(return selector)
+    if ( cs_descriptor.u.segment.c_ed==0 &&
+         cs_descriptor.dpl != cs_selector.rpl ) {
+      BX_INFO(("(mch) iret: Return with DPL != RPL. #GP(selector)"));
+      exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
+      return;
+      }
+
+    // segment must be present else #NP(return selector)
+    if ( cs_descriptor.p==0 ) {
+      BX_PANIC(("iret: not present"));
+      exception(BX_NP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
+      return;
+      }
+
+    if (cs_selector.rpl == CPL
+        && BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64) { /* INTERRUPT RETURN TO SAME LEVEL */
+      /* top 24 bytes on stack must be within limits, else #SS(0) */
+      /* satisfied above */
+
+      /* load CS:EIP from stack */
+      /* load CS-cache with new code segment descriptor */
+      load_cs(&cs_selector, &cs_descriptor, CPL);
+      RIP = new_rip;
+
+      /* load EFLAGS with 3rd doubleword from stack */
+      write_eflags(new_eflags, CPL==0, CPL<=BX_CPU_THIS_PTR get_IOPL(), 0, 1);
+
+      /* increment stack by 24 */
+      RSP += top_nbytes_same;
+      }
+    else { /* INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL or 64 bit mode */
+      /* 64bit opsize
+       * ============
+       *   SS     eSP+32
+       *   ESP    eSP+24
+       *   EFLAGS eSP+16
+       *   CS     eSP+8
+       *   EIP    eSP+0
+       */
+
+      /* top 10/20 bytes on stack must be within limits else #SS(0) */
+      if ( !can_pop(top_nbytes_outer) ) {
+        BX_PANIC(("iret: top 10/20 bytes not within stack limits"));
+        exception(BX_SS_EXCEPTION, 0, 0);
+        return;
+        }
+
+      /* examine return SS selector and associated descriptor */
+      access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_RSP + ss_offset,
+        2, 0, BX_READ, &raw_ss_selector);
+
+      // long mode - allowed to be null
+      ///* selector must be non-null, else #GP(0) */
+      //if ( (raw_ss_selector & 0xfffc) == 0 ) {
+      //  BX_PANIC(("iret: SS selector null"));
+      //  exception(BX_GP_EXCEPTION, 0, 0);
+      //  return;
+      //  }
+
+      parse_selector(raw_ss_selector, &ss_selector);
+
+      /* selector RPL must = RPL of return CS selector,
+       * else #GP(SS selector) */
+      if ( ss_selector.rpl != cs_selector.rpl) {
+        BX_PANIC(("iret: SS.rpl != CS.rpl"));
+        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
+        return;
+        }
+
+      /* selector index must be within its descriptor table limits,
+       * else #GP(SS selector) */
+      fetch_raw_descriptor(&ss_selector, &dword1, &dword2,
+        BX_GP_EXCEPTION);
+      parse_descriptor(dword1, dword2, &ss_descriptor);
+
+      /* AR byte must indicate a writable data segment,
+       * else #GP(SS selector) */
+      if ( ss_descriptor.valid==0 ||
+           ss_descriptor.segment==0 ||
+           ss_descriptor.u.segment.executable ||
+           ss_descriptor.u.segment.r_w==0 ) {
+        BX_PANIC(("iret: SS AR byte not writable code segment"));
+        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
+        return;
+        }
+
+      /* stack segment DPL must equal the RPL of the return CS selector,
+       * else #GP(SS selector) */
+      if ( ss_descriptor.dpl != cs_selector.rpl ) {
+        BX_PANIC(("iret: SS.dpl != CS selector RPL"));
+        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
+        return;
+        }
+
+      /* SS must be present, else #NP(SS selector) */
+      if ( ss_descriptor.p==0 ) {
+        BX_PANIC(("iret: SS not present!"));
+        exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
+        return;
+        }
+
+      access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_RSP + 0,
+        8, 0, BX_READ, &new_rip);
+      access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_RSP + 16,
+        4, 0, BX_READ, &new_eflags);
+      access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_RSP + 24,
+        8, 0, BX_READ, &new_rsp);
+
+      /* RIP must be in code segment limit, else #GP(0) */
+      if (cs_descriptor.u.segment.l == 0 && new_rip > cs_descriptor.u.segment.limit_scaled ) {
+        BX_PANIC(("iret: IP > descriptor limit"));
+        exception(BX_GP_EXCEPTION, 0, 0);
+        return;
+        }
+
+      /* load CS:RIP from stack */
+      /* load the CS-cache with CS descriptor */
+      /* set CPL to the RPL of the return CS selector */
+      prev_cpl = CPL; /* previous CPL */
+      load_cs(&cs_selector, &cs_descriptor, cs_selector.rpl);
+      BX_CPU_THIS_PTR rip = new_rip;
+
+      /* load flags from stack */
+      // perhaps I should always write_eflags(), thus zeroing
+      // out the upper 16bits of eflags for CS.D_B==0 ???
+      if (cs_descriptor.u.segment.d_b)
+        write_eflags(new_eflags, prev_cpl==0, prev_cpl<=BX_CPU_THIS_PTR get_IOPL(), 0, 1);
+      else
+        write_flags((Bit16u) new_eflags, prev_cpl==0, prev_cpl<=BX_CPU_THIS_PTR get_IOPL());
+
+      // load SS:RSP from stack
+      // load the SS-cache with SS descriptor
+      load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
+      RSP = new_rsp;
+
+      validate_seg_regs();
+      return;
+      }
+    }
+#endif  // #if BX_SUPPORT_X86_64
+  else {
+    /* NT = 0: INTERRUPT RETURN ON STACK -or STACK_RETURN_TO_V86 */
     Bit16u top_nbytes_same, top_nbytes_outer;
     Bit32u cs_offset, ss_offset;
     Bit32u new_eip, new_esp, temp_ESP, new_eflags;
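
The long-mode IRET path added above pops five 8-byte stack slots. Written out as a struct, the frame that the access_linear calls walk looks like this (illustrative only; the code reads each field individually at the offsets given in its comment block):

    struct iret64_frame {   // offsets relative to RSP at the IRET
      Bit64u rip;           // +0   new RIP
      Bit64u cs;            // +8   selector in the low 16 bits
      Bit64u rflags;        // +16  flags image (VM=1 here is rejected)
      Bit64u rsp;           // +24  consumed only on the outer-level path
      Bit64u ss;            // +32  selector in the low 16 bits, may be null
    };                      // 40 bytes == top_nbytes_outer; the same-level
                            // path pops only the first 24 (top_nbytes_same)
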

View File: segment_ctrl_pro.cc

@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: segment_ctrl_pro.cc,v 1.11 2001-11-13 05:11:41 bdenney Exp $
+// $Id: segment_ctrl_pro.cc,v 1.12 2002-09-15 02:23:12 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -60,6 +60,9 @@ BX_CPU_C::load_seg_reg(bx_segment_reg_t *seg, Bit16u new_value)
       seg->cache.u.segment.limit_scaled = 0xffff;
       seg->cache.u.segment.g   = 0; /* byte granular */
       seg->cache.u.segment.d_b = 0; /* default 16bit size */
+#if BX_SUPPORT_X86_64
+      seg->cache.u.segment.l   = 0; /* default 16bit size */
+#endif
       seg->cache.u.segment.avl = 0;
 
       return;
@@ -318,6 +321,9 @@ BX_CPU_C::load_seg_reg(bx_segment_reg_t *seg, Bit16u new_value)
 #if BX_CPU_LEVEL >= 3
     BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g   = 0; /* byte granular */
     BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 0; /* default 16bit size */
+#if BX_SUPPORT_X86_64
+    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l   = 0; /* default 16bit size */
+#endif
     BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0;
 #endif
     }
@@ -378,6 +384,9 @@ BX_CPU_C::parse_descriptor(Bit32u dword1, Bit32u dword2, bx_descriptor_t *temp)
       temp->u.segment.limit |= (dword2 & 0x000F0000);
       temp->u.segment.g     =  (dword2 & 0x00800000) > 0;
       temp->u.segment.d_b   =  (dword2 & 0x00400000) > 0;
+#if BX_SUPPORT_X86_64
+      temp->u.segment.l     =  (dword2 & 0x00200000) > 0;
+#endif
      temp->u.segment.avl    =  (dword2 & 0x00100000) > 0;
      temp->u.segment.base   |= (dword2 & 0xFF000000);
      if (temp->u.segment.g) {
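
The new L bit sits directly beside its neighbors in the high byte of descriptor dword2, so the decode above can be read straight off the masks: G is bit 23, D/B bit 22, L bit 21, AVL bit 20. As a standalone sketch of just the added line:

    // Bit 21 of dword2 is the x86-64 "L" flag: 1 = 64-bit code segment.
    static inline bx_bool descriptor_l_bit(Bit32u dword2)
    {
      return (dword2 & 0x00200000) > 0;
    }
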
@@ -501,6 +510,16 @@ BX_CPU_C::load_cs(bx_selector_t *selector, bx_descriptor_t *descriptor,
   // (BW) Added cpl to the selector value.
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value =
     (0xfffc & BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value) | cpl;
+#if BX_SUPPORT_X86_64
+  if (BX_CPU_THIS_PTR msr.lma) {
+    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l) {
+      BX_CPU_THIS_PTR cpu_mode = BX_MODE_LONG_64;
+      }
+    else {
+      BX_CPU_THIS_PTR cpu_mode = BX_MODE_LONG_COMPAT;
+      }
+    }
+#endif
   }
 
 void
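
With the L bit cached in the CS descriptor, load_cs above becomes the single point where the emulator switches sub-modes: EFER.LMA gates long mode, and CS.L then selects 64-bit versus compatibility execution. The decision table as a sketch (the BX_MODE_* names are from the diff; leaving legacy modes untouched when LMA is clear is an assumption about the surrounding code):

    static unsigned mode_after_cs_load(bx_bool lma, bx_bool cs_l, unsigned prev_mode)
    {
      if (!lma) return prev_mode;          // LMA=0: legacy mode unchanged
      return cs_l ? BX_MODE_LONG_64        // LMA=1, CS.L=1: 64-bit mode
                  : BX_MODE_LONG_COMPAT;   // LMA=1, CS.L=0: compatibility
    }
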
@@ -518,6 +537,16 @@ BX_CPU_C::load_ss(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cp
     }
   }
 
+#if BX_SUPPORT_X86_64
+void
+BX_CPU_C::load_ss_null(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cpl)
+{
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector     = *selector;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache        = *descriptor;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.rpl = cpl;
+}
+#endif
+
 #if BX_CPU_LEVEL >= 2
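
load_ss_null exists because 64-bit mode legitimately runs with a null SS (the commented-out #GP(0) check in the IRET hunk above is the other half of this change), so the usual null-selector fault in load_ss must be bypassed while the RPL is still forced to the new privilege level. A hypothetical call shape, not taken from this commit:

    bx_selector_t   null_sel;
    bx_descriptor_t null_desc;
    parse_selector(0x0000, &null_sel);         // null selector: index 0, RPL 0
    memset(&null_desc, 0, sizeof(null_desc));  // assumed: empty descriptor cache
    // load_ss_null(&null_sel, &null_desc, CPL);
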