/////////////////////////////////////////////////////////////////////////
// $Id: ctrl_xfer_pro.cc,v 1.48 2005-08-01 21:40:13 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    MandrakeSoft S.A.
//    43, rue d'Aboukir
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64==0
// Make life easier merging cpu64 & cpu code.
#define RIP EIP
#define RSP ESP
#endif
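
// Far JMP in protected mode: validate the destination selector, then either
// branch through a code-segment descriptor, switch tasks via an available TSS
// or task gate, or follow a 286/386 call gate to the code segment it names.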
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::jump_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
{
  bx_descriptor_t descriptor;
  bx_selector_t selector;
  Bit32u dword1, dword2;

  /* destination selector is not null else #GP(0) */
  if ((cs_raw & 0xfffc) == 0) {
    BX_ERROR(("jump_protected: cs == 0"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  parse_selector(cs_raw, &selector);

  /* destination selector index is within its descriptor table
     limits else #GP(selector) */
  fetch_raw_descriptor(&selector, &dword1, &dword2, BX_GP_EXCEPTION);

  /* examine AR byte of destination selector for legal values: */
  parse_descriptor(dword1, dword2, &descriptor);

  if ( descriptor.segment ) {
    check_cs(&descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw));
    branch_far64(&selector, &descriptor, disp, CPL);
    return;
  }
  else {
    Bit16u raw_tss_selector;
    bx_selector_t tss_selector, gate_cs_selector;
    bx_descriptor_t tss_descriptor, gate_cs_descriptor;
    Bit16u gate_cs_raw;
    Bit32u temp_eIP;

#if BX_SUPPORT_X86_64
    if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
      if (descriptor.type != BX_386_CALL_GATE) {
        BX_ERROR(("jump_protected: gate type %u unsupported in long mode", (unsigned) descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
      }
    }
#endif

    // call gate DPL must be >= CPL else #GP(gate selector)
    if (descriptor.dpl < CPL) {
      BX_ERROR(("jump_protected: call gate.dpl < CPL"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

    // call gate DPL must be >= gate selector RPL else #GP(gate selector)
    if (descriptor.dpl < selector.rpl) {
      BX_ERROR(("jump_protected: call gate.dpl < selector.rpl"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

    switch ( descriptor.type ) {
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:

        //if ( descriptor.type==BX_SYS_SEGMENT_AVAIL_286_TSS )
        //  BX_INFO(("jump to 286 TSS"));
        //else
        //  BX_INFO(("jump to 386 TSS"));

        // Task State Seg must be present, else #NP(TSS selector)
        // checked in task_switch()

        // SWITCH_TASKS _without_ nesting to TSS
        task_switch(&selector, &descriptor, BX_TASK_FROM_JUMP, dword1, dword2);

        // EIP must be in code seg limit, else #GP(0)
        if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
          BX_ERROR(("jump_protected: EIP not within CS limits"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }
        return;

      case BX_TASK_GATE:
        // task gate must be present else #NP(gate selector)
        if (! IS_PRESENT(descriptor)) {
          BX_ERROR(("jump_protected: task gate.p == 0"));
          exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
        }

        // examine selector to TSS, given in Task Gate descriptor
        // must specify global in the local/global bit else #GP(TSS selector)
        raw_tss_selector = descriptor.u.taskgate.tss_selector;
        parse_selector(raw_tss_selector, &tss_selector);

        if (tss_selector.ti) {
          BX_ERROR(("jump_protected: tss_selector.ti=1"));
          exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // index must be within GDT limits else #GP(TSS selector)
        fetch_raw_descriptor(&tss_selector, &dword1, &dword2, BX_GP_EXCEPTION);

        // descriptor AR byte must specify available TSS
        //   else #GP(TSS selector)
        parse_descriptor(dword1, dword2, &tss_descriptor);

        if (tss_descriptor.valid==0 || tss_descriptor.segment) {
          BX_ERROR(("jump_protected: TSS selector points to bad TSS"));
          exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }
        if (tss_descriptor.type!=9 && tss_descriptor.type!=1) {
          BX_ERROR(("jump_protected: TSS selector points to bad TSS"));
          exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // task state segment must be present, else #NP(tss selector)
        if (! IS_PRESENT(tss_descriptor)) {
          BX_ERROR(("jump_protected: task descriptor.p == 0"));
          exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // SWITCH_TASKS _without_ nesting to TSS
        task_switch(&tss_selector, &tss_descriptor,
                    BX_TASK_FROM_JUMP, dword1, dword2);

        // EIP must be within code segment limit, else #GP(0)
        if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
          temp_eIP = EIP;
        else
          temp_eIP = IP;

        if (temp_eIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
          BX_ERROR(("jump_protected: EIP > CS.limit"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }
        break;
      case BX_286_CALL_GATE:
        BX_DEBUG(("jump_protected: JUMP TO 286 CALL GATE"));

        // gate must be present else #NP(gate selector)
        if (! IS_PRESENT(descriptor)) {
          BX_ERROR(("jump_protected: call gate not present !"));
          exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
        }

        // examine selector to code segment given in call gate descriptor
        // selector must not be null, else #GP(0)
        gate_cs_raw = descriptor.u.gate286.dest_selector;

        if ((gate_cs_raw & 0xfffc) == 0) {
          BX_ERROR(("jump_protected: CS selector null"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }

        parse_selector(gate_cs_raw, &gate_cs_selector);
        // selector must be within its descriptor table limits else #GP(CS selector)
        fetch_raw_descriptor(&gate_cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
        parse_descriptor(dword1, dword2, &gate_cs_descriptor);

        // check code-segment descriptor, no need to check selector RPL
        // for non-conforming segments
        check_cs(&gate_cs_descriptor, gate_cs_raw, 0);

        branch_far32(&gate_cs_selector, &gate_cs_descriptor,
                     descriptor.u.gate286.dest_offset, CPL);
        return;

      case BX_386_CALL_GATE:

#if BX_SUPPORT_X86_64
        if (BX_CPU_THIS_PTR msr.lma) { // LONG MODE
          BX_PANIC(("jump to CALL_GATE64 still not implemented"));
        }
#endif

        // gate must be present else #NP(gate selector)
        if (! IS_PRESENT(descriptor)) {
          BX_ERROR(("jump_protected: call gate.p == 0"));
          exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
          return;
        }
        // examine selector to code segment given in call gate descriptor
        // selector must not be null, else #GP(0)
        gate_cs_raw = descriptor.u.gate386.dest_selector;

        if ((gate_cs_raw & 0xfffc) == 0) {
          BX_ERROR(("jump_protected: CS selector null"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }

        parse_selector(gate_cs_raw, &gate_cs_selector);
        // selector must be within its descriptor table limits else #GP(CS selector)
        fetch_raw_descriptor(&gate_cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
        parse_descriptor(dword1, dword2, &gate_cs_descriptor);

/*
   STILL NOT IMPLEMENTED ...

   IF (LONG_MODE)
   {
     // in long mode, we need to read the 2nd half of a 16-byte call-gate
     // from the gdt/ldt to get the upper 32 bits of the target RIP
     temp_upper = READ_MEM.q [temp_sel+8]

     // Make sure the extended attribute bits are all zero.
     IF (temp_upper's extended attribute bits != 0)
       EXCEPTION [#GP(temp_sel)]

     // concatenate both halves of RIP
     temp_RIP = tempRIP + (temp_upper SHL 32)
   }

   // set up new CS base, attr, limits
   CS = READ_DESCRIPTOR (temp_desc.segment, clg_chk)

   IF ((64BIT_MODE) && (temp_RIP is non-canonical)
       || (!64BIT_MODE) && (temp_RIP > CS.limit))
   {
     EXCEPTION [#GP(0)]
   }

   RIP = temp_RIP
*/

        // check code-segment descriptor, no need to check selector RPL
        // for non-conforming segments
        check_cs(&gate_cs_descriptor, gate_cs_raw, 0);

        // IP must be in code segment limit else #GP(0)
        if ( descriptor.u.gate386.dest_offset >
             gate_cs_descriptor.u.segment.limit_scaled )
        {
          BX_ERROR(("jump_protected: EIP > limit"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }

        // load CS:IP from call gate
        // load CS cache with new code segment
        // set rpl of CS to CPL
        load_cs(&gate_cs_selector, &gate_cs_descriptor, CPL);
        EIP = descriptor.u.gate386.dest_offset;
        return;

      default:
        BX_ERROR(("jump_protected: gate type %u unsupported", (unsigned) descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }
  }
}
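
// Far CALL in protected mode: push the return CS:eIP for a direct call to a
// code segment, or follow a TSS / task gate / call gate. A call gate may raise
// privilege, in which case a new stack is loaded from the TSS and parameters
// are copied across.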
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor;

  /* new cs selector must not be null, else #GP(0) */
  if ((cs_raw & 0xfffc) == 0) {
    BX_ERROR(("call_protected: CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  parse_selector(cs_raw, &cs_selector);

  // check new CS selector index within its descriptor limits,
  // else #GP(new CS selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // examine AR byte of selected descriptor for various legal values
  if (cs_descriptor.valid==0) {
    BX_ERROR(("call_protected: invalid CS descriptor"));
    exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
  }

  if (cs_descriptor.segment)   // normal segment
  {
    check_cs(&cs_descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw));

#if BX_SUPPORT_X86_64
    if (i->os64L()) {
      // push return address onto stack (CS padded to 64bits)
      push_64((Bit64u) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_64(RIP);
    }
    else
#endif
    if (i->os32L()) {
      // push return address onto stack (CS padded to 32bits)
      push_32((Bit32u) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_32(EIP);
    }
    else {
      // push return address onto stack
      push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_16(IP);
    }

    // load code segment descriptor into CS cache
    // load CS with new code segment selector
    // set RPL of CS to CPL
    branch_far64(&cs_selector, &cs_descriptor, disp, CPL);

    return;
  }
  else { // gate & special segment
    bx_descriptor_t gate_descriptor;
    bx_selector_t   gate_selector;
    Bit32u new_EIP;
    Bit16u dest_selector;
    Bit16u raw_tss_selector;
    bx_selector_t   tss_selector;
    bx_descriptor_t tss_descriptor;
    Bit32u temp_eIP;

    /* 1 level of indirection via gate, switch gate & cs */
    gate_descriptor = cs_descriptor;
    gate_selector   = cs_selector;

    // descriptor DPL must be >= CPL else #GP(gate selector)
    if (gate_descriptor.dpl < CPL) {
      BX_ERROR(("call_protected: descriptor.dpl < CPL"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

    // descriptor DPL must be >= gate selector RPL else #GP(gate selector)
    if (gate_descriptor.dpl < gate_selector.rpl) {
      BX_ERROR(("call_protected: descriptor.dpl < selector.rpl"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

    switch (gate_descriptor.type) {
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:

        //if (gate_descriptor.type==1)
        //  BX_INFO(("call_protected: 16bit available TSS"));
        //else
        //  BX_INFO(("call_protected: 32bit available TSS"));

        // Task State Seg must be present, else #NP(TSS selector)
        // checked in task_switch()

        // SWITCH_TASKS _without_ nesting to TSS
        task_switch(&gate_selector, &gate_descriptor,
                    BX_TASK_FROM_CALL_OR_INT, dword1, dword2);

        // EIP must be in code seg limit, else #GP(0)
        if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
          BX_ERROR(("call_protected: EIP not within CS limits"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }
        return;

      case BX_TASK_GATE:
        // task gate must be present else #NP(gate selector)
        if (! IS_PRESENT(gate_descriptor)) {
          BX_ERROR(("call_protected: task gate.p == 0"));
          exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
        }

        // examine selector to TSS, given in Task Gate descriptor
        // must specify global in the local/global bit else #TS(TSS selector)
        raw_tss_selector = gate_descriptor.u.taskgate.tss_selector;
        parse_selector(raw_tss_selector, &tss_selector);
        if (tss_selector.ti) {
          BX_ERROR(("call_protected: tss_selector.ti=1"));
          exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // index must be within GDT limits else #TS(TSS selector)
        fetch_raw_descriptor(&tss_selector, &dword1, &dword2, BX_TS_EXCEPTION);

        // descriptor AR byte must specify available TSS
        //   else #TS(TSS selector)
        parse_descriptor(dword1, dword2, &tss_descriptor);
        if (tss_descriptor.valid==0 || tss_descriptor.segment) {
          BX_ERROR(("call_protected: TSS selector points to bad TSS"));
          exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }
        if (tss_descriptor.type!=9 && tss_descriptor.type!=1) {
          BX_ERROR(("call_protected: TSS selector points to bad TSS"));
          exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // task state segment must be present, else #NP(tss selector)
        if (! IS_PRESENT(tss_descriptor)) {
          BX_ERROR(("call_protected: task descriptor.p == 0"));
          exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // SWITCH_TASKS without nesting to TSS
        task_switch(&tss_selector, &tss_descriptor,
                    BX_TASK_FROM_CALL_OR_INT, dword1, dword2);

        // EIP must be within code segment limit, else #TS(0)
        if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
          temp_eIP = EIP;
        else
          temp_eIP = IP;

        if (temp_eIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
        {
          BX_ERROR(("call_protected: EIP > CS.limit"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }
        return;
      case BX_286_CALL_GATE:
      case BX_386_CALL_GATE:

        //if (gate_descriptor.type==BX_286_CALL_GATE)
        //  BX_INFO(("CALL: 16bit call gate"));
        //else
        //  BX_INFO(("CALL: 32bit call gate"));

        // call gate must be present, else #NP(call gate selector)
        if (! IS_PRESENT(gate_descriptor)) {
          BX_ERROR(("call_protected: not present"));
          exception(BX_NP_EXCEPTION, gate_selector.value & 0xfffc, 0);
        }

        // examine code segment selector in call gate descriptor
        if (gate_descriptor.type==BX_286_CALL_GATE) {
          dest_selector = gate_descriptor.u.gate286.dest_selector;
          new_EIP = gate_descriptor.u.gate286.dest_offset;
        }
        else {
          dest_selector = gate_descriptor.u.gate386.dest_selector;
          new_EIP = gate_descriptor.u.gate386.dest_offset;
        }

        // selector must not be null else #GP(0)
        if ( (dest_selector & 0xfffc) == 0 ) {
          BX_ERROR(("call_protected: selector in gate null"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }

        parse_selector(dest_selector, &cs_selector);
        // selector must be within its descriptor table limits,
        //   else #GP(code segment selector)
        fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
        parse_descriptor(dword1, dword2, &cs_descriptor);

        // AR byte of selected descriptor must indicate code segment,
        //   else #GP(code segment selector)
        // DPL of selected descriptor must be <= CPL,
        //   else #GP(code segment selector)
        if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
            cs_descriptor.u.segment.executable==0 ||
            cs_descriptor.dpl > CPL)
        {
          BX_ERROR(("call_protected: selected descriptor is not code"));
          exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc, 0);
        }

        // CALL GATE TO MORE PRIVILEGE
        // if non-conforming code segment and DPL < CPL then
        if ( (cs_descriptor.u.segment.c_ed==0) &&
             (cs_descriptor.dpl < CPL) )
        {
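          // Inner-privilege call gate path: fetch SS:ESP for the target DPL
          // from the TSS, validate the new stack segment, copy up to 31
          // parameters from the old stack, then push the caller's SS:eSP and
          // CS:eIP on the new stack before transferring to the gate target.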
          Bit16u SS_for_cpl_x;
          Bit32u ESP_for_cpl_x;
          bx_selector_t   ss_selector;
          bx_descriptor_t ss_descriptor;
          unsigned room_needed;
          Bit8u   param_count;
          Bit16u  return_SS, return_CS;
          Bit32u  return_ESP, return_EIP;
          Bit32u  return_ss_base;
          unsigned i;
          Bit16u  parameter_word[32];
          Bit32u  parameter_dword[32];
          Bit32u  temp_ESP;

          // BX_INFO(("CALL: Call Gate: to more privileged level"));

          // Help for OS/2
          BX_CPU_THIS_PTR except_chk = 1;
          BX_CPU_THIS_PTR except_cs  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
          BX_CPU_THIS_PTR except_ss  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;

          // get new SS selector for new privilege level from TSS
          get_SS_ESP_from_TSS(cs_descriptor.dpl,
                              &SS_for_cpl_x, &ESP_for_cpl_x);

          // check selector & descriptor for new SS:
          // selector must not be null, else #TS(0)
          if ( (SS_for_cpl_x & 0xfffc) == 0 ) {
            if (! IsLongMode()) {
              BX_ERROR(("call_protected: new SS null"));
              exception(BX_TS_EXCEPTION, 0, 0);
            }
          }

          // selector index must be within its descriptor table limits,
          //   else #TS(SS selector)
          parse_selector(SS_for_cpl_x, &ss_selector);
          fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_TS_EXCEPTION);
          parse_descriptor(dword1, dword2, &ss_descriptor);

          // selector's RPL must equal DPL of code segment,
          //   else #TS(SS selector)
          if (ss_selector.rpl != cs_descriptor.dpl) {
            BX_DEBUG(("call_protected: SS selector.rpl != CS descr.dpl"));
            exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // stack segment DPL must equal DPL of code segment,
          //   else #TS(SS selector)
          if (ss_descriptor.dpl != cs_descriptor.dpl) {
            BX_PANIC(("call_protected: SS descr.rpl != CS descr.dpl"));
            exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // descriptor must indicate writable data segment,
          //   else #TS(SS selector)
          if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
              ss_descriptor.u.segment.executable ||
              ss_descriptor.u.segment.r_w==0)
          {
            BX_INFO(("call_protected: ss descriptor not writable data seg"));
            exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // segment must be present, else #SS(SS selector)
          if (! IS_PRESENT(ss_descriptor)) {
            if (! IsLongMode()) {
              BX_ERROR(("call_protected: ss descriptor not present"));
              exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
            }
          }

          if ( cs_descriptor.u.segment.d_b )
            // new stack must have room for parameters plus 16 bytes
            room_needed = 16;
          else
            // new stack must have room for parameters plus 8 bytes
            room_needed =  8;

          if (gate_descriptor.type==BX_286_CALL_GATE) {
            // get word count from call gate, mask to 5 bits
            param_count = gate_descriptor.u.gate286.word_count & 0x1f;
            room_needed += param_count*2;
          }
          else {
            // get word count from call gate, mask to 5 bits
            param_count = gate_descriptor.u.gate386.dword_count & 0x1f;
            room_needed += param_count*4;
          }

          // new stack must have room for parameters plus return info
          //   else #SS(SS selector)
          if ( !can_push(&ss_descriptor, ESP_for_cpl_x, room_needed) ) {
            BX_INFO(("call_protected: stack doesn't have room"));
            exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // new eIP must be in code segment limit else #GP(0)
          if ( new_EIP > cs_descriptor.u.segment.limit_scaled ) {
            BX_ERROR(("call_protected: EIP not within CS limits"));
            exception(BX_GP_EXCEPTION, 0, 0);
          }

          // save return SS:eSP to be pushed on new stack
          return_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
          if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
            return_ESP = ESP;
          else
            return_ESP =  SP;
          return_ss_base = BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS);

          // save return CS:eIP to be pushed on new stack
          return_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
          if ( cs_descriptor.u.segment.d_b )
            return_EIP = EIP;
          else
            return_EIP =  IP;

          if (gate_descriptor.type==BX_286_CALL_GATE) {
            for (i=0; i<param_count; i++) {
              access_linear(return_ss_base + return_ESP + i*2,
                2, 0, BX_READ, &parameter_word[i]);
            }
          }
          else {
            for (i=0; i<param_count; i++) {
              access_linear(return_ss_base + return_ESP + i*4,
                4, 0, BX_READ, &parameter_dword[i]);
            }
          }

          /* load new SS:SP value from TSS */
          /* load SS descriptor */
          load_ss(&ss_selector, &ss_descriptor, ss_descriptor.dpl);
          if (ss_descriptor.u.segment.d_b)
            ESP = ESP_for_cpl_x;
          else
            SP  = (Bit16u) ESP_for_cpl_x;

          /* load new CS:IP value from gate */
          /* load CS descriptor */
          /* set CPL to stack segment DPL */
          /* set RPL of CS to CPL */
          load_cs(&cs_selector, &cs_descriptor, cs_descriptor.dpl);
          EIP = new_EIP;

          // push pointer of old stack onto new stack
          if (gate_descriptor.type==BX_286_CALL_GATE) {
            push_16(return_SS);
            push_16((Bit16u) return_ESP);
          }
          else {
            push_32(return_SS);
            push_32(return_ESP);
          }

          /* get word count from call gate, mask to 5 bits */
          /* copy parameters from old stack onto new stack */
          if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
            temp_ESP = ESP;
          else
            temp_ESP = SP;

          if (gate_descriptor.type==BX_286_CALL_GATE) {
            for (i=param_count; i>0; i--) {
              push_16(parameter_word[i-1]);
            }
          }
          else {
            for (i=param_count; i>0; i--) {
              push_32(parameter_dword[i-1]);
            }
          }

          // push return address onto new stack
          if (gate_descriptor.type==BX_286_CALL_GATE) {
            push_16(return_CS);
            push_16((Bit16u) return_EIP);
          }
          else {
            push_32(return_CS);
            push_32(return_EIP);
          }

          // Help for OS/2
          BX_CPU_THIS_PTR except_chk = 0;
          return;
        }
        // CALL GATE TO SAME PRIVILEGE
        else {
          BX_DEBUG(("CALL GATE TO SAME PRIVILEGE"));

#if BX_SUPPORT_X86_64
          if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
            // call gate 64bit, push return address onto stack
            push_64(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
            push_64(RIP);
          }
          else
#endif
          if (gate_descriptor.type == BX_386_CALL_GATE) {
            // call gate 32bit, push return address onto stack
            push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
            push_32(EIP);
          }
          else {
            // call gate 16bit, push return address onto stack
            push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
            push_16(IP);
          }

          // load CS:EIP from gate
          // load code segment descriptor into CS register
          // set RPL of CS to CPL
          branch_far64(&cs_selector, &cs_descriptor, new_EIP, CPL);
          return;
        }

        BX_PANIC(("call_protected: call gate: should not get here"));
        return;

      default:
        BX_ERROR(("call_protected: gate type %u unsupported", (unsigned) cs_descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
        return;
    }
  }
}
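
// Far RET in protected mode: pop the return CS:eIP (plus pop_bytes of
// parameters) for a same-privilege return, or additionally pop SS:eSP and
// reload the stack segment when returning to an outer privilege level.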
void BX_CPP_AttrRegparmN(2)
BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
{
  Bit16u raw_cs_selector, raw_ss_selector;
  bx_selector_t cs_selector, ss_selector;
  bx_descriptor_t cs_descriptor, ss_descriptor;
  Bit32u stack_cs_offset, stack_param_offset;
  bx_address return_RIP, return_RSP, temp_RSP;
  Bit32u dword1, dword2;

  /* + 6+N*2: SS      | +12+N*4:     SS  | +24+N*8      SS */
  /* + 4+N*2: SP      | + 8+N*4:    ESP  | +16+N*8     RSP */
  /*          parm N  | +        parm N  | +        parm N */
  /*          parm 3  | +        parm 3  | +        parm 3 */
  /*          parm 2  | +        parm 2  | +        parm 2 */
  /* + 4:     parm 1  | + 8:     parm 1  | +16:     parm 1 */
  /* + 2:         CS  | + 4:         CS  | + 8:         CS */
  /* + 0:         IP  | + 0:        EIP  | + 0:        RIP */

#if BX_SUPPORT_X86_64
  if ( i->os64L() ) {
    /* operand size=64: 2nd qword on stack must be within stack limits,
     * else #SS(0); */
    if (!can_pop(16)) {
      BX_ERROR(("return_protected: 2nd qword not in stack limits"));
      exception(BX_SS_EXCEPTION, 0, 0);
    }
    stack_cs_offset = 8;
    stack_param_offset = 16;
  }
  else
#endif
  if ( i->os32L() ) {
    /* operand size=32: 2nd dword on stack must be within stack limits,
     * else #SS(0); */
    if (!can_pop(8)) {
      BX_ERROR(("return_protected: 2nd dword not in stack limits"));
      exception(BX_SS_EXCEPTION, 0, 0);
    }
    stack_cs_offset = 4;
    stack_param_offset = 8;
  }
  else {
    /* operand size=16: second word on stack must be within stack limits,
     * else #SS(0); */
    if ( !can_pop(4) ) {
      BX_ERROR(("return_protected: 2nd word not in stack limits"));
      exception(BX_SS_EXCEPTION, 0, 0);
    }
    stack_cs_offset = 2;
    stack_param_offset = 4;
  }
#if BX_SUPPORT_X86_64
  if (StackAddrSize64()) temp_RSP = RSP;
  else
#endif
  {
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) temp_RSP = ESP;
    else temp_RSP = SP;
  }

  // return selector RPL must be >= CPL, else #GP(return selector)
  access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP +
       stack_cs_offset, 2, CPL==3, BX_READ, &raw_cs_selector);
  parse_selector(raw_cs_selector, &cs_selector);

  if (cs_selector.rpl < CPL) {
    BX_ERROR(("return_protected: CS.rpl < CPL"));
    exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
  }

  // if return selector RPL == CPL then
  // RETURN TO SAME PRIVILEGE LEVEL
  if (cs_selector.rpl == CPL)
  {
    // BX_INFO(("return: to same level %04x:%08x",
    //   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
    //   BX_CPU_THIS_PTR prev_eip));

    // return selector must be non-null, else #GP(0) (???)
    if ((raw_cs_selector & 0xfffc) == 0) {
      BX_INFO(("return_protected: CS null"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }

    // selector index must be within its descriptor table limits,
    // else #GP(selector)
    fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
    // descriptor AR byte must indicate code segment, else #GP(selector)
    parse_descriptor(dword1, dword2, &cs_descriptor);

    check_cs(&cs_descriptor, raw_cs_selector, 0);

    // top word on stack must be within stack limits, else #SS(0)
    if ( !can_pop(stack_param_offset + pop_bytes) ) {
      BX_ERROR(("return_protected: top word not in stack limits"));
      exception(BX_SS_EXCEPTION, 0, 0); /* #SS(0) */
    }

#if BX_SUPPORT_X86_64
    if (i->os64L()) {
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP,
        8, CPL==3, BX_READ, &return_RIP);
    }
    else
#endif
    if (i->os32L()) {
      Bit32u return_EIP;
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP,
        4, CPL==3, BX_READ, &return_EIP);
      return_RIP = return_EIP;
    }
    else {
      Bit16u return_IP;
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP,
        2, CPL==3, BX_READ, &return_IP);
      return_RIP = return_IP;
    }

    branch_far64(&cs_selector, &cs_descriptor, return_RIP, CPL);

    // increment eSP
#if BX_SUPPORT_X86_64
    if (StackAddrSize64())
      RSP += stack_param_offset + pop_bytes;
    else
#endif
    {
      if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
        ESP += stack_param_offset + pop_bytes;
      else
        SP += stack_param_offset + pop_bytes;
    }

    return;
  }
  /* RETURN TO OUTER PRIVILEGE LEVEL */
  else {
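    // Outer-privilege return: CS:eIP and SS:eSP are both taken from the
    // current stack frame, SS is reloaded with the caller's stack segment,
    // and ES/DS/FS/GS are re-validated for the lower privilege level.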
    /* + 6+N*2: SS      | +12+N*4:     SS  | +24+N*8      SS */
    /* + 4+N*2: SP      | + 8+N*4:    ESP  | +16+N*8     RSP */
    /*          parm N  | +        parm N  | +        parm N */
    /*          parm 3  | +        parm 3  | +        parm 3 */
    /*          parm 2  | +        parm 2  | +        parm 2 */
    /* + 4:     parm 1  | + 8:     parm 1  | +16:     parm 1 */
    /* + 2:         CS  | + 4:         CS  | + 8:         CS */
    /* + 0:         IP  | + 0:        EIP  | + 0:        RIP */

    //BX_INFO(("return: to outer level %04x:%08x",
    //  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
    //  BX_CPU_THIS_PTR prev_eip));

#if BX_SUPPORT_X86_64
    if (i->os64L()) {
      BX_PANIC(("RETF64: return to outer privilege level still not implemented !"));
    }
#endif

    if (i->os32L()) {
      /* top 16+immediate bytes on stack must be within stack limits, else #SS(0) */
      if ( !can_pop(16 + pop_bytes) ) {
        BX_ERROR(("return_protected: 16 bytes not within stack limits"));
        exception(BX_SS_EXCEPTION, 0, 0); /* #SS(0) */
      }
    }
    else {
      /* top 8+immediate bytes on stack must be within stack limits, else #SS(0) */
      if ( !can_pop(8 + pop_bytes) ) {
        BX_ERROR(("return_protected: 8 bytes not within stack limits"));
        exception(BX_SS_EXCEPTION, 0, 0); /* #SS(0) */
      }
    }

    /* examine return CS selector and associated descriptor */

    /* selector must be non-null else #GP(0) */
    if ( (raw_cs_selector & 0xfffc) == 0 ) {
      BX_INFO(("return_protected: CS selector null"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }

    /* selector index must be within its descriptor table limits,
     * else #GP(selector) */
    fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
    parse_descriptor(dword1, dword2, &cs_descriptor);

    check_cs(&cs_descriptor, raw_cs_selector, 0);

    /* examine return SS selector and associated descriptor: */
#if BX_SUPPORT_X86_64
    if (i->os64L()) {
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 24 + pop_bytes,
        2, 0, BX_READ, &raw_ss_selector);
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 16 + pop_bytes,
        8, 0, BX_READ, &return_RSP);
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP +  0,
        8, 0, BX_READ, &return_RIP);
    }
    else
#endif
    if (i->os32L()) {
      Bit32u return_EIP, return_ESP;
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 12 + pop_bytes,
        2, 0, BX_READ, &raw_ss_selector);
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 8 + pop_bytes,
        4, 0, BX_READ, &return_ESP);
      return_RSP = return_ESP;
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
        4, 0, BX_READ, &return_EIP);
      return_RIP = return_EIP;
    }
    else {
      Bit16u return_SP, return_IP;
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 6 + pop_bytes,
        2, 0, BX_READ, &raw_ss_selector);
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 4 + pop_bytes,
        2, 0, BX_READ, &return_SP);
      return_RSP = return_SP;
      access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
        2, 0, BX_READ, &return_IP);
      return_RIP = return_IP;
    }

    /* selector must be non-null else #GP(0) */
    if ( (raw_ss_selector & 0xfffc) == 0 ) {
      BX_INFO(("return_protected: SS selector null"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }

    /* selector index must be within its descriptor table limits,
     * else #GP(selector) */
    parse_selector(raw_ss_selector, &ss_selector);
    fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
    parse_descriptor(dword1, dword2, &ss_descriptor);

    /* selector RPL must = RPL of the return CS selector,
     * else #GP(selector) */
    if (ss_selector.rpl != cs_selector.rpl) {
      BX_INFO(("return_protected: ss.rpl != cs.rpl"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }

    /* descriptor AR byte must indicate a writable data segment,
     * else #GP(selector) */
    if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
        ss_descriptor.u.segment.executable ||
        ss_descriptor.u.segment.r_w==0)
    {
      BX_PANIC(("return_protected: SS.AR byte not writable data"));
      exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
    }

    /* descriptor dpl must = RPL of the return CS selector,
     * else #GP(selector) (???) */
    if (ss_descriptor.dpl != cs_selector.rpl) {
      BX_PANIC(("return_protected: SS.dpl != cs.rpl"));
      exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
    }

    /* segment must be present else #SS(selector) */
    if (! IS_PRESENT(ss_descriptor)) {
      BX_ERROR(("ss.present == 0"));
      exception(BX_SS_EXCEPTION, raw_ss_selector & 0xfffc, 0);
    }

    branch_far64(&cs_selector, &cs_descriptor, return_RIP, cs_selector.rpl);

    /* load SS:SP from stack */
    /* load SS-cache with return SS descriptor */
    load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
    if (ss_descriptor.u.segment.d_b)
      RSP = return_RSP + pop_bytes;
    else
      SP  = (Bit16u) return_RSP + pop_bytes;

    /* check ES, DS, FS, GS for validity */
    validate_seg_regs();
  }
}
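
// IRET in protected mode: with NT set, return from a nested task via the
// back link in the current TSS; otherwise pop EIP/CS/EFLAGS (and, for an
// outer-level or long-mode return, the stack pointer and SS) from the stack.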
void BX_CPP_AttrRegparmN(1)
|
2002-09-18 02:50:53 +04:00
|
|
|
BX_CPU_C::iret_protected(bxInstruction_c *i)
|
2001-04-10 05:04:59 +04:00
|
|
|
{
|
|
|
|
Bit16u raw_cs_selector, raw_ss_selector;
|
|
|
|
bx_selector_t cs_selector, ss_selector;
|
|
|
|
Bit32u dword1, dword2;
|
|
|
|
bx_descriptor_t cs_descriptor, ss_descriptor;
|
|
|
|
|
2002-09-12 22:10:46 +04:00
|
|
|
if ( BX_CPU_THIS_PTR get_NT () ) { /* NT = 1: RETURN FROM NESTED TASK */
|
2001-04-10 05:04:59 +04:00
|
|
|
/* what's the deal with NT & VM ? */
|
|
|
|
Bit32u base32;
|
|
|
|
Bit16u raw_link_selector;
|
|
|
|
bx_selector_t link_selector;
|
|
|
|
bx_descriptor_t tss_descriptor;
|
|
|
|
|
2002-09-12 22:10:46 +04:00
|
|
|
if ( BX_CPU_THIS_PTR get_VM () )
|
2005-03-05 00:03:22 +03:00
|
|
|
BX_PANIC(("iret_protected: VM sholdn't be set here !"));
|
2001-04-10 05:04:59 +04:00
|
|
|
|
2001-05-30 22:56:02 +04:00
|
|
|
//BX_INFO(("IRET: nested task return"));
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR tr.cache.valid==0)
|
2001-05-30 22:56:02 +04:00
|
|
|
BX_PANIC(("IRET: TR not valid"));
|
2005-03-05 00:03:22 +03:00
|
|
|
if (BX_CPU_THIS_PTR tr.cache.type == BX_SYS_SEGMENT_AVAIL_286_TSS)
|
2001-04-10 05:04:59 +04:00
|
|
|
base32 = BX_CPU_THIS_PTR tr.cache.u.tss286.base;
|
2005-03-05 00:03:22 +03:00
|
|
|
else if (BX_CPU_THIS_PTR tr.cache.type == BX_SYS_SEGMENT_AVAIL_386_TSS)
|
2001-04-10 05:04:59 +04:00
|
|
|
base32 = BX_CPU_THIS_PTR tr.cache.u.tss386.base;
|
|
|
|
else {
|
2001-05-30 22:56:02 +04:00
|
|
|
BX_PANIC(("IRET: TR not valid"));
|
2001-04-10 05:04:59 +04:00
|
|
|
base32 = 0; // keep compiler happy
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
// examine back link selector in TSS addressed by current TR:
|
2005-07-31 21:57:27 +04:00
|
|
|
access_linear(base32, 2, 0, BX_READ, &raw_link_selector);
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
// must specify global, else #TS(new TSS selector)
|
|
|
|
parse_selector(raw_link_selector, &link_selector);
|
2005-03-05 00:03:22 +03:00
|
|
|
|
2001-04-10 05:04:59 +04:00
|
|
|
if (link_selector.ti) {
|
2005-03-05 00:03:22 +03:00
|
|
|
BX_ERROR(("iret: link selector.ti=1"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
// index must be within GDT limits, else #TS(new TSS selector)
|
|
|
|
fetch_raw_descriptor(&link_selector, &dword1, &dword2, BX_TS_EXCEPTION);
|
|
|
|
|
|
|
|
// AR byte must specify TSS, else #TS(new TSS selector)
|
|
|
|
// new TSS must be busy, else #TS(new TSS selector)
|
|
|
|
parse_descriptor(dword1, dword2, &tss_descriptor);
|
|
|
|
if (tss_descriptor.valid==0 || tss_descriptor.segment) {
|
2001-05-30 22:56:02 +04:00
|
|
|
BX_INFO(("iret: TSS selector points to bad TSS"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
if ((tss_descriptor.type!=11) && (tss_descriptor.type!=3)) {
|
2001-05-30 22:56:02 +04:00
|
|
|
BX_INFO(("iret: TSS selector points to bad TSS"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
// TSS must be present, else #NP(new TSS selector)
|
2005-07-11 00:32:32 +04:00
|
|
|
if (! IS_PRESENT(tss_descriptor)) {
|
|
|
|
BX_ERROR(("iret: task descriptor.p == 0"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_NP_EXCEPTION, raw_link_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
// switch tasks (without nesting) to TSS specified by back link selector
|
|
|
|
task_switch(&link_selector, &tss_descriptor,
|
|
|
|
BX_TASK_FROM_IRET, dword1, dword2);
|
|
|
|
|
|
|
|
// mark the task just abandoned as not busy
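// (task_switch() is expected to clear the busy bit of the outgoing TSS when
//  invoked with BX_TASK_FROM_IRET)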
|
|
|
|
|
2005-03-05 00:03:22 +03:00
|
|
|
// EIP must be within code seg limit, else #GP(0)
|
2001-04-10 05:04:59 +04:00
|
|
|
if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
|
2005-03-05 00:03:22 +03:00
|
|
|
BX_ERROR(("iret: EIP > CS.limit"));
|
2004-11-02 19:10:02 +03:00
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
2001-04-10 05:04:59 +04:00
|
|
|
}
|
|
|
|
|
2004-11-02 19:10:02 +03:00
|
|
|
return;
|
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
#if BX_SUPPORT_X86_64
|
|
|
|
else if (BX_CPU_THIS_PTR msr.lma) { // LONG MODE
|
|
|
|
//BX_DEBUG (("LONG mode IRET"));
|
|
|
|
/* NT = 0: INTERRUPT RETURN ON STACK (no return to V86 from long mode) */
|
|
|
|
Bit16u top_nbytes_same, top_nbytes_outer;
|
|
|
|
Bit64u cs_offset, ss_offset;
|
|
|
|
Bit64u new_rip, new_rsp, temp_RSP, new_eflags;
|
|
|
|
Bit8u prev_cpl;
|
|
|
|
|
|
|
|
/* 64bit opsize
|
|
|
|
* ============
|
|
|
|
* SS eSP+32
|
|
|
|
* ESP eSP+24
|
|
|
|
* -------------
|
|
|
|
* EFLAGS eSP+16
|
|
|
|
* CS eSP+8
|
|
|
|
* EIP eSP+0
|
|
|
|
*/
|
|
|
|
|
|
|
|
top_nbytes_same = 24;
|
|
|
|
top_nbytes_outer = 40;
|
|
|
|
cs_offset = 8;
|
|
|
|
ss_offset = 32;
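// same-level frame: RIP + CS + RFLAGS, 8 bytes each = 24 bytes;
// a return to outer privilege also pops RSP and SS, 40 bytes total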
|
|
|
|
|
|
|
|
/* CS on stack must be within stack limits, else #SS(0) */
|
|
|
|
if ( !can_pop(top_nbytes_same) ) {
|
2004-05-11 01:05:51 +04:00
|
|
|
BX_ERROR(("iret: CS not within stack limits"));
|
2002-09-15 06:23:12 +04:00
|
|
|
exception(BX_SS_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
temp_RSP = RSP;
|
|
|
|
|
2005-02-28 21:56:05 +03:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + cs_offset,
|
2002-09-15 06:23:12 +04:00
|
|
|
2, CPL==3, BX_READ, &raw_cs_selector);
|
|
|
|
|
2005-02-28 21:56:05 +03:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
|
2002-09-15 06:23:12 +04:00
|
|
|
8, CPL==3, BX_READ, &new_rip);
|
2005-02-28 21:56:05 +03:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 16,
|
2002-09-15 06:23:12 +04:00
|
|
|
8, CPL==3, BX_READ, &new_eflags);
|
|
|
|
|
|
|
|
// if VM=1 in flags image on stack then STACK_RETURN_TO_V86
|
|
|
|
if (new_eflags & 0x00020000) {
|
|
|
|
BX_PANIC(("iret: no V86 mode in x86-64 LONG mode"));
|
|
|
|
return;
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
parse_selector(raw_cs_selector, &cs_selector);
|
|
|
|
|
|
|
|
// return CS selector must be non-null, else #GP(0)
|
|
|
|
if ( (raw_cs_selector & 0xfffc) == 0 ) {
|
2004-05-11 01:05:51 +04:00
|
|
|
BX_ERROR(("iret: return CS selector null"));
|
2002-09-15 06:23:12 +04:00
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
// selector index must be within descriptor table limits,
|
|
|
|
// else #GP(return selector)
|
2004-11-02 20:31:14 +03:00
|
|
|
fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
parse_descriptor(dword1, dword2, &cs_descriptor);
|
|
|
|
|
|
|
|
// AR byte must indicate code segment else #GP(return selector)
|
|
|
|
if ( cs_descriptor.valid==0 ||
|
|
|
|
cs_descriptor.segment==0 ||
|
2004-05-11 01:05:51 +04:00
|
|
|
cs_descriptor.u.segment.executable==0 )
|
|
|
|
{
|
|
|
|
BX_ERROR(("iret: AR byte indicated non code segment (%x) %x:%x",
|
|
|
|
raw_cs_selector, dword1, dword2));
|
2002-09-15 06:23:12 +04:00
|
|
|
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
2004-05-11 01:05:51 +04:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
// return CS selector RPL must be >= CPL, else #GP(return selector)
|
|
|
|
if (cs_selector.rpl < CPL) {
|
2004-05-11 01:05:51 +04:00
|
|
|
BX_ERROR(("iret: return selector RPL < CPL"));
|
2002-09-15 06:23:12 +04:00
|
|
|
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
2004-05-11 01:05:51 +04:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
2005-03-20 21:08:46 +03:00
|
|
|
if (cs_descriptor.u.segment.c_ed) {
|
|
|
|
// if return code seg descriptor is conforming
|
|
|
|
// and return code seg DPL > return code seg selector RPL
|
|
|
|
// then #GP(return selector)
|
|
|
|
if (cs_descriptor.dpl > cs_selector.rpl)
|
|
|
|
{
|
|
|
|
BX_PANIC(("iret: conforming, DPL > cs_selector.RPL"));
|
|
|
|
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
|
|
|
}
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2005-03-20 21:08:46 +03:00
|
|
|
else {
|
|
|
|
// if return code seg descriptor is non-conforming
|
|
|
|
// and return code seg DPL != return code seg selector RPL
|
|
|
|
// then #GP(return selector)
|
|
|
|
if (cs_descriptor.dpl != cs_selector.rpl)
|
|
|
|
{
|
2005-07-20 05:26:47 +04:00
|
|
|
BX_INFO(("iret: Return with DPL != RPL. #GP(selector)"));
|
2005-03-20 21:08:46 +03:00
|
|
|
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
|
|
|
}
|
2004-11-02 19:10:02 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
// segment must be present else #NP(return selector)
|
2005-07-11 00:32:32 +04:00
|
|
|
if (! IS_PRESENT(cs_descriptor)) {
|
|
|
|
BX_ERROR(("iret: not present"));
|
2002-09-15 06:23:12 +04:00
|
|
|
exception(BX_NP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
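// in 64-bit mode IRET always reloads SS:RSP from the stack, so the
// same-level shortcut below is only taken outside of 64-bit mode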
if (cs_selector.rpl == CPL
|
2004-11-02 19:10:02 +03:00
|
|
|
&& BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64)
|
|
|
|
{ /* INTERRUPT RETURN TO SAME LEVEL */
|
2002-09-15 06:23:12 +04:00
|
|
|
/* top 24 bytes on stack must be within limits, else #SS(0) */
|
|
|
|
/* satisfied above */
|
|
|
|
|
|
|
|
/* load CS:EIP from stack */
|
|
|
|
/* load CS-cache with new code segment descriptor */
|
|
|
|
load_cs(&cs_selector, &cs_descriptor, CPL);
|
|
|
|
RIP = new_rip;
|
|
|
|
|
|
|
|
/* load EFLAGS with 3rd doubleword from stack */
|
|
|
|
write_eflags(new_eflags, CPL==0, CPL<=BX_CPU_THIS_PTR get_IOPL(), 0, 1);
|
|
|
|
|
|
|
|
/* increment stack by 24 */
|
|
|
|
RSP += top_nbytes_same;
|
2004-11-02 19:10:02 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
else { /* INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL or 64 bit mode */
|
|
|
|
/* 64bit opsize
|
|
|
|
* ============
|
|
|
|
* SS eSP+32
|
|
|
|
* ESP eSP+24
|
|
|
|
* EFLAGS eSP+16
|
|
|
|
* CS eSP+8
|
|
|
|
* EIP eSP+0
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* top 40 bytes on stack must be within limits else #SS(0) */
|
|
|
|
if ( !can_pop(top_nbytes_outer) ) {
|
|
|
|
BX_PANIC(("iret: top 10/20 bytes not within stack limits"));
|
|
|
|
exception(BX_SS_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
/* examine return SS selector and associated descriptor */
|
2005-02-28 21:56:05 +03:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + ss_offset,
|
2002-09-15 06:23:12 +04:00
|
|
|
2, 0, BX_READ, &raw_ss_selector);
|
|
|
|
|
2002-09-24 19:41:03 +04:00
|
|
|
if ( (raw_ss_selector & 0xfffc) != 0 ) {
|
|
|
|
parse_selector(raw_ss_selector, &ss_selector);
|
I integrated my hacks to get Linux/x86-64 booting. To keep
these from interfering with a normal compile, here's what I did.
In config.h.in (which will generate config.h after a configure),
I added a #define called KPL64Hacks:
#define KPL64Hacks
*After* running configure, you must set this by hand. It will
default to off, so you won't get my hacks in a normal compile.
This will go away soon. There is also a macro just after that
called BailBigRSP(). You don't need to enable that, but you
can. In many of the instructions which seemed like they could
be hit by the fetchdecode64() process, but which also touched
EIP/ESP, I inserted a macro. Usually this macro expands to nothing.
If you like, you can enable it, and it will panic if it finds
the upper bits of RIP/RSP set. This helped me find bugs.
Also, I cleaned up the emulation in ctrl_xfer{8,16,32}.cc.
There were some really old legacy code snippets which directly
accessed operands on the stack with access_linear. Lots of
ugly code instead of just pop_32() etc. Cleaning those up
minimized the number of instructions which directly manipulate
the stack pointer, which should help in refining 64-bit support.
2002-09-24 04:44:56 +04:00
|
|
|
|
2002-09-24 19:41:03 +04:00
|
|
|
/* selector RPL must = RPL of return CS selector,
|
|
|
|
* else #GP(SS selector) */
|
|
|
|
if ( ss_selector.rpl != cs_selector.rpl) {
|
|
|
|
BX_PANIC(("iret: SS.rpl != CS.rpl"));
|
|
|
|
exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
2002-09-24 19:41:03 +04:00
|
|
|
/* selector index must be within its descriptor table limits,
|
|
|
|
* else #GP(SS selector) */
|
2004-11-02 19:10:02 +03:00
|
|
|
fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
|
2002-09-15 06:23:12 +04:00
|
|
|
|
2002-09-24 19:41:03 +04:00
|
|
|
parse_descriptor(dword1, dword2, &ss_descriptor);
|
2002-09-15 06:23:12 +04:00
|
|
|
|
2002-09-24 12:29:06 +04:00
|
|
|
/* AR byte must indicate a writable data segment,
|
|
|
|
* else #GP(SS selector) */
|
|
|
|
if ( ss_descriptor.valid==0 ||
|
|
|
|
ss_descriptor.segment==0 ||
|
|
|
|
ss_descriptor.u.segment.executable ||
|
2004-11-02 19:10:02 +03:00
|
|
|
ss_descriptor.u.segment.r_w==0 )
|
|
|
|
{
|
2002-09-24 12:29:06 +04:00
|
|
|
BX_PANIC(("iret: SS AR byte not writable code segment"));
|
|
|
|
exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
|
2004-11-02 19:10:02 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
2002-09-24 12:29:06 +04:00
|
|
|
/* stack segment DPL must equal the RPL of the return CS selector,
|
|
|
|
* else #GP(SS selector) */
|
|
|
|
if ( ss_descriptor.dpl != cs_selector.rpl ) {
|
|
|
|
BX_PANIC(("iret: SS.dpl != CS selector RPL"));
|
|
|
|
exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
2002-09-24 12:29:06 +04:00
|
|
|
/* SS must be present, else #NP(SS selector) */
|
2005-07-11 00:32:32 +04:00
|
|
|
if (! IS_PRESENT(ss_descriptor)) {
|
|
|
|
BX_ERROR(("iret: SS not present!"));
|
2002-09-24 12:29:06 +04:00
|
|
|
exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
|
2002-09-15 06:23:12 +04:00
|
|
|
}
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
|
2005-02-28 21:56:05 +03:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
|
2002-09-15 06:23:12 +04:00
|
|
|
8, 0, BX_READ, &new_rip);
|
2005-02-28 21:56:05 +03:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 16,
|
2002-09-15 06:23:12 +04:00
|
|
|
4, 0, BX_READ, &new_eflags);
|
2005-02-28 21:56:05 +03:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 24,
|
2002-09-15 06:23:12 +04:00
|
|
|
8, 0, BX_READ, &new_rsp);
|
|
|
|
|
2005-07-29 10:29:57 +04:00
|
|
|
prev_cpl = CPL; /* previous CPL */
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
/* set CPL to the RPL of the return CS selector */
|
2005-07-31 21:57:27 +04:00
|
|
|
branch_far64(&cs_selector, &cs_descriptor, new_rip, cs_selector.rpl);
|
2002-09-15 06:23:12 +04:00
|
|
|
|
|
|
|
/* load flags from stack */
|
|
|
|
// perhaps I should always write_eflags(), thus zeroing
|
|
|
|
// out the upper 16bits of eflags for CS.D_B==0 ???
|
|
|
|
if (cs_descriptor.u.segment.d_b)
|
|
|
|
write_eflags(new_eflags, prev_cpl==0, prev_cpl<=BX_CPU_THIS_PTR get_IOPL(), 0, 1);
|
|
|
|
else
|
|
|
|
write_flags((Bit16u) new_eflags, prev_cpl==0, prev_cpl<=BX_CPU_THIS_PTR get_IOPL());
|
|
|
|
|
2002-09-24 19:41:03 +04:00
|
|
|
if ( (raw_ss_selector & 0xfffc) != 0 ) {
|
I integrated my hacks to get Linux/x86-64 booting. To keep
these from interfering with a normal compile, here's what I did.
In config.h.in (which will generate config.h after a configure),
I added a #define called KPL64Hacks:
#define KPL64Hacks
*After* running configure, you must set this by hand. It will
default to off, so you won't get my hacks in a normal compile.
This will go away soon. There is also a macro just after that
called BailBigRSP(). You don't need to enable that, but you
can. In many of the instructions which seemed like they could
be hit by the fetchdecode64() process, but which also touched
EIP/ESP, I inserted a macro. Usually this macro expands to nothing.
If you like, you can enable it, and it will panic if it finds
the upper bits of RIP/RSP set. This helped me find bugs.
Also, I cleaned up the emulation in ctrl_xfer{8,16,32}.cc.
There were some really old legacy code snippets which directly
accessed operands on the stack with access_linear. Lots of
ugly code instead of just pop_32() etc. Cleaning those up
minimized the number of instructions which directly manipulate
the stack pointer, which should help in refining 64-bit support.
2002-09-24 04:44:56 +04:00
|
|
|
// load SS:RSP from stack
|
|
|
|
// load the SS-cache with SS descriptor
|
|
|
|
load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-24 19:41:03 +04:00
|
|
|
else {
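// a null SS selector can legitimately appear here in long mode; load SS
// with a nominal (null) descriptor instead of raising an exception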
|
2002-09-24 20:35:44 +04:00
|
|
|
loadSRegLMNominal(BX_SEG_REG_SS, raw_ss_selector, 0, cs_selector.rpl);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
RSP = new_rsp;
|
|
|
|
|
|
|
|
validate_seg_regs();
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2002-09-15 06:23:12 +04:00
|
|
|
#endif // #if BX_SUPPORT_X86_64
|
|
|
|
else {
|
|
|
|
/* NT = 0: INTERRUPT RETURN ON STACK -or- STACK_RETURN_TO_V86 */
|
2001-04-10 05:04:59 +04:00
|
|
|
Bit16u top_nbytes_same, top_nbytes_outer;
|
|
|
|
Bit32u cs_offset, ss_offset;
|
2005-07-20 05:26:47 +04:00
|
|
|
Bit32u new_eip, new_esp, temp_RSP, new_eflags;
|
2001-04-10 05:04:59 +04:00
|
|
|
Bit16u new_ip, new_sp, new_flags;
|
|
|
|
Bit8u prev_cpl;
|
|
|
|
|
|
|
|
/* 16bit opsize | 32bit opsize
|
|
|
|
* ==============================
|
|
|
|
* SS eSP+8 | SS eSP+16
|
|
|
|
* SP eSP+6 | ESP eSP+12
|
|
|
|
* -------------------------------
|
|
|
|
* FLAGS eSP+4 | EFLAGS eSP+8
|
|
|
|
* CS eSP+2 | CS eSP+4
|
|
|
|
* IP eSP+0 | EIP eSP+0
|
|
|
|
*/
|
|
|
|
|
2002-09-18 09:36:48 +04:00
|
|
|
if (i->os32L()) {
|
2001-04-10 05:04:59 +04:00
|
|
|
top_nbytes_same = 12;
|
|
|
|
top_nbytes_outer = 20;
|
|
|
|
cs_offset = 4;
|
|
|
|
ss_offset = 16;
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
else {
|
|
|
|
top_nbytes_same = 6;
|
|
|
|
top_nbytes_outer = 10;
|
|
|
|
cs_offset = 2;
|
|
|
|
ss_offset = 8;
|
2005-03-05 00:03:22 +03:00
|
|
|
}
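// same-level frame: (E)IP + CS + (E)FLAGS; an outer return also pops (E)SP and SS,
// i.e. 6/10 bytes with a 16-bit operand size, 12/20 bytes with a 32-bit one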
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
/* CS on stack must be within stack limits, else #SS(0) */
|
|
|
|
if ( !can_pop(top_nbytes_same) ) {
|
2004-05-11 01:05:51 +04:00
|
|
|
BX_ERROR(("iret: CS not within stack limits"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_SS_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
|
2005-07-20 05:26:47 +04:00
|
|
|
temp_RSP = ESP;
|
2001-04-10 05:04:59 +04:00
|
|
|
else
|
2005-07-20 05:26:47 +04:00
|
|
|
temp_RSP = SP;
|
2001-04-10 05:04:59 +04:00
|
|
|
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + cs_offset,
|
2001-04-10 05:04:59 +04:00
|
|
|
2, CPL==3, BX_READ, &raw_cs_selector);
|
|
|
|
|
2002-09-18 09:36:48 +04:00
|
|
|
if (i->os32L()) {
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
|
2001-04-10 05:04:59 +04:00
|
|
|
4, CPL==3, BX_READ, &new_eip);
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 8,
|
2001-04-10 05:04:59 +04:00
|
|
|
4, CPL==3, BX_READ, &new_eflags);
|
|
|
|
|
|
|
|
// if VM=1 in flags image on stack then STACK_RETURN_TO_V86
|
|
|
|
if (new_eflags & 0x00020000) {
|
2005-03-12 21:09:32 +03:00
|
|
|
if (CPL == 0) {
|
|
|
|
BX_CPU_THIS_PTR stack_return_to_v86(new_eip, raw_cs_selector, new_eflags);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
else BX_INFO(("iret: VM set on stack, CPL!=0"));
|
2001-04-10 05:04:59 +04:00
|
|
|
}
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
else {
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
|
2001-04-10 05:04:59 +04:00
|
|
|
2, CPL==3, BX_READ, &new_ip);
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 4,
|
2001-04-10 05:04:59 +04:00
|
|
|
2, CPL==3, BX_READ, &new_flags);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
parse_selector(raw_cs_selector, &cs_selector);
|
|
|
|
|
|
|
|
// return CS selector must be non-null, else #GP(0)
|
|
|
|
if ( (raw_cs_selector & 0xfffc) == 0 ) {
|
2004-05-11 01:05:51 +04:00
|
|
|
BX_ERROR(("iret: return CS selector null"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
// selector index must be within descriptor table limits,
|
|
|
|
// else #GP(return selector)
|
2004-11-02 20:31:14 +03:00
|
|
|
fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
parse_descriptor(dword1, dword2, &cs_descriptor);
|
|
|
|
|
|
|
|
// AR byte must indicate code segment else #GP(return selector)
|
|
|
|
if ( cs_descriptor.valid==0 ||
|
|
|
|
cs_descriptor.segment==0 ||
|
2004-05-11 01:05:51 +04:00
|
|
|
cs_descriptor.u.segment.executable==0 )
|
|
|
|
{
|
|
|
|
BX_ERROR(("iret: AR byte indicated non code segment"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
2004-05-11 01:05:51 +04:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
// return CS selector RPL must be >= CPL, else #GP(return selector)
|
|
|
|
if (cs_selector.rpl < CPL) {
|
2004-05-11 01:05:51 +04:00
|
|
|
BX_ERROR(("iret: return selector RPL < CPL"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
2004-05-11 01:05:51 +04:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
2005-03-05 00:03:22 +03:00
|
|
|
if (cs_descriptor.u.segment.c_ed)
|
|
|
|
{
|
|
|
|
// if return code seg descriptor is conforming
|
|
|
|
// and return code seg DPL > return code seg selector RPL
|
|
|
|
// then #GP(return selector)
|
|
|
|
if (cs_descriptor.dpl > cs_selector.rpl) {
|
|
|
|
BX_PANIC(("iret: conforming, DPL > cs_selector.RPL"));
|
|
|
|
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
2001-04-10 05:04:59 +04:00
|
|
|
}
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
|
|
|
else {
|
|
|
|
// if return code seg descriptor is non-conforming
|
|
|
|
// and return code seg DPL != return code seg selector RPL
|
|
|
|
// then #GP(return selector)
|
|
|
|
if (cs_descriptor.dpl != cs_selector.rpl) {
|
|
|
|
BX_INFO(("iret: Return with DPL != RPL. #GP(selector)"));
|
|
|
|
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
2001-04-10 05:04:59 +04:00
|
|
|
}
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
// segment must be present else #NP(return selector)
|
2005-07-11 00:32:32 +04:00
|
|
|
if (! IS_PRESENT(cs_descriptor)) {
|
|
|
|
BX_ERROR(("iret: segment not present"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_NP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
if (cs_selector.rpl == CPL) { /* INTERRUPT RETURN TO SAME LEVEL */
|
|
|
|
/* top 6/12 bytes on stack must be within limits, else #SS(0) */
|
|
|
|
/* satisfied above */
|
|
|
|
|
2002-09-18 09:36:48 +04:00
|
|
|
if (i->os32L()) {
|
2001-04-10 05:04:59 +04:00
|
|
|
/* return EIP must be in code segment limit else #GP(0) */
|
|
|
|
if ( new_eip > cs_descriptor.u.segment.limit_scaled ) {
|
2004-11-02 19:10:02 +03:00
|
|
|
BX_ERROR(("iret: IP > descriptor limit"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
/* load CS:EIP from stack */
|
|
|
|
/* load CS-cache with new code segment descriptor */
|
|
|
|
load_cs(&cs_selector, &cs_descriptor, CPL);
|
|
|
|
EIP = new_eip;
|
|
|
|
|
|
|
|
/* load EFLAGS with 3rd doubleword from stack */
|
2002-09-12 22:10:46 +04:00
|
|
|
write_eflags(new_eflags, CPL==0, CPL<=BX_CPU_THIS_PTR get_IOPL (), 0, 1);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
else {
|
|
|
|
/* return IP must be in code segment limit else #GP(0) */
|
|
|
|
if ( new_ip > cs_descriptor.u.segment.limit_scaled ) {
|
2004-11-02 19:10:02 +03:00
|
|
|
BX_ERROR(("iret: IP > descriptor limit"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
/* load CS:IP from stack */
|
|
|
|
/* load CS-cache with new code segment descriptor */
|
|
|
|
load_cs(&cs_selector, &cs_descriptor, CPL);
|
|
|
|
EIP = new_ip;
|
|
|
|
|
|
|
|
/* load flags with third word on stack */
|
2002-09-12 22:10:46 +04:00
|
|
|
write_flags(new_flags, CPL==0, CPL<=BX_CPU_THIS_PTR get_IOPL ());
|
2004-05-11 01:05:51 +04:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
/* increment stack by 6/12 */
|
|
|
|
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
|
|
|
|
ESP += top_nbytes_same;
|
|
|
|
else
|
|
|
|
SP += top_nbytes_same;
|
|
|
|
return;
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
else { /* INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL */
|
|
|
|
/* 16bit opsize | 32bit opsize
|
|
|
|
* ==============================
|
|
|
|
* SS eSP+8 | SS eSP+16
|
|
|
|
* SP eSP+6 | ESP eSP+12
|
|
|
|
* FLAGS eSP+4 | EFLAGS eSP+8
|
|
|
|
* CS eSP+2 | CS eSP+4
|
|
|
|
* IP eSP+0 | EIP eSP+0
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* top 10/20 bytes on stack must be within limits else #SS(0) */
|
|
|
|
if ( !can_pop(top_nbytes_outer) ) {
|
2004-11-02 19:10:02 +03:00
|
|
|
BX_ERROR(("iret: top 10/20 bytes not within stack limits"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_SS_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
/* examine return SS selector and associated descriptor */
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + ss_offset,
|
2001-04-10 05:04:59 +04:00
|
|
|
2, 0, BX_READ, &raw_ss_selector);
|
|
|
|
|
|
|
|
/* selector must be non-null, else #GP(0) */
|
|
|
|
if ( (raw_ss_selector & 0xfffc) == 0 ) {
|
2005-03-05 00:03:22 +03:00
|
|
|
BX_ERROR(("iret: SS selector null"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
parse_selector(raw_ss_selector, &ss_selector);
|
|
|
|
|
|
|
|
/* selector RPL must = RPL of return CS selector,
|
|
|
|
* else #GP(SS selector) */
|
|
|
|
if ( ss_selector.rpl != cs_selector.rpl) {
|
2005-03-05 00:03:22 +03:00
|
|
|
BX_ERROR(("iret: SS.rpl != CS.rpl"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
/* selector index must be within its descriptor table limits,
|
|
|
|
* else #GP(SS selector) */
|
2004-11-02 19:10:02 +03:00
|
|
|
fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
parse_descriptor(dword1, dword2, &ss_descriptor);
|
|
|
|
|
|
|
|
/* AR byte must indicate a writable data segment,
|
|
|
|
* else #GP(SS selector) */
|
|
|
|
if ( ss_descriptor.valid==0 ||
|
|
|
|
ss_descriptor.segment==0 ||
|
|
|
|
ss_descriptor.u.segment.executable ||
|
2005-03-05 00:03:22 +03:00
|
|
|
ss_descriptor.u.segment.r_w==0 )
|
|
|
|
{
|
|
|
|
BX_ERROR(("iret: SS AR byte not writable code segment"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
/* stack segment DPL must equal the RPL of the return CS selector,
|
|
|
|
* else #GP(SS selector) */
|
|
|
|
if ( ss_descriptor.dpl != cs_selector.rpl ) {
|
2005-03-05 00:03:22 +03:00
|
|
|
BX_ERROR(("iret: SS.dpl != CS selector RPL"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
/* SS must be present, else #NP(SS selector) */
|
2005-07-11 00:32:32 +04:00
|
|
|
if (! IS_PRESENT(ss_descriptor)) {
|
2005-03-05 00:03:22 +03:00
|
|
|
BX_ERROR(("iret: SS not present!"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
2002-09-18 09:36:48 +04:00
|
|
|
if (i->os32L()) {
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
|
2001-04-10 05:04:59 +04:00
|
|
|
4, 0, BX_READ, &new_eip);
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 8,
|
2001-04-10 05:04:59 +04:00
|
|
|
4, 0, BX_READ, &new_eflags);
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 12,
|
2001-04-10 05:04:59 +04:00
|
|
|
4, 0, BX_READ, &new_esp);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
else {
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
|
2001-04-10 05:04:59 +04:00
|
|
|
2, 0, BX_READ, &new_ip);
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 4,
|
2001-04-10 05:04:59 +04:00
|
|
|
2, 0, BX_READ, &new_flags);
|
2005-07-20 05:26:47 +04:00
|
|
|
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 6,
|
2001-04-10 05:04:59 +04:00
|
|
|
2, 0, BX_READ, &new_sp);
|
|
|
|
new_eip = new_ip;
|
|
|
|
new_esp = new_sp;
|
|
|
|
new_eflags = new_flags;
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
/* EIP must be in code segment limit, else #GP(0) */
|
|
|
|
if ( new_eip > cs_descriptor.u.segment.limit_scaled ) {
|
2004-11-02 19:10:02 +03:00
|
|
|
BX_ERROR(("iret: IP > descriptor limit"));
|
2001-04-10 05:04:59 +04:00
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
/* load CS:EIP from stack */
|
|
|
|
/* load the CS-cache with CS descriptor */
|
|
|
|
/* set CPL to the RPL of the return CS selector */
|
|
|
|
prev_cpl = CPL; /* previous CPL */
|
|
|
|
load_cs(&cs_selector, &cs_descriptor, cs_selector.rpl);
|
2002-09-13 04:15:23 +04:00
|
|
|
EIP = new_eip;
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
/* load flags from stack */
|
|
|
|
// perhaps I should always write_eflags(), thus zeroing
|
|
|
|
// out the upper 16bits of eflags for CS.D_B==0 ???
|
|
|
|
if (cs_descriptor.u.segment.d_b)
|
2002-09-12 22:10:46 +04:00
|
|
|
write_eflags(new_eflags, prev_cpl==0, prev_cpl<=BX_CPU_THIS_PTR get_IOPL (), 0, 1);
|
2001-04-10 05:04:59 +04:00
|
|
|
else
|
2002-09-12 22:10:46 +04:00
|
|
|
write_flags((Bit16u) new_eflags, prev_cpl==0, prev_cpl<=BX_CPU_THIS_PTR get_IOPL ());
|
2001-04-10 05:04:59 +04:00
|
|
|
|
|
|
|
// load SS:eSP from stack
|
|
|
|
// load the SS-cache with SS descriptor
|
|
|
|
load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
|
|
|
|
if (ss_descriptor.u.segment.d_b)
|
|
|
|
ESP = new_esp;
|
|
|
|
else
|
|
|
|
SP = (Bit16u) new_esp;
|
|
|
|
|
|
|
|
validate_seg_regs();
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
|
|
|
|
2001-05-30 22:56:02 +04:00
|
|
|
BX_PANIC(("IRET: shouldn't get here!"));
|
2001-04-10 05:04:59 +04:00
|
|
|
}
|
|
|
|
|
2004-11-03 09:35:48 +03:00
|
|
|
void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near32(Bit32u new_EIP)
|
2004-11-02 20:31:14 +03:00
|
|
|
{
|
2005-03-05 00:03:22 +03:00
|
|
|
// check always, not only in protected mode
|
|
|
|
if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
|
|
|
|
{
|
|
|
|
BX_ERROR(("branch_near: offset outside of CS limits"));
|
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
|
|
|
}
|
|
|
|
EIP = new_EIP;
|
|
|
|
revalidate_prefetch_q();
|
2004-11-02 20:31:14 +03:00
|
|
|
}
|
|
|
|
|
2005-07-31 21:57:27 +04:00
|
|
|
#if BX_SUPPORT_X86_64
|
|
|
|
void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near64(bxInstruction_c *i)
|
|
|
|
{
|
|
|
|
Bit64u new_RIP = RIP + (Bit32s) i->Id();
|
|
|
|
|
|
|
|
if (! i->os32L()) {
|
|
|
|
new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
if (! IsCanonical(new_RIP)) {
|
|
|
|
BX_ERROR(("branch_near64: canonical RIP violation"));
|
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
RIP = new_RIP;
|
|
|
|
revalidate_prefetch_q();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
void BX_CPU_C::branch_far32(bx_selector_t *selector,
|
|
|
|
bx_descriptor_t *descriptor, Bit32u eip, Bit8u cpl)
|
|
|
|
{
|
|
|
|
/* instruction pointer must be in code segment limit else #GP(0) */
|
|
|
|
if (eip > descriptor->u.segment.limit_scaled) {
|
|
|
|
BX_ERROR(("branch_far: EIP > limit"));
|
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Load CS:IP from destination pointer */
|
|
|
|
/* Load CS-cache with new segment descriptor */
|
|
|
|
load_cs(selector, descriptor, cpl);
|
|
|
|
|
|
|
|
/* Change the EIP value */
|
|
|
|
RIP = eip;
|
|
|
|
}
|
|
|
|
|
|
|
|
void BX_CPU_C::branch_far64(bx_selector_t *selector,
|
2005-07-29 10:29:57 +04:00
|
|
|
bx_descriptor_t *descriptor, bx_address rip, Bit8u cpl)
|
|
|
|
{
|
|
|
|
#if BX_SUPPORT_X86_64
|
|
|
|
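// in a 64-bit code segment (L bit set) there is no limit check, but the
// target RIP must be canonical; otherwise the legacy limit check applies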
if (descriptor->u.segment.l)
|
|
|
|
{
|
|
|
|
if (! IsCanonical(rip)) {
|
|
|
|
BX_ERROR(("branch_far: canonical RIP violation"));
|
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
/* instruction pointer must be in code segment limit else #GP(0) */
|
|
|
|
if (rip > descriptor->u.segment.limit_scaled) {
|
2005-07-31 21:57:27 +04:00
|
|
|
BX_ERROR(("branch_far: RIP > limit"));
|
2005-07-29 10:29:57 +04:00
|
|
|
exception(BX_GP_EXCEPTION, 0, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Load CS:IP from destination pointer */
|
|
|
|
/* Load CS-cache with new segment descriptor */
|
|
|
|
load_cs(selector, descriptor, cpl);
|
|
|
|
|
|
|
|
/* Change the RIP value */
|
|
|
|
RIP = rip;
|
|
|
|
}
|
|
|
|
|
2004-11-02 19:10:02 +03:00
|
|
|
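// Null out any data segment register (ES, DS, FS, GS) whose descriptor DPL is
// numerically lower than the new CPL, so that code running at the new, less
// privileged level cannot keep using a more privileged segment.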
void BX_CPU_C::validate_seg_regs(void)
|
2001-04-10 05:04:59 +04:00
|
|
|
{
|
2005-03-05 00:03:22 +03:00
|
|
|
Bit8u cs_dpl = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl;
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.dpl < cs_dpl)
|
|
|
|
{
|
2001-04-10 05:04:59 +04:00
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.valid = 0;
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value = 0;
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
|
|
|
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.dpl < cs_dpl)
|
|
|
|
{
|
2001-04-10 05:04:59 +04:00
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = 0;
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value = 0;
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
|
|
|
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.dpl < cs_dpl)
|
|
|
|
{
|
2001-04-10 05:04:59 +04:00
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.valid = 0;
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value = 0;
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
|
|
|
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.dpl < cs_dpl)
|
|
|
|
{
|
2001-04-10 05:04:59 +04:00
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.valid = 0;
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value = 0;
|
2005-03-05 00:03:22 +03:00
|
|
|
}
|
2001-04-10 05:04:59 +04:00
|
|
|
}
|