Bochs/bochs/cpu/proc_ctrl.cc
Todd T. Fries bdb89cd364 merge in BRANCH-io-cleanup.
To see the commit logs for this use either cvsweb or
cvs update -r BRANCH-io-cleanup and then 'cvs log' the various files.

In general this provides a generic interface for logging.

logfunctions:: is a class that is inherited by some classes, and also
.   allocated as a standalone global called 'genlog'.  All logging uses
.   one of the ::info(), ::error(), ::ldebug(), ::panic() methods of this
.   class through 'BX_INFO(), BX_ERROR(), BX_DEBUG(), BX_PANIC()' macros
.   respectively.
.
.   An example usage:
.     BX_INFO(("Hello, World!\n"));

iofunctions:: is a class that is allocated once by default, and assigned
as the iofunction of each logfunctions instance.  It is this class that
maintains the file descriptor and other output related code, at this
point using vfprintf().  At some future point, someone may choose to
write a gui 'console' for bochs to which messages would be redirected
simply by assigning a different iofunction class to the various logfunctions
objects.

More cleanup is coming, but this works for now.  If you want to see a lot
of debugging output, change onoff[LOGLEV_DEBUG]=0 to =1 in main.cc.
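
The other levels are used the same way (hypothetical call sites, shown only
to illustrate the macros described above):

    BX_ERROR(("unexpected value %u\n", (unsigned) v));  // recoverable problem
    BX_DEBUG(("entering foo()\n"));    // only shown when LOGLEV_DEBUG is on
    BX_PANIC(("unrecoverable state\n"));   // severe/unimplemented condition

Each macro takes a double-parenthesized, printf-style argument list, just like
the BX_INFO() example above.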

Comments, bugs, flames, to me: todd@fries.net
2001-05-15 14:49:57 +00:00

// Copyright (C) 2001 MandrakeSoft S.A.
//
// MandrakeSoft S.A.
// 43, rue d'Aboukir
// 75002 Paris - France
// http://www.linux-mandrake.com/
// http://www.mandrakesoft.com/
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include "bochs.h"
#define LOG_THIS BX_CPU_THIS_PTR
void
BX_CPU_C::UndefinedOpcode(BxInstruction_t *i)
{
if (i->b1 != 0x63) {
// Windows hits the ARPL instruction a bunch of times;
// logging every occurrence would be too much spew.
BX_INFO(("UndefinedOpcode: %02x causes exception 6\n",
(unsigned) i->b1));
}
exception(BX_UD_EXCEPTION, 0, 0);
}
void
BX_CPU_C::NOP(BxInstruction_t *i)
{
}
void
BX_CPU_C::HLT(BxInstruction_t *i)
{
// hack to panic if HLT comes from BIOS
if ( BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value == 0xf000 )
BX_PANIC(("HALT instruction encountered\n"));
if (CPL!=0) {
BX_INFO(("HLT(): CPL!=0\n"));
exception(BX_GP_EXCEPTION, 0, 0);
return;
}
if ( ! BX_CPU_THIS_PTR eflags.if_ ) {
BX_INFO(("WARNING: HLT instruction with IF=0!\n"));
}
// HLT stops instruction execution and places the processor in a
// HALT state. An enabled interrupt, NMI, or reset will resume
// execution. If an interrupt (including NMI) is used to resume
// execution after HLT, the saved CS:eIP points to the instruction
// following HLT.
// Use an artificial trap bit rather than introducing another state variable.
BX_CPU_THIS_PTR debug_trap |= 0x80000000; // artificial trap
BX_CPU_THIS_PTR async_event = 1; // so processor knows to check
// Execution of this instruction completes. The processor
// will remain in a halt state until one of the above conditions
// is met.
}
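// Sketch (illustrative only; not the actual Bochs event loop): whatever
// services async_event can recognize the halted state via the artificial
// trap bit set above, for example:
#if 0
  if (BX_CPU_THIS_PTR debug_trap & 0x80000000) {
    // halted: don't fetch/execute until an enabled interrupt, NMI,
    // or reset ends the HALT state
  }
#endif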
#if 0
// (kpl) used to be called by ::HLT()
// (mch) Wait for an interrupt
void
BX_CPU_C::wait_for_interrupt()
{
while ( !(BX_INTR) ) {
#if MCH_WAIT_EVENT_HACK
bx_pc_system.wait_for_event();
#else
BX_TICK1();
#endif
}
}
#endif
void
BX_CPU_C::CLTS(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 2
BX_PANIC(("CLTS: not implemented for < 286\n"));
#else
if (v8086_mode()) BX_PANIC(("clts: v8086 mode unsupported\n"));
/* read errata file */
// does CLTS also clear NT flag???
// #GP(0) if CPL is not 0
if (CPL!=0) {
BX_INFO(("CLTS(): CPL!=0\n"));
exception(BX_GP_EXCEPTION, 0, 0);
return;
}
BX_CPU_THIS_PTR cr0.ts = 0;
BX_CPU_THIS_PTR cr0.val32 &= ~0x08;
#endif
}
void
BX_CPU_C::INVD(BxInstruction_t *i)
{
BX_INFO(("---------------\n"));
BX_INFO(("- INVD called -\n"));
BX_INFO(("---------------\n"));
#if BX_CPU_LEVEL >= 4
invalidate_prefetch_q();
if (BX_CPU_THIS_PTR cr0.pe) {
if (CPL!=0) {
BX_INFO(("INVD: CPL!=0\n"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
BX_INSTR_CACHE_CNTRL(BX_INSTR_INVD);
#else
UndefinedOpcode(i);
#endif
}
void
BX_CPU_C::WBINVD(BxInstruction_t *i)
{
BX_INFO(("WBINVD: (ignoring)\n"));
#if BX_CPU_LEVEL >= 4
invalidate_prefetch_q();
if (BX_CPU_THIS_PTR cr0.pe) {
if (CPL!=0) {
BX_INFO(("WBINVD: CPL!=0\n"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
BX_INSTR_CACHE_CNTRL(BX_INSTR_WBINVD);
#else
UndefinedOpcode(i);
#endif
}
void
BX_CPU_C::MOV_DdRd(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 3
BX_PANIC(("MOV_DdRd: not supported on < 386\n"));
#else
Bit32u val_32;
if (v8086_mode()) BX_PANIC(("MOV_DdRd: v8086 mode unsupported\n"));
/* NOTES:
* 32bit operands always used
* r/m field specifies general register
* mod field should always be 11 binary
* reg field specifies which special register
*/
if (i->mod != 0xc0) {
BX_PANIC(("MOV_DdRd(): rm field not a register!\n"));
}
invalidate_prefetch_q();
if (protected_mode() && CPL!=0) {
BX_PANIC(("MOV_DdRd: CPL!=0\n"));
/* #GP(0) if CPL is not 0 */
exception(BX_GP_EXCEPTION, 0, 0);
}
val_32 = BX_READ_32BIT_REG(i->rm);
if (bx_dbg.dreg)
BX_INFO(("MOV_DdRd: DR[%u]=%08xh unhandled\n",
(unsigned) i->nnn, (unsigned) val_32));
switch (i->nnn) {
case 0: // DR0
BX_CPU_THIS_PTR dr0 = val_32;
break;
case 1: // DR1
BX_CPU_THIS_PTR dr1 = val_32;
break;
case 2: // DR2
BX_CPU_THIS_PTR dr2 = val_32;
break;
case 3: // DR3
BX_CPU_THIS_PTR dr3 = val_32;
break;
case 4: // DR4
case 6: // DR6
// DR4 aliased to DR6 by default. With Debug Extensions on,
// access to DR4 causes #UD
#if BX_CPU_LEVEL >= 4
if ( (i->nnn == 4) && (BX_CPU_THIS_PTR cr4 & 0x00000008) ) {
// Debug extensions on
BX_INFO(("MOV_DdRd: access to DR4 causes #UD\n"));
UndefinedOpcode(i);
}
#endif
#if BX_CPU_LEVEL <= 4
// On 386/486 bit12 is settable
BX_CPU_THIS_PTR dr6 = (BX_CPU_THIS_PTR dr6 & 0xffff0ff0) |
(val_32 & 0x0000f00f);
#else
// On Pentium+, bit12 is always zero
BX_CPU_THIS_PTR dr6 = (BX_CPU_THIS_PTR dr6 & 0xffff0ff0) |
(val_32 & 0x0000e00f);
#endif
break;
case 5: // DR5
case 7: // DR7
// Note: 486+ ignore GE and LE flags. On the 386, exact
// data breakpoint matching does not occur unless it is enabled
// by setting the LE and/or GE flags.
// DR5 aliased to DR7 by default. With Debug Extensions on,
// access to DR5 causes #UD
#if BX_CPU_LEVEL >= 4
if ( (i->nnn == 5) && (BX_CPU_THIS_PTR cr4 & 0x00000008) ) {
// Debug extensions (CR4.DE) on
BX_INFO(("MOV_DdRd: access to DR5 causes #UD\n"));
UndefinedOpcode(i);
}
#endif
// Some sanity checks...
if ( val_32 & 0x00002000 ) {
BX_PANIC(("MOV_DdRd: GD bit not supported yet\n"));
// Note: processor clears GD upon entering debug exception
// handler, to allow access to the debug registers
}
if ( (((val_32>>16) & 3)==2) ||
(((val_32>>20) & 3)==2) ||
(((val_32>>24) & 3)==2) ||
(((val_32>>28) & 3)==2) ) {
// IO breakpoints (10b) are not yet supported.
BX_PANIC(("MOV_DdRd: write of %08x contains IO breakpoint\n",
val_32));
}
if ( (((val_32>>18) & 3)==2) ||
(((val_32>>22) & 3)==2) ||
(((val_32>>26) & 3)==2) ||
(((val_32>>30) & 3)==2) ) {
// LEN0..3 contains undefined length specifier (10b)
BX_PANIC(("MOV_DdRd: write of %08x contains undefined LENx\n",
val_32));
}
if ( ((((val_32>>16) & 3)==0) && (((val_32>>18) & 3)!=0)) ||
((((val_32>>20) & 3)==0) && (((val_32>>22) & 3)!=0)) ||
((((val_32>>24) & 3)==0) && (((val_32>>26) & 3)!=0)) ||
((((val_32>>28) & 3)==0) && (((val_32>>30) & 3)!=0)) ) {
// Instruction breakpoint with LENx not 00b (1-byte length)
BX_PANIC(("MOV_DdRd: write of %08x, R/W=00b LEN!=00b\n",
val_32));
}
#if BX_CPU_LEVEL <= 4
// 386/486: you can play with all the bits except b10 is always 1
BX_CPU_THIS_PTR dr7 = val_32 | 0x00000400;
#else
// Pentium+: bits15,14,12 are hardwired to 0, rest are settable.
// Even bits 11,10 are changeable though reserved.
BX_CPU_THIS_PTR dr7 = (val_32 & 0xffff2fff) | 0x00000400;
#endif
break;
default:
BX_PANIC(("MOV_DdRd: control register index out of range\n"));
break;
}
#endif
}
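// For reference, a sketch (illustrative values only) of the DR7 fields that
// the sanity checks above extract:
#if 0
  Bit32u dr7_example = 0x000d0401;
  // bit 0       L0   = 1   : local breakpoint 0 enabled
  // bit 10           = 1   : forced to 1 by the writes above
  // bits 17:16  R/W0 = 01b : break on data writes
  // bits 19:18  LEN0 = 11b : 4-byte length
  unsigned rw0  = (dr7_example >> 16) & 3;   // == 1
  unsigned len0 = (dr7_example >> 18) & 3;   // == 3
#endif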
void
BX_CPU_C::MOV_RdDd(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 3
BX_PANIC(("MOV_RdDd: not supported on < 386\n"));
#else
Bit32u val_32;
if (v8086_mode()) {
BX_INFO(("MOV_RdDd: v8086 mode causes #GP\n"));
exception(BX_GP_EXCEPTION, 0, 0);
}
if (i->mod != 0xc0) {
BX_PANIC(("MOV_RdDd(): rm field not a register!\n"));
UndefinedOpcode(i);
}
if (protected_mode() && (CPL!=0)) {
BX_INFO(("MOV_RdDd: CPL!=0 causes #GP\n"));
exception(BX_GP_EXCEPTION, 0, 0);
return;
}
if (bx_dbg.dreg)
BX_INFO(("MOV_RdDd: DR%u not implemented yet\n", i->nnn));
switch (i->nnn) {
case 0: // DR0
val_32 = BX_CPU_THIS_PTR dr0;
break;
case 1: // DR1
val_32 = BX_CPU_THIS_PTR dr1;
break;
case 2: // DR2
val_32 = BX_CPU_THIS_PTR dr2;
break;
case 3: // DR3
val_32 = BX_CPU_THIS_PTR dr3;
break;
case 4: // DR4
case 6: // DR6
// DR4 aliased to DR6 by default. With Debug Extensions on,
// access to DR4 causes #UD
#if BX_CPU_LEVEL >= 4
if ( (i->nnn == 4) && (BX_CPU_THIS_PTR cr4 & 0x00000008) ) {
// Debug extensions on
BX_INFO(("MOV_RdDd: access to DR4 causes #UD\n"));
UndefinedOpcode(i);
}
#endif
val_32 = BX_CPU_THIS_PTR dr6;
break;
case 5: // DR5
case 7: // DR7
// DR5 aliased to DR7 by default. With Debug Extensions on,
// access to DR5 causes #UD
#if BX_CPU_LEVEL >= 4
if ( (i->nnn == 5) && (BX_CPU_THIS_PTR cr4 & 0x00000008) ) {
// Debug extensions on
BX_INFO(("MOV_RdDd: access to DR5 causes #UD\n"));
UndefinedOpcode(i);
}
#endif
val_32 = BX_CPU_THIS_PTR dr7;
break;
default:
BX_PANIC(("MOV_RdDd: control register index out of range\n"));
val_32 = 0;
}
BX_WRITE_32BIT_REG(i->rm, val_32);
#endif
}
void
BX_CPU_C::LMSW_Ew(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 2
BX_PANIC(("LMSW_Ew(): not supported on 8086!\n"));
#else
Bit16u msw;
Bit32u cr0;
if (v8086_mode()) BX_PANIC(("proc_ctrl: v8086 mode unsupported\n"));
if ( protected_mode() ) {
if ( CPL != 0 ) {
BX_INFO(("LMSW: CPL != 0, CPL=%u\n", (unsigned) CPL));
exception(BX_GP_EXCEPTION, 0, 0);
return;
}
}
if (i->mod == 0xc0) {
msw = BX_READ_16BIT_REG(i->rm);
}
else {
read_virtual_word(i->seg, i->rm_addr, &msw);
}
// LMSW does not affect the PG,CD,NW,AM,WP,NE,ET bits, and cannot clear PE
if ( ((msw & 0x0001)==0) && BX_CPU_THIS_PTR cr0.pe ) {
msw |= 0x0001; // adjust PE bit to current value of 1
}
msw &= 0x000f; // LMSW only affects the low 4 bits (PE, MP, EM, TS)
cr0 = (BX_CPU_THIS_PTR cr0.val32 & 0xfffffff0) | msw;
SetCR0(cr0);
#endif /* BX_CPU_LEVEL < 2 */
}
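// Worked example (illustrative) of the PE-preservation rule above: with the
// CPU already in protected mode (cr0.pe == 1), LMSW with a source value of
// 0x0000 still leaves PE set: msw is adjusted to 0x0001, so only MP, EM,
// and TS end up cleared.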
void
BX_CPU_C::SMSW_Ew(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 2
BX_PANIC(("SMSW_Ew: not supported yet!\n"));
#else
Bit16u msw;
#if BX_CPU_LEVEL == 2
msw = 0xfff0; /* 80286 init value */
msw |= (BX_CPU_THIS_PTR cr0.ts << 3) |
(BX_CPU_THIS_PTR cr0.em << 2) |
(BX_CPU_THIS_PTR cr0.mp << 1) |
BX_CPU_THIS_PTR cr0.pe;
#else /* 386+ */
/* reserved bits 0 ??? */
/* should NE bit be included here ??? */
// should ET bit be included here (AW)
msw = (BX_CPU_THIS_PTR cr0.ts << 3) |
(BX_CPU_THIS_PTR cr0.em << 2) |
(BX_CPU_THIS_PTR cr0.mp << 1) |
BX_CPU_THIS_PTR cr0.pe;
#endif
if (i->mod == 0xc0) {
if (i->os_32) {
BX_WRITE_32BIT_REG(i->rm, msw); // zeros out high 16bits
}
else {
BX_WRITE_16BIT_REG(i->rm, msw);
}
}
else {
write_virtual_word(i->seg, i->rm_addr, &msw);
}
#endif
}
void
BX_CPU_C::MOV_CdRd(BxInstruction_t *i)
{
// mov general register data to control register
#if BX_CPU_LEVEL < 3
BX_PANIC(("MOV_CdRd: not supported on < 386\n"));
#else
Bit32u val_32;
if (v8086_mode()) BX_PANIC(("proc_ctrl: v8086 mode unsupported\n"));
/* NOTES:
* 32bit operands always used
* r/m field specifies general register
* mod field should always be 11 binary
* reg field specifies which special register
*/
if (i->mod != 0xc0) {
BX_PANIC(("MOV_CdRd(): rm field not a register!\n"));
}
invalidate_prefetch_q();
if (protected_mode() && CPL!=0) {
BX_PANIC(("MOV_CdRd: CPL!=0\n"));
/* #GP(0) if CPL is not 0 */
exception(BX_GP_EXCEPTION, 0, 0);
return;
}
val_32 = BX_READ_32BIT_REG(i->rm);
switch (i->nnn) {
case 0: // CR0 (MSW)
// BX_INFO(("MOV_CdRd:CR0: R32 = %08x\n @CS:EIP %04x:%04x ",
// (unsigned) val_32,
// (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
// (unsigned) BX_CPU_THIS_PTR eip));
SetCR0(val_32);
break;
case 1: /* CR1 */
BX_PANIC(("MOV_CdRd: CR1 not implemented yet\n"));
break;
case 2: /* CR2 */
BX_INFO(("MOV_CdRd: CR2 not implemented yet\n"));
if (bx_dbg.creg)
BX_INFO(("MOV_CdRd: CR2 = reg\n"));
BX_CPU_THIS_PTR cr2 = val_32;
break;
case 3: // CR3
if (bx_dbg.creg)
BX_INFO(("MOV_CdRd:(%08x)\n", (unsigned) val_32));
// Reserved bits take on value of MOV instruction
CR3_change(val_32);
BX_INSTR_TLB_CNTRL(BX_INSTR_MOV_CR3, val_32);
break;
case 4: // CR4
#if BX_CPU_LEVEL == 3
BX_PANIC(("MOV_CdRd: write to CR4 of 0x%08x on 386\n",
val_32));
UndefinedOpcode(i);
#else
// Protected mode: #GP(0) if attempt to write a 1 to
// any reserved bit of CR4
BX_INFO(("MOV_CdRd: ignoring write to CR4 of 0x%08x\n",
val_32));
if (val_32) {
BX_PANIC(("MOV_CdRd: (CR4) write of 0x%08x\n",
val_32));
}
// Only allow writes of 0 to CR4 for now.
// No CR4 bit should be written as 1, since CPUID reports
// all of the corresponding features as not supported.
BX_CPU_THIS_PTR cr4 = 0;
#endif
break;
default:
BX_PANIC(("MOV_CdRd: control register index out of range\n"));
break;
}
#endif
}
void
BX_CPU_C::MOV_RdCd(BxInstruction_t *i)
{
// mov control register data to register
#if BX_CPU_LEVEL < 3
BX_PANIC(("MOV_RdCd: not supported on < 386\n"));
#else
Bit32u val_32;
if (v8086_mode()) BX_PANIC(("proc_ctrl: v8086 mode unsupported\n"));
/* NOTES:
* 32bit operands always used
* r/m field specifies general register
* mod field should always be 11 binary
* reg field specifies which special register
*/
if (i->mod != 0xc0) {
BX_PANIC(("MOV_RdCd(): rm field not a register!\n"));
}
if (protected_mode() && CPL!=0) {
BX_PANIC(("MOV_RdCd: CPL!=0\n"));
/* #GP(0) if CPL is not 0 */
exception(BX_GP_EXCEPTION, 0, 0);
return;
}
switch (i->nnn) {
case 0: // CR0 (MSW)
val_32 = BX_CPU_THIS_PTR cr0.val32;
#if 0
BX_INFO(("MOV_RdCd:CR0: R32 = %08x\n @CS:EIP %04x:%04x\n",
(unsigned) val_32,
(unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
(unsigned) BX_CPU_THIS_PTR eip));
#endif
break;
case 1: /* CR1 */
BX_PANIC(("MOV_RdCd: CR1 not implemented yet\n"));
val_32 = 0;
break;
case 2: /* CR2 */
if (bx_dbg.creg)
BX_INFO(("MOV_RdCd: CR2\n"));
val_32 = BX_CPU_THIS_PTR cr2;
break;
case 3: // CR3
if (bx_dbg.creg)
BX_INFO(("MOV_RdCd: reading CR3\n"));
val_32 = BX_CPU_THIS_PTR cr3;
break;
case 4: // CR4
#if BX_CPU_LEVEL == 3
val_32 = 0;
BX_INFO(("MOV_RdCd: read of CR4 causes #UD\n"));
UndefinedOpcode(i);
#else
BX_INFO(("MOV_RdCd: read of CR4\n"));
val_32 = BX_CPU_THIS_PTR cr4;
#endif
break;
default:
BX_PANIC(("MOV_RdCd: control register index out of range\n"));
val_32 = 0;
}
BX_WRITE_32BIT_REG(i->rm, val_32);
#endif
}
void
BX_CPU_C::MOV_TdRd(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 3
BX_PANIC(("MOV_TdRd:\n"));
#elif BX_CPU_LEVEL <= 4
BX_PANIC(("MOV_TdRd:\n"));
#else
// Pentium+ does not have TRx; their functionality was replaced by MSRs.
BX_INFO(("MOV_TdRd: causes #UD\n"));
UndefinedOpcode(i);
#endif
}
void
BX_CPU_C::MOV_RdTd(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 3
BX_PANIC(("MOV_RdTd:\n"));
#elif BX_CPU_LEVEL <= 4
BX_PANIC(("MOV_RdTd:\n"));
#else
// Pentium+ does not have TRx; their functionality was replaced by MSRs.
BX_INFO(("MOV_RdTd: causes #UD\n"));
UndefinedOpcode(i);
#endif
}
void
BX_CPU_C::LOADALL(BxInstruction_t *i)
{
#if BX_CPU_LEVEL < 2
BX_PANIC(("undocumented LOADALL instruction not supported on 8086\n"));
#else
Bit16u msw, tr, flags, ip, ldtr;
Bit16u ds_raw, ss_raw, cs_raw, es_raw;
Bit16u di, si, bp, sp, bx, dx, cx, ax;
Bit16u base_15_0, limit;
Bit8u base_23_16, access;
if (v8086_mode()) BX_PANIC(("proc_ctrl: v8086 mode unsupported\n"));
#if BX_CPU_LEVEL > 2
BX_PANIC(("loadall: not implemented for 386\n"));
/* ??? need to set G and other bits, and compute .limit_scaled also */
/* for all segments CS,DS,SS,... */
#endif
if (BX_CPU_THIS_PTR cr0.pe) {
BX_PANIC(("LOADALL not yet supported for protected mode\n"));
}
BX_PANIC(("LOADALL: handle CR0.val32\n"));
/* MSW */
BX_MEM.read_physical(0x806, 2, &msw);
BX_CPU_THIS_PTR cr0.pe = (msw & 0x01); msw >>= 1;
BX_CPU_THIS_PTR cr0.mp = (msw & 0x01); msw >>= 1;
BX_CPU_THIS_PTR cr0.em = (msw & 0x01); msw >>= 1;
BX_CPU_THIS_PTR cr0.ts = (msw & 0x01);
//BX_INFO(("LOADALL: pe=%u, mp=%u, em=%u, ts=%u\n",
// (unsigned) BX_CPU_THIS_PTR cr0.pe, (unsigned) BX_CPU_THIS_PTR cr0.mp,
// (unsigned) BX_CPU_THIS_PTR cr0.em, (unsigned) BX_CPU_THIS_PTR cr0.ts));
if (BX_CPU_THIS_PTR cr0.pe || BX_CPU_THIS_PTR cr0.mp || BX_CPU_THIS_PTR cr0.em || BX_CPU_THIS_PTR cr0.ts)
BX_PANIC(("LOADALL set PE, MP, EM or TS bits in MSW!\n"));
/* TR */
BX_MEM.read_physical(0x816, 2, &tr);
BX_CPU_THIS_PTR tr.selector.value = tr;
BX_CPU_THIS_PTR tr.selector.rpl = (tr & 0x03); tr >>= 2;
BX_CPU_THIS_PTR tr.selector.ti = (tr & 0x01); tr >>= 1;
BX_CPU_THIS_PTR tr.selector.index = tr;
BX_MEM.read_physical(0x860, 2, &base_15_0);
BX_MEM.read_physical(0x862, 1, &base_23_16);
BX_MEM.read_physical(0x863, 1, &access);
BX_MEM.read_physical(0x864, 2, &limit);
BX_CPU_THIS_PTR tr.cache.valid =
BX_CPU_THIS_PTR tr.cache.p = (access & 0x80) >> 7;
BX_CPU_THIS_PTR tr.cache.dpl = (access & 0x60) >> 5;
BX_CPU_THIS_PTR tr.cache.segment = (access & 0x10) >> 4;
BX_CPU_THIS_PTR tr.cache.type = (access & 0x0f);
BX_CPU_THIS_PTR tr.cache.u.tss286.base = (base_23_16 << 16) | base_15_0;
BX_CPU_THIS_PTR tr.cache.u.tss286.limit = limit;
if ( (BX_CPU_THIS_PTR tr.selector.value & 0xfffc) == 0 ) {
BX_CPU_THIS_PTR tr.cache.valid = 0;
}
if ( BX_CPU_THIS_PTR tr.cache.valid == 0 ) {
}
if ( BX_CPU_THIS_PTR tr.cache.u.tss286.limit < 43 ) {
BX_CPU_THIS_PTR tr.cache.valid = 0;
}
if ( BX_CPU_THIS_PTR tr.cache.type != 1 ) {
BX_CPU_THIS_PTR tr.cache.valid = 0;
}
if ( BX_CPU_THIS_PTR tr.cache.segment ) {
BX_CPU_THIS_PTR tr.cache.valid = 0;
}
if (BX_CPU_THIS_PTR tr.cache.valid==0) {
BX_CPU_THIS_PTR tr.cache.u.tss286.base = 0;
BX_CPU_THIS_PTR tr.cache.u.tss286.limit = 0;
BX_CPU_THIS_PTR tr.cache.p = 0;
BX_CPU_THIS_PTR tr.selector.value = 0;
BX_CPU_THIS_PTR tr.selector.index = 0;
BX_CPU_THIS_PTR tr.selector.ti = 0;
BX_CPU_THIS_PTR tr.selector.rpl = 0;
}
/* FLAGS */
BX_MEM.read_physical(0x818, 2, &flags);
write_flags(flags, 1, 1);
/* IP */
BX_MEM.read_physical(0x81a, 2, &ip);
IP = ip;
/* LDTR */
BX_MEM.read_physical(0x81c, 2, &ldtr);
BX_CPU_THIS_PTR ldtr.selector.value = ldtr;
BX_CPU_THIS_PTR ldtr.selector.rpl = (ldtr & 0x03); ldtr >>= 2;
BX_CPU_THIS_PTR ldtr.selector.ti = (ldtr & 0x01); ldtr >>= 1;
BX_CPU_THIS_PTR ldtr.selector.index = ldtr;
if ( (BX_CPU_THIS_PTR ldtr.selector.value & 0xfffc) == 0 ) {
BX_CPU_THIS_PTR ldtr.cache.valid = 0;
BX_CPU_THIS_PTR ldtr.cache.p = 0;
BX_CPU_THIS_PTR ldtr.cache.segment = 0;
BX_CPU_THIS_PTR ldtr.cache.type = 0;
BX_CPU_THIS_PTR ldtr.cache.u.ldt.base = 0;
BX_CPU_THIS_PTR ldtr.cache.u.ldt.limit = 0;
BX_CPU_THIS_PTR ldtr.selector.value = 0;
BX_CPU_THIS_PTR ldtr.selector.index = 0;
BX_CPU_THIS_PTR ldtr.selector.ti = 0;
}
else {
BX_MEM.read_physical(0x854, 2, &base_15_0);
BX_MEM.read_physical(0x856, 1, &base_23_16);
BX_MEM.read_physical(0x857, 1, &access);
BX_MEM.read_physical(0x858, 2, &limit);
BX_CPU_THIS_PTR ldtr.cache.valid =
BX_CPU_THIS_PTR ldtr.cache.p = access >> 7;
BX_CPU_THIS_PTR ldtr.cache.dpl = (access >> 5) & 0x03;
BX_CPU_THIS_PTR ldtr.cache.segment = (access >> 4) & 0x01;
BX_CPU_THIS_PTR ldtr.cache.type = (access & 0x0f);
BX_CPU_THIS_PTR ldtr.cache.u.ldt.base = (base_23_16 << 16) | base_15_0;
BX_CPU_THIS_PTR ldtr.cache.u.ldt.limit = limit;
if (access == 0) {
BX_PANIC(("loadall: LDTR case access byte=0.\n"));
}
if ( BX_CPU_THIS_PTR ldtr.cache.valid==0 ) {
BX_PANIC(("loadall: ldtr.valid=0\n"));
}
if (BX_CPU_THIS_PTR ldtr.cache.segment) { /* not a system segment */
BX_INFO((" AR byte = %02x\n", (unsigned) access));
BX_PANIC(("loadall: LDTR descriptor cache loaded with non system segment\n"));
}
if ( BX_CPU_THIS_PTR ldtr.cache.type != 2 ) {
BX_PANIC(("loadall: LDTR.type(%u) != 2\n", (unsigned) (access & 0x0f)));
}
}
/* DS */
BX_MEM.read_physical(0x81e, 2, &ds_raw);
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value = ds_raw;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.rpl = (ds_raw & 0x03); ds_raw >>= 2;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.ti = (ds_raw & 0x01); ds_raw >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.index = ds_raw;
BX_MEM.read_physical(0x848, 2, &base_15_0);
BX_MEM.read_physical(0x84a, 1, &base_23_16);
BX_MEM.read_physical(0x84b, 1, &access);
BX_MEM.read_physical(0x84c, 2, &limit);
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.base = (base_23_16 << 16) | base_15_0;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.limit = limit;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.a = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.r_w = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.c_ed = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.executable = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.segment = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.dpl = (access & 0x03); access >>= 2;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid =
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.p = (access & 0x01);
if ( (BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value & 0xfffc) == 0 ) {
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = 0;
}
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid==0 ||
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.segment==0) {
BX_PANIC(("loadall: DS invalid\n"));
}
/* SS */
BX_MEM.read_physical(0x820, 2, &ss_raw);
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value = ss_raw;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.rpl = (ss_raw & 0x03); ss_raw >>= 2;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.ti = (ss_raw & 0x01); ss_raw >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.index = ss_raw;
BX_MEM.read_physical(0x842, 2, &base_15_0);
BX_MEM.read_physical(0x844, 1, &base_23_16);
BX_MEM.read_physical(0x845, 1, &access);
BX_MEM.read_physical(0x846, 2, &limit);
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base = (base_23_16 << 16) | base_15_0;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit = limit;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.a = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.r_w = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.c_ed = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.executable = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = (access & 0x03); access >>= 2;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = (access & 0x01);
if ( (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value & 0xfffc) == 0 ) {
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = 0;
}
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid==0 ||
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment==0) {
BX_PANIC(("loadall: SS invalid\n"));
}
/* CS */
BX_MEM.read_physical(0x822, 2, &cs_raw);
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value = cs_raw;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.rpl = (cs_raw & 0x03); cs_raw >>= 2;
//BX_INFO(("LOADALL: setting cs.selector.rpl to %u\n",
// (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.rpl));
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.ti = (cs_raw & 0x01); cs_raw >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.index = cs_raw;
BX_MEM.read_physical(0x83c, 2, &base_15_0);
BX_MEM.read_physical(0x83e, 1, &base_23_16);
BX_MEM.read_physical(0x83f, 1, &access);
BX_MEM.read_physical(0x840, 2, &limit);
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = (base_23_16 << 16) | base_15_0;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = limit;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.a = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.r_w = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.c_ed = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.executable = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = (access & 0x03); access >>= 2;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = (access & 0x01);
if ( (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value & 0xfffc) == 0 ) {
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = 0;
}
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid==0 ||
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment==0) {
BX_PANIC(("loadall: CS invalid\n"));
}
/* ES */
BX_MEM.read_physical(0x824, 2, &es_raw);
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value = es_raw;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.rpl = (es_raw & 0x03); es_raw >>= 2;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.ti = (es_raw & 0x01); es_raw >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.index = es_raw;
BX_MEM.read_physical(0x836, 2, &base_15_0);
BX_MEM.read_physical(0x838, 1, &base_23_16);
BX_MEM.read_physical(0x839, 1, &access);
BX_MEM.read_physical(0x83a, 2, &limit);
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.u.segment.base = (base_23_16 << 16) | base_15_0;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.u.segment.limit = limit;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.u.segment.a = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.u.segment.r_w = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.u.segment.c_ed = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.u.segment.executable = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.segment = (access & 0x01); access >>= 1;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.dpl = (access & 0x03); access >>= 2;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.p = (access & 0x01);
#if 0
BX_INFO(("cs.dpl = %02x\n", (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl));
BX_INFO(("ss.dpl = %02x\n", (unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl));
BX_INFO(("BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].dpl = %02x\n", (unsigned) BX_CPU_THIS_PTR ds.cache.dpl));
BX_INFO(("BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].dpl = %02x\n", (unsigned) BX_CPU_THIS_PTR es.cache.dpl));
BX_INFO(("LOADALL: setting cs.selector.rpl to %u\n",
(unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.rpl));
BX_INFO(("LOADALL: setting ss.selector.rpl to %u\n",
(unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.rpl));
BX_INFO(("LOADALL: setting ds.selector.rpl to %u\n",
(unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.rpl));
BX_INFO(("LOADALL: setting es.selector.rpl to %u\n",
(unsigned) BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.rpl));
#endif
if ( (BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value & 0xfffc) == 0 ) {
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.valid = 0;
}
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.valid==0 ||
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.segment==0) {
BX_PANIC(("loadall: ES invalid\n"));
}
/* DI */
BX_MEM.read_physical(0x826, 2, &di);
DI = di;
/* SI */
BX_MEM.read_physical(0x828, 2, &si);
SI = si;
/* BP */
BX_MEM.read_physical(0x82a, 2, &bp);
BP = bp;
/* SP */
BX_MEM.read_physical(0x82c, 2, &sp);
SP = sp;
/* BX */
BX_MEM.read_physical(0x82e, 2, &bx);
BX = bx;
/* DX */
BX_MEM.read_physical(0x830, 2, &dx);
DX = dx;
/* CX */
BX_MEM.read_physical(0x832, 2, &cx);
CX = cx;
/* AX */
BX_MEM.read_physical(0x834, 2, &ax);
AX = ax;
/* GDTR */
BX_MEM.read_physical(0x84e, 2, &base_15_0);
BX_MEM.read_physical(0x850, 1, &base_23_16);
BX_MEM.read_physical(0x851, 1, &access);
BX_MEM.read_physical(0x852, 2, &limit);
BX_CPU_THIS_PTR gdtr.base = (base_23_16 << 16) | base_15_0;
BX_CPU_THIS_PTR gdtr.limit = limit;
#if 0
if (access)
BX_INFO(("LOADALL: GDTR access bits not 0 (%02x).\n",
(unsigned) access));
#endif
/* IDTR */
BX_MEM.read_physical(0x85a, 2, &base_15_0);
BX_MEM.read_physical(0x85c, 1, &base_23_16);
BX_MEM.read_physical(0x85d, 1, &access);
BX_MEM.read_physical(0x85e, 2, &limit);
BX_CPU_THIS_PTR idtr.base = (base_23_16 << 16) | base_15_0;
BX_CPU_THIS_PTR idtr.limit = limit;
#endif
}
void
BX_CPU_C::CPUID(BxInstruction_t *i)
{
#if BX_CPU_LEVEL >= 4
unsigned type, family, model, stepping, features;
#endif
invalidate_prefetch_q();
#if BX_CPU_LEVEL >= 4
switch (EAX) {
case 0:
// EAX: highest input value understood by CPUID
// EBX: vendor ID string
// EDX: vendor ID string
// ECX: vendor ID string
EAX = 1; // 486 or pentium
EBX = 0x756e6547; // "Genu"
EDX = 0x49656e69; // "ineI"
ECX = 0x6c65746e; // "ntel"
break;
case 1:
// EAX[3:0] Stepping ID
// EAX[7:4] Model: starts at 1
// EAX[11:8] Family: 4=486, 5=Pentium, 6=PPro
// EAX[13:12] Type: 0=OEM,1=overdrive,2=dual cpu,3=reserved
// EAX[31:14] Reserved
// EBX: Reserved (0)
// ECX: Reserved (0)
// EDX: Feature Flags
// [0:0] FPU on chip
// [1:1] VME: Virtual-8086 Mode enhancements
// [2:2] DE: Debug Extensions (I/O breakpoints)
// [3:3] PSE: Page Size Extensions
// [4:4] TSC: Time Stamp Counter
// [5:5] MSR: RDMSR and WRMSR support
// [6:6] PAE: Physical Address Extensions
// [7:7] MCE: Machine Check Exception
// [8:8] CXS: CMPXCHG8B instruction
// [9:9] APIC: APIC on Chip
// [11:10] Reserved
// [12:12] MTRR: Memory Type Range Reg
// [13:13] PGE/PTE Global Bit
// [14:14] MCA: Machine Check Architecture
// [15:15] CMOV: Cond Mov/Cmp Instructions
// [22:16] Reserved
// [23:23] MMX Technology
// [31:24] Reserved
features = 0; // start with none
type = 0; // OEM
#if BX_CPU_LEVEL == 4
family = 4;
# if BX_SUPPORT_FPU
// 486dx
model = 1;
stepping = 3;
features |= 0x01;
# else
// 486sx
model = 2;
stepping = 3;
# endif
#elif BX_CPU_LEVEL == 5
family = 5;
model = 1; // Pentium (60,66)
stepping = 3; // ???
# if BX_SUPPORT_FPU
features |= 0x01;
# endif
#else
family = 6;
BX_PANIC(("CPUID: not implemented for > 5\n"));
#endif
EAX = (family <<8) | (model<<4) | stepping;
EBX = ECX = 0; // reserved
EDX = features;
break;
default:
EAX = EBX = ECX = EDX = 0; // Reserved, undefined
break;
}
#else
BX_PANIC(("CPUID: not available on < late 486\n"));
#endif
}
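// Illustrative only (hypothetical guest-side code, not part of Bochs): how a
// guest would typically unpack the EAX=1 values filled in above.
#if 0
  Bit32u eax, edx;                        // as returned by CPUID with EAX=1
  unsigned stepping =  eax        & 0x0f;
  unsigned model    = (eax >>  4) & 0x0f;
  unsigned family   = (eax >>  8) & 0x0f; // 4=486, 5=Pentium
  Boolean  has_fpu  = (edx & 0x01);       // feature bit 0: FPU on chip
#endif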
void
BX_CPU_C::SetCR0(Bit32u val_32)
{
// from either MOV_CdRd() or debug functions
// protection checks made already or forcing from debug
Boolean prev_pe, prev_pg;
prev_pe = BX_CPU_THIS_PTR cr0.pe;
prev_pg = BX_CPU_THIS_PTR cr0.pg;
BX_CPU_THIS_PTR cr0.pe = val_32 & 0x01;
BX_CPU_THIS_PTR cr0.mp = (val_32 >> 1) & 0x01;
BX_CPU_THIS_PTR cr0.em = (val_32 >> 2) & 0x01;
BX_CPU_THIS_PTR cr0.ts = (val_32 >> 3) & 0x01;
// cr0.et is hardwired to 1
#if BX_CPU_LEVEL >= 4
BX_CPU_THIS_PTR cr0.ne = (val_32 >> 5) & 0x01;
BX_CPU_THIS_PTR cr0.wp = (val_32 >> 16) & 0x01;
BX_CPU_THIS_PTR cr0.am = (val_32 >> 18) & 0x01;
BX_CPU_THIS_PTR cr0.nw = (val_32 >> 29) & 0x01;
BX_CPU_THIS_PTR cr0.cd = (val_32 >> 30) & 0x01;
#endif
BX_CPU_THIS_PTR cr0.pg = (val_32 >> 31) & 0x01;
// handle reserved bits behaviour
#if BX_CPU_LEVEL == 3
BX_CPU_THIS_PTR cr0.val32 = val_32 | 0x7ffffff0;
#elif BX_CPU_LEVEL == 4
BX_CPU_THIS_PTR cr0.val32 = val_32 & 0xe005002f;
#elif BX_CPU_LEVEL == 5
BX_CPU_THIS_PTR cr0.val32 = val_32 | 0x00000010;
#elif BX_CPU_LEVEL == 6
BX_CPU_THIS_PTR cr0.val32 = (val_32 | 0x00000010) & 0xe005003f;
#else
#error "MOV_CdRd: implement reserved bits behaviour for this CPU_LEVEL"
#endif
//if (BX_CPU_THIS_PTR cr0.ts)
// BX_INFO(("MOV_CdRd:CR0.TS set 0x%x\n", (unsigned) val_32));
if (prev_pe==0 && BX_CPU_THIS_PTR cr0.pe) {
enter_protected_mode();
}
else if (prev_pe==1 && BX_CPU_THIS_PTR cr0.pe==0) {
enter_real_mode();
}
if (prev_pg==0 && BX_CPU_THIS_PTR cr0.pg)
enable_paging();
else if (prev_pg==1 && BX_CPU_THIS_PTR cr0.pg==0)
disable_paging();
}
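// Worked example (illustrative): with BX_CPU_LEVEL == 5, a caller passing
// val_32 = 0x00000001 ends up with cr0.val32 == 0x00000011, because the
// mask above forces ET (bit 4) to 1; if PE was previously clear, the
// prev_pe check then switches the CPU into protected mode.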
void
BX_CPU_C::RSM(BxInstruction_t *i)
{
#if BX_CPU_LEVEL >= 4
invalidate_prefetch_q();
BX_PANIC(("RSM: System Management Mode not implemented yet\n"));
#else
UndefinedOpcode(i);
#endif
}
void
BX_CPU_C::RDTSC(BxInstruction_t *i)
{
#if BX_CPU_LEVEL >= 5
BX_PANIC(("RDTSC: not implemented yet\n"));
#else
UndefinedOpcode(i);
#endif
}
void
BX_CPU_C::RDMSR(BxInstruction_t *i)
{
#if BX_CPU_LEVEL >= 5
BX_PANIC(("RDMSR: not implemented yet\n"));
#else
UndefinedOpcode(i);
#endif
}
void
BX_CPU_C::WRMSR(BxInstruction_t *i)
{
#if BX_CPU_LEVEL >= 5
invalidate_prefetch_q();
BX_PANIC(( "WRMSR: not implemented yet\n"));
#else
UndefinedOpcode(i);
#endif
}
#if BX_X86_DEBUGGER
Bit32u
BX_CPU_C::hwdebug_compare(Bit32u laddr_0, unsigned size,
unsigned opa, unsigned opb)
{
// Support x86 hardware debug facilities (DR0..DR7)
Bit32u dr7 = BX_CPU_THIS_PTR dr7;
Boolean ibpoint_found = 0;
Bit32u laddr_n = laddr_0 + (size - 1);
Bit32u dr0, dr1, dr2, dr3;
Bit32u dr0_n, dr1_n, dr2_n, dr3_n;
Bit32u len0, len1, len2, len3;
static unsigned alignment_mask[4] =
// 00b=1 01b=2 10b=undef 11b=4
{ 0xffffffff, 0xfffffffe, 0xffffffff, 0xfffffffc };
Bit32u dr0_op, dr1_op, dr2_op, dr3_op;
len0 = (dr7>>18) & 3;
len1 = (dr7>>22) & 3;
len2 = (dr7>>26) & 3;
len3 = (dr7>>30) & 3;
dr0 = BX_CPU_THIS_PTR dr0 & alignment_mask[len0];
dr1 = BX_CPU_THIS_PTR dr1 & alignment_mask[len1];
dr2 = BX_CPU_THIS_PTR dr2 & alignment_mask[len2];
dr3 = BX_CPU_THIS_PTR dr3 & alignment_mask[len3];
dr0_n = dr0 + len0;
dr1_n = dr1 + len1;
dr2_n = dr2 + len2;
dr3_n = dr3 + len3;
dr0_op = (dr7>>16) & 3;
dr1_op = (dr7>>20) & 3;
dr2_op = (dr7>>24) & 3;
dr3_op = (dr7>>28) & 3;
// See if this instruction address matches any breakpoints
if ( (dr7 & 0x00000003) ) {
if ( (dr0_op==opa || dr0_op==opb) &&
(laddr_0 <= dr0_n) &&
(laddr_n >= dr0) )
ibpoint_found = 1;
}
if ( (dr7 & 0x0000000c) ) {
if ( (dr1_op==opa || dr1_op==opb) &&
(laddr_0 <= dr1_n) &&
(laddr_n >= dr1) )
ibpoint_found = 1;
}
if ( (dr7 & 0x00000030) ) {
if ( (dr2_op==opa || dr2_op==opb) &&
(laddr_0 <= dr2_n) &&
(laddr_n >= dr2) )
ibpoint_found = 1;
}
if ( (dr7 & 0x000000c0) ) {
if ( (dr3_op==opa || dr3_op==opb) &&
(laddr_0 <= dr3_n) &&
(laddr_n >= dr3) )
ibpoint_found = 1;
}
// If *any* enabled breakpoint matched, then we need to
// set status bits for *all* breakpoints, even disabled ones,
// as long as they meet the other breakpoint criteria.
// This code is similar to that above, only without the
// breakpoint-enabled check. It seems weird to duplicate the effort,
// but it's more efficient to do it this way.
if (ibpoint_found) {
// dr6_mask is the return value. These bits represent the bits to
// be OR'd into DR6 as a result of the debug event.
Bit32u dr6_mask=0;
if ( (dr0_op==opa || dr0_op==opb) &&
(laddr_0 <= dr0_n) &&
(laddr_n >= dr0) )
dr6_mask |= 0x01;
if ( (dr1_op==opa || dr1_op==opb) &&
(laddr_0 <= dr1_n) &&
(laddr_n >= dr1) )
dr6_mask |= 0x02;
if ( (dr2_op==opa || dr2_op==opb) &&
(laddr_0 <= dr2_n) &&
(laddr_n >= dr2) )
dr6_mask |= 0x04;
if ( (dr3_op==opa || dr3_op==opb) &&
(laddr_0 <= dr3_n) &&
(laddr_n >= dr3) )
dr6_mask |= 0x08;
return(dr6_mask);
}
return(0);
}
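// Worked example of the range test above (illustrative values): with
// DR0 = 0x00001002 and LEN0 = 11b (4 bytes), alignment_mask[3] forces the
// watched range to 0x1000..0x1003 (dr0 = 0x1000, dr0_n = 0x1003).  A 2-byte
// access at laddr_0 = 0x1003 gives laddr_n = 0x1004; since 0x1003 <= 0x1003
// and 0x1004 >= 0x1000, that breakpoint matches.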
#endif