Dynamically allocate VMCB_CACHE only if SVM is actually enabled by the CPU model

Also reduces the include dependency on svm.h.
Stanislav Shwartsman 2024-01-12 00:56:09 +02:00
parent d6769cecfc
commit 0eab037907
23 changed files with 171 additions and 79 deletions
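The pattern behind the change, sketched below in self-contained C++ with simplified, hypothetical names (CpuSketch and svm_supported are illustrative, not the actual BX_CPU_C members): the CPU header only forward-declares VMCB_CACHE and keeps a pointer member, the full definition (svm.h in the real tree) is included only where the pointer is dereferenced, and the cache is heap-allocated during initialization solely when the configured CPU model reports the SVM ISA extension.

// Minimal sketch of the allocation pattern, not the actual Bochs sources.
// "CpuSketch" and "svm_supported" are illustrative names.

struct VMCB_CACHE;                  // forward declaration is enough for a pointer member

class CpuSketch {
public:
  explicit CpuSketch(bool svm_supported);
  ~CpuSketch();
private:
  VMCB_CACHE *vmcb = nullptr;       // was an embedded object before this commit
};

// The full definition (kept in svm.h in the real tree) is needed only in the
// translation units that dereference the pointer or call new/delete on it.
struct VMCB_CACHE {
  // ... host_state, ctrls ...
};

CpuSketch::CpuSketch(bool svm_supported)
{
  if (svm_supported)                // e.g. the CPU model exposes the SVM extension
    vmcb = new VMCB_CACHE;          // allocate only when SVM is actually enabled
}

CpuSketch::~CpuSketch()
{
  delete vmcb;                      // no-op when vmcb is still nullptr
}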


@@ -798,7 +798,9 @@ typedef void (*xmm_pfp_3op_mask)(BxPackedXmmRegister *opdst, const BxPackedXmmRe
#endif
#if BX_SUPPORT_SVM
#include "svm.h"
struct SVM_HOST_STATE;
struct SVM_CONTROLS;
struct VMCB_CACHE;
#endif
enum monitor_armed_by {
@@ -1086,7 +1088,7 @@ public: // for now...
#if BX_SUPPORT_MEMTYPE
BxMemtype vmcb_memtype;
#endif
VMCB_CACHE vmcb;
VMCB_CACHE *vmcb;
// make SVM integration easier
#define SVM_GIF (BX_CPU_THIS_PTR svm_gif)


@@ -27,6 +27,10 @@
#include "cpuid.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#if BX_SUPPORT_APIC
#include "apic.h"
#endif


@@ -513,6 +513,10 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP16_Ep(bxInstruction_c *i)
BX_NEXT_TRACE(i);
}
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET16(bxInstruction_c *i)
{
BX_INSTR_FAR_BRANCH_ORIGIN();


@@ -533,6 +533,10 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP32_Ep(bxInstruction_c *i)
BX_NEXT_TRACE(i);
}
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET32(bxInstruction_c *i)
{
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);


@@ -449,6 +449,10 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP64_Ep(bxInstruction_c *i)
BX_NEXT_TRACE(i);
}
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET64(bxInstruction_c *i)
{
invalidate_prefetch_q();


@@ -29,6 +29,10 @@
#include "apic.h"
#endif
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#include "iodev/iodev.h"
bool BX_CPU_C::handleWaitForEvent(void)


@@ -25,6 +25,10 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#include "param_names.h"
#include "iodev/iodev.h"


@@ -24,6 +24,10 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SAHF(bxInstruction_c *i)
{
set_SF((AH & 0x80) >> 7);


@@ -25,6 +25,10 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::setEFlags(Bit32u new_eflags)
{
Bit32u eflags = BX_CPU_THIS_PTR eflags;


@@ -34,6 +34,10 @@
#define BX_CPUID_SUPPORT_ISA_EXTENSION(feature) \
(this->is_cpu_extension_supported(feature))
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#if BX_CPU_LEVEL >= 4
bx_generic_cpuid_t::bx_generic_cpuid_t(BX_CPU_C *cpu): bx_cpuid_t(cpu)


@@ -37,6 +37,10 @@
#include "avx/amx.h"
#endif
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#include <stdlib.h>
BX_CPU_C::BX_CPU_C(unsigned id): bx_cpuid(id)
@@ -142,6 +146,13 @@ void BX_CPU_C::initialize(void)
}
#endif
#if BX_SUPPORT_SVM
vmcb = NULL;
if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SVM)) {
vmcb = new VMCB_CACHE;
}
#endif
#if BX_CONFIGURE_MSRS
for (unsigned n=0; n < BX_MSR_MAX_INDEX; n++) {
BX_CPU_THIS_PTR msrs[n] = 0;
@@ -745,6 +756,10 @@ BX_CPU_C::~BX_CPU_C()
delete amx;
#endif
#if BX_SUPPORT_SVM
delete vmcb;
#endif
#if InstrumentCPU
delete stats;
#endif


@@ -25,6 +25,10 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#include "iodev/iodev.h"
//


@@ -28,6 +28,10 @@
#include "msr.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#if BX_SUPPORT_APIC
#include "apic.h"
#endif


@@ -27,6 +27,10 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#if BX_SUPPORT_APIC
#include "apic.h"
#endif


@@ -30,6 +30,10 @@
#include "apic.h"
#endif
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#include "memory/memory-bochs.h"
#include "pc_system.h"
@@ -1658,8 +1662,8 @@ bx_phy_address BX_CPU_C::nested_walk_long_mode(bx_phy_address guest_paddr, unsig
BxMemtype entry_memtype[5] = { BX_MEMTYPE_INVALID, BX_MEMTYPE_INVALID, BX_MEMTYPE_INVALID, BX_MEMTYPE_INVALID, BX_MEMTYPE_INVALID };
bool nx_fault = false;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb.ctrls;
SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb.host_state;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb->ctrls;
SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb->host_state;
bx_phy_address ppf = ctrls->ncr3 & BX_CR3_PAGING_MASK;
Bit64u offset_mask = ((BX_CONST64(1) << BX_CPU_THIS_PTR linaddr_width) - 1);
unsigned combined_access = BX_COMBINED_ACCESS_WRITE | BX_COMBINED_ACCESS_USER;
@@ -1725,8 +1729,8 @@ bx_phy_address BX_CPU_C::nested_walk_PAE(bx_phy_address guest_paddr, unsigned rw
unsigned combined_access = BX_COMBINED_ACCESS_WRITE | BX_COMBINED_ACCESS_USER;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb.ctrls;
SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb.host_state;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb->ctrls;
SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb->host_state;
bx_phy_address ncr3 = ctrls->ncr3 & 0xffffffe0;
unsigned index = (guest_paddr >> 30) & 0x3;
Bit64u pdptr;
@@ -1798,8 +1802,8 @@ bx_phy_address BX_CPU_C::nested_walk_legacy(bx_phy_address guest_paddr, unsigned
BxMemtype entry_memtype[2] = { BX_MEMTYPE_INVALID };
int leaf;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb.ctrls;
SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb.host_state;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb->ctrls;
SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb->host_state;
bx_phy_address ppf = ctrls->ncr3 & BX_CR3_PAGING_MASK;
unsigned combined_access = BX_COMBINED_ACCESS_WRITE | BX_COMBINED_ACCESS_USER;
@@ -1849,7 +1853,7 @@ bx_phy_address BX_CPU_C::nested_walk_legacy(bx_phy_address guest_paddr, unsigned
bx_phy_address BX_CPU_C::nested_walk(bx_phy_address guest_paddr, unsigned rw, bool is_page_walk)
{
SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb.host_state;
SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb->host_state;
BX_DEBUG(("Nested walk for guest paddr 0x" FMT_PHY_ADDRX, guest_paddr));
@@ -2335,7 +2339,7 @@ bool BX_CPU_C::dbg_xlate_linear2phy(bx_address laddr, bx_phy_address *phy, bx_ad
bx_phy_address pt_address = BX_CPU_THIS_PTR cr3 & BX_CR3_PAGING_MASK;
#if BX_SUPPORT_SVM
if (nested_walk) {
pt_address = LPFOf(BX_CPU_THIS_PTR vmcb.ctrls.ncr3);
pt_address = LPFOf(BX_CPU_THIS_PTR vmcb->ctrls.ncr3);
}
#endif


@@ -26,6 +26,10 @@
#include "cpuid.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#include "pc_system.h"
#include "gui/gui.h"


@@ -24,6 +24,10 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::ARPL_EwGw(bxInstruction_c *i)
{
Bit16u op2_16, op1_16;


@@ -26,6 +26,10 @@
#include "smm.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#if BX_CPU_LEVEL >= 3
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RSM(bxInstruction_c *i)


@@ -25,6 +25,10 @@
#include "cpuid.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
void BX_CPP_AttrRegparmN(1) BX_CPU_C::BOUND_GwMa(bxInstruction_c *i)
{
Bit16s op1_16 = BX_READ_16BIT_REG(i->dst());


@@ -24,11 +24,13 @@
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#include "cpuid.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#include "cpuid.h"
#include "gui/paramtree.h"
#include "decoder/ia_opcodes.h"
@@ -315,7 +317,7 @@ void BX_CPU_C::SvmExitSaveGuestState(void)
vmcb_write8(SVM_CONTROL_INTERRUPT_SHADOW, interrupts_inhibited(BX_INHIBIT_INTERRUPTS));
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb.ctrls;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb->ctrls;
if (ctrls->nested_paging) {
vmcb_write64(SVM_GUEST_PAT, BX_CPU_THIS_PTR msr.pat.u64);
@@ -552,7 +554,7 @@ bool BX_CPU_C::SvmEnterLoadCheckGuestState(void)
if (paged_real_mode)
BX_CPU_THIS_PTR cr0.val32 |= BX_CR0_PG_MASK;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb.ctrls;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb->ctrls;
if (! ctrls->nested_paging) {
if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !long_mode()) {
if (! CheckPDPTR(BX_CPU_THIS_PTR cr3)) {
@@ -656,7 +658,7 @@ void BX_CPU_C::Svm_Vmexit(int reason, Bit64u exitinfo1, Bit64u exitinfo2)
// STEP 0: Update exit reason
//
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb.ctrls;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb->ctrls;
vmcb_write64(SVM_CONTROL64_EXITCODE, (Bit64u) ((Bit64s) reason));
vmcb_write64(SVM_CONTROL64_EXITINFO1, exitinfo1);
@@ -682,7 +684,7 @@ void BX_CPU_C::Svm_Vmexit(int reason, Bit64u exitinfo1, Bit64u exitinfo2)
//
// Step 2:
//
SvmExitLoadHostState(&BX_CPU_THIS_PTR vmcb.host_state);
SvmExitLoadHostState(&BX_CPU_THIS_PTR vmcb->host_state);
//
// STEP 3: Go back to SVM host
@@ -705,7 +707,7 @@ extern struct BxExceptionInfo exceptions_info[];
bool BX_CPU_C::SvmInjectEvents(void)
{
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb.ctrls;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb->ctrls;
ctrls->eventinj = vmcb_read32(SVM_CONTROL32_EVENT_INJECTION);
if ((ctrls->eventinj & 0x80000000) == 0) return true;
@@ -770,7 +772,7 @@ void BX_CPU_C::SvmInterceptException(unsigned type, unsigned vector, Bit16u errc
BX_ASSERT(vector < 32);
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb.ctrls;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb->ctrls;
BX_ASSERT(type == BX_HARDWARE_EXCEPTION || type == BX_SOFTWARE_EXCEPTION);
@@ -790,7 +792,7 @@ void BX_CPU_C::SvmInterceptException(unsigned type, unsigned vector, Bit16u errc
ctrls->exitintinfo_error_code = errcode;
ctrls->exitintinfo = vector | (BX_HARDWARE_EXCEPTION << 8);
if (errcode_valid)
BX_CPU_THIS_PTR vmcb.ctrls.exitintinfo |= (1 << 11); // error code delivered
BX_CPU_THIS_PTR vmcb->ctrls.exitintinfo |= (1 << 11); // error code delivered
return;
}
@@ -829,7 +831,7 @@ void BX_CPU_C::SvmInterceptIO(bxInstruction_c *i, unsigned port, unsigned len)
bx_phy_address pAddr;
// access_read_physical cannot read 2 bytes cross 4K boundary :(
pAddr = BX_CPU_THIS_PTR vmcb.ctrls.iopm_base + (port / 8);
pAddr = BX_CPU_THIS_PTR vmcb->ctrls.iopm_base + (port / 8);
bitmap[0] = read_physical_byte(pAddr, MEMTYPE(resolve_memtype(pAddr)), BX_IO_BITMAP_ACCESS);
pAddr++;
@@ -918,7 +920,7 @@ void BX_CPU_C::SvmInterceptMSR(unsigned op, Bit32u msr)
else if (msr >= 0xc0010000 && msr <= 0xc0011fff) msr_map_offset = 4096;
if (msr_map_offset >= 0) {
bx_phy_address msr_bitmap_addr = BX_CPU_THIS_PTR vmcb.ctrls.msrpm_base + msr_map_offset;
bx_phy_address msr_bitmap_addr = BX_CPU_THIS_PTR vmcb->ctrls.msrpm_base + msr_map_offset;
Bit32u msr_offset = (msr & 0x1fff) * 2 + op;
bx_phy_address pAddr = msr_bitmap_addr + (msr_offset / 8);
@@ -967,7 +969,7 @@ void BX_CPU_C::SvmInterceptTaskSwitch(Bit16u tss_selector, unsigned source, bool
void BX_CPU_C::SvmInterceptPAUSE(void)
{
if (BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_PAUSE_FILTER)) {
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb.ctrls;
SVM_CONTROLS *ctrls = &BX_CPU_THIS_PTR vmcb->ctrls;
if (ctrls->pause_filter_count) {
ctrls->pause_filter_count--;
return;
@@ -1004,12 +1006,12 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMRUN(bxInstruction_c *i)
//
// Step 1: Save host state to physical memory indicated in SVM_HSAVE_PHY_ADDR_MSR
//
SvmEnterSaveHostState(&BX_CPU_THIS_PTR vmcb.host_state);
SvmEnterSaveHostState(&BX_CPU_THIS_PTR vmcb->host_state);
//
// Step 2: Load control information from the VMCB
//
if (!SvmEnterLoadCheckControls(&BX_CPU_THIS_PTR vmcb.ctrls))
if (!SvmEnterLoadCheckControls(&BX_CPU_THIS_PTR vmcb->ctrls))
Svm_Vmexit(SVM_VMEXIT_INVALID);
//
@@ -1234,26 +1236,26 @@ void BX_CPU_C::register_svm_state(bx_param_c *parent)
bx_list_c *vmcb_ctrls = new bx_list_c(svm, "VMCB_CTRLS");
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, cr_rd_ctrl, BX_CPU_THIS_PTR vmcb.ctrls.cr_rd_ctrl);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, cr_wr_ctrl, BX_CPU_THIS_PTR vmcb.ctrls.cr_wr_ctrl);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, dr_rd_ctrl, BX_CPU_THIS_PTR vmcb.ctrls.dr_rd_ctrl);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, dr_wr_ctrl, BX_CPU_THIS_PTR vmcb.ctrls.dr_wr_ctrl);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, exceptions_intercept, BX_CPU_THIS_PTR vmcb.ctrls.exceptions_intercept);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, intercept_vector0, BX_CPU_THIS_PTR vmcb.ctrls.intercept_vector[0]);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, intercept_vector1, BX_CPU_THIS_PTR vmcb.ctrls.intercept_vector[1]);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, iopm_base, BX_CPU_THIS_PTR vmcb.ctrls.iopm_base);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, msrpm_base, BX_CPU_THIS_PTR vmcb.ctrls.msrpm_base);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, exitintinfo, BX_CPU_THIS_PTR vmcb.ctrls.exitintinfo);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, exitintinfo_errcode, BX_CPU_THIS_PTR vmcb.ctrls.exitintinfo_error_code);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, eventinj, BX_CPU_THIS_PTR vmcb.ctrls.eventinj);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, cr_rd_ctrl, BX_CPU_THIS_PTR vmcb->ctrls.cr_rd_ctrl);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, cr_wr_ctrl, BX_CPU_THIS_PTR vmcb->ctrls.cr_wr_ctrl);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, dr_rd_ctrl, BX_CPU_THIS_PTR vmcb->ctrls.dr_rd_ctrl);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, dr_wr_ctrl, BX_CPU_THIS_PTR vmcb->ctrls.dr_wr_ctrl);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, exceptions_intercept, BX_CPU_THIS_PTR vmcb->ctrls.exceptions_intercept);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, intercept_vector0, BX_CPU_THIS_PTR vmcb->ctrls.intercept_vector[0]);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, intercept_vector1, BX_CPU_THIS_PTR vmcb->ctrls.intercept_vector[1]);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, iopm_base, BX_CPU_THIS_PTR vmcb->ctrls.iopm_base);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, msrpm_base, BX_CPU_THIS_PTR vmcb->ctrls.msrpm_base);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, exitintinfo, BX_CPU_THIS_PTR vmcb->ctrls.exitintinfo);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, exitintinfo_errcode, BX_CPU_THIS_PTR vmcb->ctrls.exitintinfo_error_code);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, eventinj, BX_CPU_THIS_PTR vmcb->ctrls.eventinj);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, v_tpr, BX_CPU_THIS_PTR vmcb.ctrls.v_tpr);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, v_intr_prio, BX_CPU_THIS_PTR vmcb.ctrls.v_intr_prio);
BXRS_PARAM_BOOL(vmcb_ctrls, v_ignore_tpr, BX_CPU_THIS_PTR vmcb.ctrls.v_ignore_tpr);
BXRS_PARAM_BOOL(vmcb_ctrls, v_intr_masking, BX_CPU_THIS_PTR vmcb.ctrls.v_intr_masking);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, v_intr_vector, BX_CPU_THIS_PTR vmcb.ctrls.v_intr_vector);
BXRS_PARAM_BOOL(vmcb_ctrls, nested_paging, BX_CPU_THIS_PTR vmcb.ctrls.nested_paging);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, ncr3, BX_CPU_THIS_PTR vmcb.ctrls.ncr3);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, v_tpr, BX_CPU_THIS_PTR vmcb->ctrls.v_tpr);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, v_intr_prio, BX_CPU_THIS_PTR vmcb->ctrls.v_intr_prio);
BXRS_PARAM_BOOL(vmcb_ctrls, v_ignore_tpr, BX_CPU_THIS_PTR vmcb->ctrls.v_ignore_tpr);
BXRS_PARAM_BOOL(vmcb_ctrls, v_intr_masking, BX_CPU_THIS_PTR vmcb->ctrls.v_intr_masking);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, v_intr_vector, BX_CPU_THIS_PTR vmcb->ctrls.v_intr_vector);
BXRS_PARAM_BOOL(vmcb_ctrls, nested_paging, BX_CPU_THIS_PTR vmcb->ctrls.nested_paging);
BXRS_HEX_PARAM_FIELD(vmcb_ctrls, ncr3, BX_CPU_THIS_PTR vmcb->ctrls.ncr3);
//
// VMCB Host State
@@ -1262,7 +1264,7 @@ void BX_CPU_C::register_svm_state(bx_param_c *parent)
bx_list_c *host = new bx_list_c(svm, "VMCB_HOST_STATE");
for(unsigned n=0; n<4; n++) {
bx_segment_reg_t *segment = &BX_CPU_THIS_PTR vmcb.host_state.sregs[n];
bx_segment_reg_t *segment = &BX_CPU_THIS_PTR vmcb->host_state.sregs[n];
bx_list_c *sreg = new bx_list_c(host, segname[n]);
BXRS_HEX_PARAM_FIELD(sreg, selector, segment->selector.value);
BXRS_HEX_PARAM_FIELD(sreg, valid, segment->cache.valid);
@@ -1281,21 +1283,21 @@ void BX_CPU_C::register_svm_state(bx_param_c *parent)
}
bx_list_c *GDTR = new bx_list_c(host, "GDTR");
BXRS_HEX_PARAM_FIELD(GDTR, base, BX_CPU_THIS_PTR vmcb.host_state.gdtr.base);
BXRS_HEX_PARAM_FIELD(GDTR, limit, BX_CPU_THIS_PTR vmcb.host_state.gdtr.limit);
BXRS_HEX_PARAM_FIELD(GDTR, base, BX_CPU_THIS_PTR vmcb->host_state.gdtr.base);
BXRS_HEX_PARAM_FIELD(GDTR, limit, BX_CPU_THIS_PTR vmcb->host_state.gdtr.limit);
bx_list_c *IDTR = new bx_list_c(host, "IDTR");
BXRS_HEX_PARAM_FIELD(IDTR, base, BX_CPU_THIS_PTR vmcb.host_state.idtr.base);
BXRS_HEX_PARAM_FIELD(IDTR, limit, BX_CPU_THIS_PTR vmcb.host_state.idtr.limit);
BXRS_HEX_PARAM_FIELD(IDTR, base, BX_CPU_THIS_PTR vmcb->host_state.idtr.base);
BXRS_HEX_PARAM_FIELD(IDTR, limit, BX_CPU_THIS_PTR vmcb->host_state.idtr.limit);
BXRS_HEX_PARAM_FIELD(host, efer, BX_CPU_THIS_PTR vmcb.host_state.efer.val32);
BXRS_HEX_PARAM_FIELD(host, cr0, BX_CPU_THIS_PTR vmcb.host_state.cr0.val32);
BXRS_HEX_PARAM_FIELD(host, cr3, BX_CPU_THIS_PTR vmcb.host_state.cr3);
BXRS_HEX_PARAM_FIELD(host, cr4, BX_CPU_THIS_PTR vmcb.host_state.cr4.val32);
BXRS_HEX_PARAM_FIELD(host, eflags, BX_CPU_THIS_PTR vmcb.host_state.eflags);
BXRS_HEX_PARAM_FIELD(host, rip, BX_CPU_THIS_PTR vmcb.host_state.rip);
BXRS_HEX_PARAM_FIELD(host, rsp, BX_CPU_THIS_PTR vmcb.host_state.rsp);
BXRS_HEX_PARAM_FIELD(host, rax, BX_CPU_THIS_PTR vmcb.host_state.rax);
BXRS_HEX_PARAM_FIELD(host, efer, BX_CPU_THIS_PTR vmcb->host_state.efer.val32);
BXRS_HEX_PARAM_FIELD(host, cr0, BX_CPU_THIS_PTR vmcb->host_state.cr0.val32);
BXRS_HEX_PARAM_FIELD(host, cr3, BX_CPU_THIS_PTR vmcb->host_state.cr3);
BXRS_HEX_PARAM_FIELD(host, cr4, BX_CPU_THIS_PTR vmcb->host_state.cr4.val32);
BXRS_HEX_PARAM_FIELD(host, eflags, BX_CPU_THIS_PTR vmcb->host_state.eflags);
BXRS_HEX_PARAM_FIELD(host, rip, BX_CPU_THIS_PTR vmcb->host_state.rip);
BXRS_HEX_PARAM_FIELD(host, rsp, BX_CPU_THIS_PTR vmcb->host_state.rsp);
BXRS_HEX_PARAM_FIELD(host, rax, BX_CPU_THIS_PTR vmcb->host_state.rax);
}
#endif // BX_SUPPORT_SVM


@@ -239,7 +239,7 @@ enum SVM_intercept_codes {
#define SVM_GUEST_LAST_EXCEPTION_FROM_MSR (0x688)
#define SVM_GUEST_LAST_EXCEPTION_TO_MSR (0x690)
typedef struct bx_SVM_HOST_STATE
struct SVM_HOST_STATE
{
bx_segment_reg_t sregs[4];
@@ -256,10 +256,9 @@ typedef struct bx_SVM_HOST_STATE
Bit64u rax;
BxPackedRegister pat_msr;
};
} SVM_HOST_STATE;
typedef struct bx_SVM_GUEST_STATE
struct SVM_GUEST_STATE
{
bx_segment_reg_t sregs[4];
@@ -282,10 +281,9 @@ typedef struct bx_SVM_GUEST_STATE
unsigned cpl;
bool inhibit_interrupts;
};
} SVM_GUEST_STATE;
typedef struct bx_SVM_CONTROLS
struct SVM_CONTROLS
{
Bit16u cr_rd_ctrl;
Bit16u cr_wr_ctrl;
@@ -314,26 +312,25 @@ typedef struct bx_SVM_CONTROLS
Bit16u pause_filter_count;
//Bit16u pause_filter_threshold;
} SVM_CONTROLS;
};
#if defined(NEED_CPU_REG_SHORTCUTS)
#define SVM_V_TPR (BX_CPU_THIS_PTR vmcb.ctrls.v_tpr)
#define SVM_V_INTR_PRIO (BX_CPU_THIS_PTR vmcb.ctrls.v_intr_prio)
#define SVM_V_IGNORE_TPR (BX_CPU_THIS_PTR vmcb.ctrls.v_ignore_tpr)
#define SVM_V_INTR_MASKING (BX_CPU_THIS_PTR vmcb.ctrls.v_intr_masking)
#define SVM_V_INTR_VECTOR (BX_CPU_THIS_PTR vmcb.ctrls.v_intr_vector)
#define SVM_V_TPR (BX_CPU_THIS_PTR vmcb->ctrls.v_tpr)
#define SVM_V_INTR_PRIO (BX_CPU_THIS_PTR vmcb->ctrls.v_intr_prio)
#define SVM_V_IGNORE_TPR (BX_CPU_THIS_PTR vmcb->ctrls.v_ignore_tpr)
#define SVM_V_INTR_MASKING (BX_CPU_THIS_PTR vmcb->ctrls.v_intr_masking)
#define SVM_V_INTR_VECTOR (BX_CPU_THIS_PTR vmcb->ctrls.v_intr_vector)
#define SVM_HOST_IF (BX_CPU_THIS_PTR vmcb.host_state.eflags & EFlagsIFMask)
#define SVM_HOST_IF (BX_CPU_THIS_PTR vmcb->host_state.eflags & EFlagsIFMask)
#endif
typedef struct bx_VMCB_CACHE
struct VMCB_CACHE
{
SVM_HOST_STATE host_state;
SVM_CONTROLS ctrls;
} VMCB_CACHE;
};
// ========================
// SVM intercept controls
@@ -389,24 +386,24 @@ enum {
};
#define SVM_INTERCEPT(intercept_bitnum) \
(BX_CPU_THIS_PTR vmcb.ctrls.intercept_vector[intercept_bitnum / 32] & (1 << (intercept_bitnum & 31)))
(BX_CPU_THIS_PTR vmcb->ctrls.intercept_vector[intercept_bitnum / 32] & (1 << (intercept_bitnum & 31)))
#define SVM_EXCEPTION_INTERCEPTED(vector) \
(BX_CPU_THIS_PTR vmcb.ctrls.exceptions_intercept & (1<<(vector)))
(BX_CPU_THIS_PTR vmcb->ctrls.exceptions_intercept & (1<<(vector)))
#define SVM_CR_READ_INTERCEPTED(reg_num) \
(BX_CPU_THIS_PTR vmcb.ctrls.cr_rd_ctrl & (1<<(reg_num)))
(BX_CPU_THIS_PTR vmcb->ctrls.cr_rd_ctrl & (1<<(reg_num)))
#define SVM_CR_WRITE_INTERCEPTED(reg_num) \
(BX_CPU_THIS_PTR vmcb.ctrls.cr_wr_ctrl & (1<<(reg_num)))
(BX_CPU_THIS_PTR vmcb->ctrls.cr_wr_ctrl & (1<<(reg_num)))
#define SVM_DR_READ_INTERCEPTED(reg_num) \
(BX_CPU_THIS_PTR vmcb.ctrls.dr_rd_ctrl & (1<<(reg_num)))
(BX_CPU_THIS_PTR vmcb->ctrls.dr_rd_ctrl & (1<<(reg_num)))
#define SVM_DR_WRITE_INTERCEPTED(reg_num) \
(BX_CPU_THIS_PTR vmcb.ctrls.dr_wr_ctrl & (1<<(reg_num)))
(BX_CPU_THIS_PTR vmcb->ctrls.dr_wr_ctrl & (1<<(reg_num)))
#define SVM_NESTED_PAGING_ENABLED (BX_CPU_THIS_PTR vmcb.ctrls.nested_paging)
#define SVM_NESTED_PAGING_ENABLED (BX_CPU_THIS_PTR vmcb->ctrls.nested_paging)
#endif // BX_SUPPORT_SVM


@@ -24,6 +24,10 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
// Notes:
// ======


@@ -27,6 +27,10 @@
#include "msr.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SVM
#include "svm.h"
#endif
#include "decoder/ia_opcodes.h"
const Bit64u XSAVEC_COMPACTION_ENABLED = BX_CONST64(0x8000000000000000);