SVM and VMX share TSC offset code

This commit is contained in:
Stanislav Shwartsman 2011-12-25 19:53:23 +00:00
parent 75bda1d5cd
commit a44c1b8e1e
5 changed files with 29 additions and 34 deletions

View File

@ -644,13 +644,6 @@ typedef struct
Bit32u tsc_aux;
#endif
// TSC: Time Stamp Counter
// Instead of storing a counter and incrementing it every instruction, we
// remember the time in ticks that it was reset to zero. With a little
// algebra, we can also support setting it to something other than zero.
// Don't read this directly; use get_TSC and set_TSC to access the TSC.
Bit64u tsc_last_reset;
#if BX_CPU_LEVEL >= 6
// SYSENTER/SYSEXIT instruction msr's
Bit32u sysenter_cs_msr;
@ -951,11 +944,21 @@ public: // for now...
#if BX_CPU_LEVEL >= 5
bx_cr4_t cr4;
Bit32u cr4_suppmask;
bx_efer_t efer;
Bit32u efer_suppmask;
#endif
#if BX_CPU_LEVEL >= 5
bx_efer_t efer;
Bit32u efer_suppmask;
// TSC: Time Stamp Counter
// Instead of storing a counter and incrementing it every instruction, we
// remember the time in ticks that it was reset to zero. With a little
// algebra, we can also support setting it to something other than zero.
// Don't read this directly; use get_TSC and set_TSC to access the TSC.
Bit64u tsc_last_reset;
#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
Bit64s tsc_offset;
#endif
#endif
#if BX_CPU_LEVEL >= 6
@ -4206,7 +4209,6 @@ public: // for now...
BX_SMF Bit32u VMX_Read_VTPR(void);
BX_SMF void VMX_Write_VTPR(Bit8u vtpr);
#endif
BX_SMF Bit64s VMX_TSC_Offset(void);
// vmexit reasons
BX_SMF void VMexit_Instruction(bxInstruction_c *i, Bit32u reason) BX_CPP_AttrRegparmN(2);
BX_SMF void VMexit_Event(bxInstruction_c *i, unsigned type, unsigned vector,

View File

@ -420,7 +420,12 @@ void BX_CPU_C::register_state(void)
BXRS_HEX_PARAM_FIELD(cpu, XCR0, xcr0.val32);
}
#endif
#if BX_CPU_LEVEL >= 5
BXRS_HEX_PARAM_FIELD(cpu, tsc_last_reset, tsc_last_reset);
#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
BXRS_HEX_PARAM_FIELD(cpu, tsc_offset, tsc_offset);
#endif
#endif
for(n=0; n<6; n++) {
bx_segment_reg_t *segment = &BX_CPU_THIS_PTR sregs[n];
bx_list_c *sreg = new bx_list_c(cpu, strseg(segment), 12);
@ -503,7 +508,6 @@ void BX_CPU_C::register_state(void)
BXRS_HEX_PARAM_FIELD(MSR, tsc_aux, msr.tsc_aux);
}
#endif
BXRS_HEX_PARAM_FIELD(MSR, tsc_last_reset, msr.tsc_last_reset);
#if BX_CPU_LEVEL >= 6
BXRS_HEX_PARAM_FIELD(MSR, sysenter_cs_msr, msr.sysenter_cs_msr);
BXRS_HEX_PARAM_FIELD(MSR, sysenter_esp_msr, msr.sysenter_esp_msr);

View File

@ -475,14 +475,9 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDPMC(bxInstruction_c *i)
#if BX_CPU_LEVEL >= 5
Bit64u BX_CPU_C::get_TSC(void)
{
Bit64u tsc = bx_pc_system.time_ticks() - BX_CPU_THIS_PTR msr.tsc_last_reset;
#if BX_SUPPORT_VMX
if (BX_CPU_THIS_PTR in_vmx_guest)
tsc += VMX_TSC_Offset();
#endif
#if BX_SUPPORT_SVM
if (BX_CPU_THIS_PTR in_svm_guest)
tsc += BX_CPU_THIS_PTR vmcb.ctrls.tsc_offset;
Bit64u tsc = bx_pc_system.time_ticks() - BX_CPU_THIS_PTR tsc_last_reset;
#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
tsc += BX_CPU_THIS_PTR tsc_offset;
#endif
return tsc;
}
@ -491,7 +486,7 @@ void BX_CPU_C::set_TSC(Bit64u newval)
{
// compute the correct setting of tsc_last_reset so that a get_TSC()
// will return newval
BX_CPU_THIS_PTR msr.tsc_last_reset = bx_pc_system.time_ticks() - newval;
BX_CPU_THIS_PTR tsc_last_reset = bx_pc_system.time_ticks() - newval;
// verify
BX_ASSERT(get_TSC() == newval);

View File

@ -185,17 +185,6 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_INVLPG(bxInstruction_c *i, bx_addre
}
}
Bit64s BX_CPU_C::VMX_TSC_Offset(void)
{
BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
if (VMEXIT(VMX_VM_EXEC_CTRL2_TSC_OFFSET))
return (Bit64s) BX_CPU_THIS_PTR vmcs.tsc_offset;
else
return 0;
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_RDTSC(bxInstruction_c *i)
{
BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);

View File

@ -324,7 +324,6 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
vm->vm_exceptions_bitmap = VMread32(VMCS_32BIT_CONTROL_EXECUTION_BITMAP);
vm->vm_pf_mask = VMread32(VMCS_32BIT_CONTROL_PAGE_FAULT_ERR_CODE_MASK);
vm->vm_pf_match = VMread32(VMCS_32BIT_CONTROL_PAGE_FAULT_ERR_CODE_MATCH);
vm->tsc_offset = VMread64(VMCS_64BIT_CONTROL_TSC_OFFSET);
vm->vm_cr0_mask = VMread_natural(VMCS_CONTROL_CR0_GUEST_HOST_MASK);
vm->vm_cr4_mask = VMread_natural(VMCS_CONTROL_CR4_GUEST_HOST_MASK);
vm->vm_cr0_read_shadow = VMread_natural(VMCS_CONTROL_CR0_READ_SHADOW);
@ -1493,6 +1492,11 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
// Load Guest State -> VMENTER
//
if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_TSC_OFFSET)
BX_CPU_THIS_PTR tsc_offset = VMread64(VMCS_64BIT_CONTROL_TSC_OFFSET);
else
BX_CPU_THIS_PTR tsc_offset = 0;
#if BX_SUPPORT_X86_64
#if BX_SUPPORT_VMX >= 2
if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_EFER_MSR) {
@ -1925,6 +1929,8 @@ void BX_CPU_C::VMexitLoadHostState(void)
bx_bool x86_64_host = 0;
Bit32u vmexit_ctrls = BX_CPU_THIS_PTR vmcs.vmexit_ctrls;
BX_CPU_THIS_PTR tsc_offset = 0;
#if BX_SUPPORT_X86_64
if (vmexit_ctrls & VMX_VMEXIT_CTRL1_HOST_ADDR_SPACE_SIZE) {
BX_DEBUG(("VMEXIT to x86-64 host"));
@ -3172,7 +3178,6 @@ void BX_CPU_C::register_vmx_state(bx_param_c *parent)
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_pf_match, BX_CPU_THIS_PTR vmcs.vm_pf_match);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, io_bitmap_addr1, BX_CPU_THIS_PTR vmcs.io_bitmap_addr[0]);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, io_bitmap_addr2, BX_CPU_THIS_PTR vmcs.io_bitmap_addr[1]);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, tsc_offset, BX_CPU_THIS_PTR vmcs.tsc_offset);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, msr_bitmap_addr, BX_CPU_THIS_PTR vmcs.msr_bitmap_addr);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr0_mask, BX_CPU_THIS_PTR vmcs.vm_cr0_mask);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr0_read_shadow, BX_CPU_THIS_PTR vmcs.vm_cr0_read_shadow);