Improvements in NVMM

 * Handle the FPU differently: limit the states via the given mask rather
   than via XCR0. Align the save areas to 64 bytes. Provide an initial
   gXCR0, to be sure that XCR0_X87 is set. Reset XSTATE_BV when the state
   is modified by the virtualizer, to force a reload from memory.

 * Hide RDTSCP.

 * Zero-extend RBX/RCX/RDX when handling the NVMM CPUID signature.

 * Take ECX and not RCX on MSR instructions.
maxv 2019-01-20 16:55:21 +00:00
parent f4728349c8
commit 116f85b12d
3 changed files with 42 additions and 34 deletions
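
For context on the guest-register bullets above, here is a minimal userland sketch of the reasoning (the demo harness, the variable names and the sample RCX value are illustrative, not NVMM code): CPUID reports the hypervisor signature in the 32-bit EBX/ECX/EDX, so the 64-bit shadow GPRs must be zeroed before the 4-byte memcpy or stale upper bits survive, and RDMSR/WRMSR take their MSR index from ECX only, so the emulated exit must truncate the guest's RCX to 32 bits.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the per-vCPU GPR array used in the diff. */
static uint64_t gprs[3];

int
main(void)
{
	/*
	 * CPUID 0x40000000: a 4-byte memcpy alone would leave stale upper
	 * bits in the 64-bit shadow registers, so zero-extend first.
	 */
	gprs[0] = 0;
	gprs[1] = 0;
	gprs[2] = 0;
	memcpy(&gprs[0], "___ ", 4);
	memcpy(&gprs[1], "NVMM", 4);
	memcpy(&gprs[2], " ___", 4);

	/*
	 * RDMSR/WRMSR: the MSR index lives in ECX, not RCX, so mask off
	 * the upper 32 bits of the guest's RCX before using it.
	 */
	uint64_t guest_rcx = 0xdeadbeef0000008bULL;	/* made-up value */
	uint32_t msr = (uint32_t)(guest_rcx & 0xFFFFFFFF);

	printf("signature: %.4s%.4s%.4s, msr index: 0x%x\n",
	    (char *)&gprs[0], (char *)&gprs[1], (char *)&gprs[2],
	    (unsigned)msr);
	return 0;
}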

fpu.h

@@ -1,4 +1,4 @@
-/* $NetBSD: fpu.h,v 1.13 2018/10/05 18:51:52 maxv Exp $ */
+/* $NetBSD: fpu.h,v 1.14 2019/01/20 16:55:21 maxv Exp $ */
#ifndef _X86_FPU_H_
#define _X86_FPU_H_
@@ -14,8 +14,8 @@ struct trapframe;
void fpuinit(struct cpu_info *);
void fpuinit_mxcsr_mask(void);
-void fpu_area_save(void *);
-void fpu_area_restore(void *);
+void fpu_area_save(void *, uint64_t);
+void fpu_area_restore(void *, uint64_t);
void fpusave_lwp(struct lwp *, bool);
void fpusave_cpu(bool);

fpu.c

@@ -1,4 +1,4 @@
-/* $NetBSD: fpu.c,v 1.48 2018/10/05 18:51:52 maxv Exp $ */
+/* $NetBSD: fpu.c,v 1.49 2019/01/20 16:55:21 maxv Exp $ */
/*
* Copyright (c) 2008 The NetBSD Foundation, Inc. All
@@ -96,7 +96,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.48 2018/10/05 18:51:52 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.49 2019/01/20 16:55:21 maxv Exp $");
#include "opt_multiprocessor.h"
@@ -209,7 +209,7 @@ fpu_clear_amd(void)
}
void
-fpu_area_save(void *area)
+fpu_area_save(void *area, uint64_t xsave_features)
{
clts();
@@ -221,16 +221,16 @@ fpu_area_save(void *area)
fxsave(area);
break;
case FPU_SAVE_XSAVE:
-xsave(area, x86_xsave_features);
+xsave(area, xsave_features);
break;
case FPU_SAVE_XSAVEOPT:
-xsaveopt(area, x86_xsave_features);
+xsaveopt(area, xsave_features);
break;
}
}
void
-fpu_area_restore(void *area)
+fpu_area_restore(void *area, uint64_t xsave_features)
{
clts();
@@ -247,7 +247,7 @@ fpu_area_restore(void *area)
case FPU_SAVE_XSAVEOPT:
if (cpu_vendor == CPUVENDOR_AMD)
fpu_clear_amd();
-xrstor(area, x86_xsave_features);
+xrstor(area, xsave_features);
break;
}
}
@@ -262,7 +262,7 @@ fpu_lwp_install(struct lwp *l)
KASSERT(pcb->pcb_fpcpu == NULL);
ci->ci_fpcurlwp = l;
pcb->pcb_fpcpu = ci;
-fpu_area_restore(&pcb->pcb_savefpu);
+fpu_area_restore(&pcb->pcb_savefpu, x86_xsave_features);
}
void
@@ -532,7 +532,7 @@ fpusave_cpu(bool save)
pcb = lwp_getpcb(l);
if (save) {
-fpu_area_save(&pcb->pcb_savefpu);
+fpu_area_save(&pcb->pcb_savefpu, x86_xsave_features);
}
stts();

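The fpu.h/fpu.c half of the change above threads the XSAVE component mask through as an explicit argument instead of always using the global x86_xsave_features, and the commit also aligns the save areas to 64 bytes as the XSAVE instructions require. A rough self-contained sketch of the resulting call pattern (the stub bodies, the mask values and the area size are placeholders; only the two-argument signatures and the alignment mirror the diff):

#include <stdint.h>

/* Placeholder save area; real XSAVE areas must be 64-byte aligned. */
struct xsave_area_sketch {
	uint8_t data[1024];	/* placeholder size */
} __attribute__((aligned(64)));

/* Stubs standing in for the xsave/xrstor instruction wrappers. */
static void xsave_stub(void *area, uint64_t mask) { (void)area; (void)mask; }
static void xrstor_stub(void *area, uint64_t mask) { (void)area; (void)mask; }

static const uint64_t x86_xsave_features = 0x7;	/* assumed: x87|SSE|AVX */
static const uint64_t svm_xcr0_mask = 0x3;	/* assumed: x87|SSE only */

/* New two-argument signatures: the caller chooses the component mask. */
void
fpu_area_save(void *area, uint64_t xsave_features)
{
	xsave_stub(area, xsave_features);
}

void
fpu_area_restore(void *area, uint64_t xsave_features)
{
	xrstor_stub(area, xsave_features);
}

int
main(void)
{
	static struct xsave_area_sketch host_fpu, guest_fpu;

	/* Regular lwp switch: save/restore everything the host uses. */
	fpu_area_save(&host_fpu, x86_xsave_features);
	fpu_area_restore(&host_fpu, x86_xsave_features);

	/* NVMM guest entry/exit: only the states exposed to the guest. */
	fpu_area_restore(&guest_fpu, svm_xcr0_mask);
	fpu_area_save(&guest_fpu, svm_xcr0_mask);
	return 0;
}

The point of the extra parameter is that NVMM can save and restore exactly the guest-visible components, while the normal context-switch path keeps passing the full host mask.
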
nvmm_x86_svm.c

@@ -1,4 +1,4 @@
-/* $NetBSD: nvmm_x86_svm.c,v 1.15 2019/01/13 10:07:50 maxv Exp $ */
+/* $NetBSD: nvmm_x86_svm.c,v 1.16 2019/01/20 16:55:21 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.15 2019/01/13 10:07:50 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.16 2019/01/20 16:55:21 maxv Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -524,7 +524,7 @@ struct svm_cpudata {
uint64_t fsbase;
uint64_t kernelgsbase;
bool ts_set;
-struct xsave_header hfpu __aligned(16);
+struct xsave_header hfpu __aligned(64);
/* Event state */
bool int_window_exit;
@@ -535,7 +535,7 @@ struct svm_cpudata {
uint64_t gprs[NVMM_X64_NGPR];
uint64_t drs[NVMM_X64_NDR];
uint64_t tsc_offset;
-struct xsave_header gfpu __aligned(16);
+struct xsave_header gfpu __aligned(64);
};
static void
@@ -779,12 +779,16 @@ svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
cpudata->gprs[NVMM_X64_GPR_RDX] = svm_xcr0_mask >> 32;
break;
case 0x40000000:
+cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
+cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
+cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
break;
-case 0x80000001: /* No SVM in ECX. The rest is tunable. */
+case 0x80000001: /* No SVM, no RDTSCP. The rest is tunable. */
cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID_SVM;
+cpudata->gprs[NVMM_X64_GPR_RDX] &= ~CPUID_RDTSCP;
break;
default:
break;
@@ -1007,7 +1011,7 @@ svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
}
-exit->u.msr.msr = cpudata->gprs[NVMM_X64_GPR_RCX];
+exit->u.msr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
if (info == 1) {
uint64_t rdx, rax;
@@ -1092,15 +1096,15 @@ svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
{
struct svm_cpudata *cpudata = vcpu->cpudata;
-if (x86_xsave_features != 0) {
+cpudata->ts_set = (rcr0() & CR0_TS) != 0;
+fpu_area_save(&cpudata->hfpu, svm_xcr0_mask);
+fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask);
+if (svm_xcr0_mask != 0) {
cpudata->hxcr0 = rdxcr(0);
wrxcr(0, cpudata->gxcr0);
}
-cpudata->ts_set = (rcr0() & CR0_TS) != 0;
-fpu_area_save(&cpudata->hfpu);
-fpu_area_restore(&cpudata->gfpu);
}
static void
@@ -1108,17 +1112,17 @@ svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
{
struct svm_cpudata *cpudata = vcpu->cpudata;
-fpu_area_save(&cpudata->gfpu);
-fpu_area_restore(&cpudata->hfpu);
+if (svm_xcr0_mask != 0) {
+cpudata->gxcr0 = rdxcr(0);
+wrxcr(0, cpudata->hxcr0);
+}
+fpu_area_save(&cpudata->gfpu, svm_xcr0_mask);
+fpu_area_restore(&cpudata->hfpu, svm_xcr0_mask);
if (cpudata->ts_set) {
stts();
}
-if (x86_xsave_features != 0) {
-cpudata->gxcr0 = rdxcr(0);
-wrxcr(0, cpudata->hxcr0);
-}
}
static void
@@ -1580,6 +1584,7 @@ svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
/* Must always be set. */
vmcb->state.efer = EFER_SVME;
+cpudata->gxcr0 = XCR0_X87;
/* Init XSAVE header. */
cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
@@ -1779,13 +1784,11 @@ svm_vcpu_setstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
vmcb->ctrl.v |= __SHIFTIN(state->crs[NVMM_X64_CR_CR8],
VMCB_CTRL_V_TPR);
+/* Clear unsupported XCR0 bits, set mandatory X87 bit. */
if (svm_xcr0_mask != 0) {
-/* Clear illegal XCR0 bits, set mandatory X87 bit. */
cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
cpudata->gxcr0 &= svm_xcr0_mask;
cpudata->gxcr0 |= XCR0_X87;
-} else {
-cpudata->gxcr0 = 0;
}
}
@@ -1846,6 +1849,11 @@ svm_vcpu_setstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
+if (svm_xcr0_mask != 0) {
+/* Reset XSTATE_BV, to force a reload. */
+cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
+}
}
svm_vmcb_cache_update(vmcb, flags);
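
A closing note on the last hunk: XRSTOR only loads a component from memory when its bit is set in the header's XSTATE_BV; a clear bit means the component is taken from its init state, which would silently discard guest FPU contents the virtualizer just wrote into the area. Resetting XSTATE_BV to the full guest-visible mask forces the reload. A small sketch of the idea with a simplified header layout (the struct and function names are illustrative, not the kernel's):

#include <stdint.h>

/* Simplified XSAVE area layout: 512-byte legacy region, then the header. */
struct xsave_area_sketch {
	uint8_t		legacy[512];	/* x87/SSE state */
	uint64_t	xstate_bv;	/* components with live state in memory */
	uint64_t	xcomp_bv;
	uint8_t		rsvd[48];
};

/*
 * After the virtualizer rewrites the guest FPU image, mark every
 * guest-visible component as present in memory. A clear bit would make
 * XRSTOR ignore the freshly written bytes and use the init state instead.
 */
static void
force_fpu_reload(struct xsave_area_sketch *area, uint64_t guest_xcr0_mask)
{
	area->xstate_bv = guest_xcr0_mask;
}

int
main(void)
{
	static struct xsave_area_sketch gfpu;

	force_fpu_reload(&gfpu, 0x3 /* assumed x87|SSE guest mask */);
	return 0;
}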