Make it possible for an emulator to set the protection of the guest pages.

For some reason I had initially concluded that it wasn't doable; verily it
is, so let's do it.

The reserved 'flags' argument of nvmm_gpa_map() becomes 'prot' and takes
mmap-like protection codes.
This commit is contained in:
maxv 2019-03-21 20:21:40 +00:00
parent cbdf9e6245
commit e8b93c6953
9 changed files with 68 additions and 43 deletions

View File

@ -1,4 +1,4 @@
.\" $NetBSD: libnvmm.3,v 1.11 2019/02/05 15:03:35 wiz Exp $
.\" $NetBSD: libnvmm.3,v 1.12 2019/03/21 20:21:40 maxv Exp $
.\"
.\" Copyright (c) 2018, 2019 The NetBSD Foundation, Inc.
.\" All rights reserved.
@ -27,7 +27,7 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd February 5, 2019
.Dd March 19, 2019
.Dt LIBNVMM 3
.Os
.Sh NAME
@ -68,7 +68,7 @@
.Fn nvmm_hva_unmap "struct nvmm_machine *mach" "uintptr_t hva" "size_t size"
.Ft int
.Fn nvmm_gpa_map "struct nvmm_machine *mach" "uintptr_t hva" "gpaddr_t gpa" \
"size_t size" "int flags"
"size_t size" "int prot"
.Ft int
.Fn nvmm_gpa_unmap "struct nvmm_machine *mach" "uintptr_t hva" "gpaddr_t gpa" \
"size_t size"

View File

@ -1,4 +1,4 @@
/* $NetBSD: libnvmm.c,v 1.6 2018/12/27 07:22:31 maxv Exp $ */
/* $NetBSD: libnvmm.c,v 1.7 2019/03/21 20:21:40 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@ -374,7 +374,7 @@ nvmm_vcpu_run(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
int
nvmm_gpa_map(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
size_t size, int flags)
size_t size, int prot)
{
struct nvmm_ioc_gpa_map args;
int ret;
@ -391,7 +391,7 @@ nvmm_gpa_map(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
args.hva = hva;
args.gpa = gpa;
args.size = size;
args.flags = flags;
args.prot = prot;
ret = ioctl(nvmm_fd, NVMM_IOC_GPA_MAP, &args);
if (ret == -1) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: nvmm.c,v 1.10 2019/03/14 19:10:27 maxv Exp $ */
/* $NetBSD: nvmm.c,v 1.11 2019/03/21 20:21:40 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.10 2019/03/14 19:10:27 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.11 2019/03/21 20:21:40 maxv Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -41,6 +41,7 @@ __KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.10 2019/03/14 19:10:27 maxv Exp $");
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/mman.h>
#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
@ -493,7 +494,7 @@ nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
if (exit->u.mem.gpa >= mach->gpa_end) {
break;
}
if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, VM_PROT_ALL)) {
if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, exit->u.mem.prot)) {
break;
}
}
@ -706,6 +707,11 @@ nvmm_gpa_map(struct nvmm_ioc_gpa_map *args)
if (error)
return error;
if ((args->prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) != 0) {
error = EINVAL;
goto out;
}
if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 ||
(args->hva % PAGE_SIZE) != 0) {
error = EINVAL;
@ -740,7 +746,7 @@ nvmm_gpa_map(struct nvmm_ioc_gpa_map *args)
/* Map the uobj into the machine address space, as pageable. */
error = uvm_map(&mach->vm->vm_map, &gpa, args->size, uobj, off, 0,
UVM_MAPFLAG(UVM_PROT_RWX, UVM_PROT_RWX, UVM_INH_NONE,
UVM_MAPFLAG(args->prot, UVM_PROT_RWX, UVM_INH_NONE,
UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
if (error) {
uao_detach(uobj);

View File

@ -1,4 +1,4 @@
/* $NetBSD: nvmm.h,v 1.4 2019/01/26 15:12:20 maxv Exp $ */
/* $NetBSD: nvmm.h,v 1.5 2019/03/21 20:21:40 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@ -64,14 +64,8 @@ enum nvmm_exit_reason {
NVMM_EXIT_INVALID = 0xFFFFFFFFFFFFFFFF
};
enum nvmm_exit_memory_perm {
NVMM_EXIT_MEMORY_READ,
NVMM_EXIT_MEMORY_WRITE,
NVMM_EXIT_MEMORY_EXEC
};
struct nvmm_exit_memory {
enum nvmm_exit_memory_perm perm;
int prot;
gpaddr_t gpa;
uint8_t inst_len;
uint8_t inst_bytes[15];

View File

@ -1,4 +1,4 @@
/* $NetBSD: nvmm_ioctl.h,v 1.3 2019/01/08 07:29:46 maxv Exp $ */
/* $NetBSD: nvmm_ioctl.h,v 1.4 2019/03/21 20:21:40 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@ -109,7 +109,7 @@ struct nvmm_ioc_gpa_map {
uintptr_t hva;
gpaddr_t gpa;
size_t size;
int flags;
int prot;
};
struct nvmm_ioc_gpa_unmap {

View File

@ -1,4 +1,4 @@
/* $NetBSD: nvmm_x86_svm.c,v 1.34 2019/03/14 19:15:26 maxv Exp $ */
/* $NetBSD: nvmm_x86_svm.c,v 1.35 2019/03/21 20:21:41 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.34 2019/03/14 19:15:26 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.35 2019/03/21 20:21:41 maxv Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -38,6 +38,7 @@ __KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.34 2019/03/14 19:15:26 maxv Exp $
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>
#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
@ -1066,11 +1067,11 @@ svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
exit->reason = NVMM_EXIT_MEMORY;
if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W)
exit->u.mem.perm = NVMM_EXIT_MEMORY_WRITE;
exit->u.mem.prot = PROT_WRITE;
else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_X)
exit->u.mem.perm = NVMM_EXIT_MEMORY_EXEC;
exit->u.mem.prot = PROT_EXEC;
else
exit->u.mem.perm = NVMM_EXIT_MEMORY_READ;
exit->u.mem.prot = PROT_READ;
exit->u.mem.gpa = gpa;
exit->u.mem.inst_len = cpudata->vmcb->ctrl.inst_len;
memcpy(exit->u.mem.inst_bytes, cpudata->vmcb->ctrl.inst_bytes,

View File

@ -1,4 +1,4 @@
/* $NetBSD: nvmm_x86_vmx.c,v 1.19 2019/03/14 20:29:53 maxv Exp $ */
/* $NetBSD: nvmm_x86_vmx.c,v 1.20 2019/03/21 20:21:41 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.19 2019/03/14 20:29:53 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.20 2019/03/21 20:21:41 maxv Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -38,6 +38,7 @@ __KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.19 2019/03/14 20:29:53 maxv Exp $
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/mman.h>
#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
@ -1600,11 +1601,11 @@ vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
exit->reason = NVMM_EXIT_MEMORY;
vmx_vmread(VMCS_EXIT_QUALIFICATION, &perm);
if (perm & VMX_EPT_VIOLATION_WRITE)
exit->u.mem.perm = NVMM_EXIT_MEMORY_WRITE;
exit->u.mem.prot = PROT_WRITE;
else if (perm & VMX_EPT_VIOLATION_EXECUTE)
exit->u.mem.perm = NVMM_EXIT_MEMORY_EXEC;
exit->u.mem.prot = PROT_EXEC;
else
exit->u.mem.perm = NVMM_EXIT_MEMORY_READ;
exit->u.mem.prot = PROT_READ;
exit->u.mem.gpa = gpa;
exit->u.mem.inst_len = 0;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: h_io_assist.c,v 1.4 2019/03/19 19:23:39 maxv Exp $ */
/* $NetBSD: h_io_assist.c,v 1.5 2019/03/21 20:21:41 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@ -126,6 +126,7 @@ static void
map_pages(struct nvmm_machine *mach)
{
pt_entry_t *L4, *L3, *L2, *L1;
int ret;
instbuf = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
-1, 0);
@ -140,9 +141,13 @@ map_pages(struct nvmm_machine *mach)
err(errno, "nvmm_hva_map");
if (nvmm_hva_map(mach, (uintptr_t)databuf, PAGE_SIZE) == -1)
err(errno, "nvmm_hva_map");
if (nvmm_gpa_map(mach, (uintptr_t)instbuf, 0x2000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)instbuf, 0x2000, PAGE_SIZE,
PROT_READ|PROT_EXEC);
if (ret == -1)
err(errno, "nvmm_gpa_map");
if (nvmm_gpa_map(mach, (uintptr_t)databuf, 0x1000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)databuf, 0x1000, PAGE_SIZE,
PROT_READ|PROT_WRITE);
if (ret == -1)
err(errno, "nvmm_gpa_map");
L4 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
@ -171,13 +176,21 @@ map_pages(struct nvmm_machine *mach)
if (nvmm_hva_map(mach, (uintptr_t)L1, PAGE_SIZE) == -1)
err(errno, "nvmm_hva_map");
if (nvmm_gpa_map(mach, (uintptr_t)L4, 0x3000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)L4, 0x3000, PAGE_SIZE,
PROT_READ|PROT_WRITE);
if (ret == -1)
err(errno, "nvmm_gpa_map");
if (nvmm_gpa_map(mach, (uintptr_t)L3, 0x4000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)L3, 0x4000, PAGE_SIZE,
PROT_READ|PROT_WRITE);
if (ret == -1)
err(errno, "nvmm_gpa_map");
if (nvmm_gpa_map(mach, (uintptr_t)L2, 0x5000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)L2, 0x5000, PAGE_SIZE,
PROT_READ|PROT_WRITE);
if (ret == -1)
err(errno, "nvmm_gpa_map");
if (nvmm_gpa_map(mach, (uintptr_t)L1, 0x6000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)L1, 0x6000, PAGE_SIZE,
PROT_READ|PROT_WRITE);
if (ret == -1)
err(errno, "nvmm_gpa_map");
memset(L4, 0, PAGE_SIZE);

View File

@ -1,4 +1,4 @@
/* $NetBSD: h_mem_assist.c,v 1.7 2019/03/19 19:23:39 maxv Exp $ */
/* $NetBSD: h_mem_assist.c,v 1.8 2019/03/21 20:21:41 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@ -131,7 +131,9 @@ map_pages(struct nvmm_machine *mach)
if (nvmm_hva_map(mach, (uintptr_t)instbuf, PAGE_SIZE) == -1)
err(errno, "nvmm_hva_map");
if (nvmm_gpa_map(mach, (uintptr_t)instbuf, 0x2000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)instbuf, 0x2000, PAGE_SIZE,
PROT_READ|PROT_EXEC);
if (ret == -1)
err(errno, "nvmm_gpa_map");
L4 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
@ -160,13 +162,21 @@ map_pages(struct nvmm_machine *mach)
if (nvmm_hva_map(mach, (uintptr_t)L1, PAGE_SIZE) == -1)
err(errno, "nvmm_hva_map");
if (nvmm_gpa_map(mach, (uintptr_t)L4, 0x3000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)L4, 0x3000, PAGE_SIZE,
PROT_READ|PROT_WRITE);
if (ret == -1)
err(errno, "nvmm_gpa_map");
if (nvmm_gpa_map(mach, (uintptr_t)L3, 0x4000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)L3, 0x4000, PAGE_SIZE,
PROT_READ|PROT_WRITE);
if (ret == -1)
err(errno, "nvmm_gpa_map");
if (nvmm_gpa_map(mach, (uintptr_t)L2, 0x5000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)L2, 0x5000, PAGE_SIZE,
PROT_READ|PROT_WRITE);
if (ret == -1)
err(errno, "nvmm_gpa_map");
if (nvmm_gpa_map(mach, (uintptr_t)L1, 0x6000, PAGE_SIZE, 0) == -1)
ret = nvmm_gpa_map(mach, (uintptr_t)L1, 0x6000, PAGE_SIZE,
PROT_READ|PROT_WRITE);
if (ret == -1)
err(errno, "nvmm_gpa_map");
memset(L4, 0, PAGE_SIZE);