Export xpmap_pg_nx, and put it in the page table pages. It does not change
anything, since Xen removes the X bit on these; but it is better for
consistency.
This commit is contained in:
maxv 2017-01-22 19:24:51 +00:00
parent 6b21f5b784
commit 14f8c1a9b1

View File

@ -1,4 +1,4 @@
/* $NetBSD: x86_xpmap.c,v 1.69 2017/01/06 08:32:26 maxv Exp $ */ /* $NetBSD: x86_xpmap.c,v 1.70 2017/01/22 19:24:51 maxv Exp $ */
/* /*
* Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr> * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
@ -66,7 +66,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.69 2017/01/06 08:32:26 maxv Exp $"); __KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.70 2017/01/22 19:24:51 maxv Exp $");
#include "opt_xen.h" #include "opt_xen.h"
#include "opt_ddb.h" #include "opt_ddb.h"
@ -105,6 +105,8 @@ unsigned long *xpmap_phys_to_machine_mapping;
kmutex_t pte_lock; kmutex_t pte_lock;
vaddr_t xen_dummy_page; vaddr_t xen_dummy_page;
pt_entry_t xpmap_pg_nx;
void xen_failsafe_handler(void); void xen_failsafe_handler(void);
#define HYPERVISOR_mmu_update_self(req, count, success_count) \ #define HYPERVISOR_mmu_update_self(req, count, success_count) \
@ -609,6 +611,7 @@ xen_locore(void)
{ {
size_t count, oldcount, mapsize; size_t count, oldcount, mapsize;
vaddr_t bootstrap_tables, init_tables; vaddr_t bootstrap_tables, init_tables;
u_int descs[4];
xen_init_features(); xen_init_features();
@ -617,6 +620,10 @@ xen_locore(void)
xpmap_phys_to_machine_mapping = xpmap_phys_to_machine_mapping =
(unsigned long *)xen_start_info.mfn_list; (unsigned long *)xen_start_info.mfn_list;
/* Set the NX/XD bit, if available. descs[3] = %edx. */
x86_cpuid(0x80000001, descs);
xpmap_pg_nx = (descs[3] & CPUID_NOX) ? PG_NX : 0;
/* Space after Xen boostrap tables should be free */ /* Space after Xen boostrap tables should be free */
init_tables = xen_start_info.pt_base; init_tables = xen_start_info.pt_base;
bootstrap_tables = init_tables + bootstrap_tables = init_tables +
@ -738,14 +745,6 @@ xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
extern char __data_start; extern char __data_start;
extern char __kernel_end; extern char __kernel_end;
extern char *early_zerop; /* from pmap.c */ extern char *early_zerop; /* from pmap.c */
pt_entry_t pg_nx;
u_int descs[4];
/*
* Set the NX/XD bit, if available. descs[3] = %edx.
*/
x86_cpuid(0x80000001, descs);
pg_nx = (descs[3] & CPUID_NOX) ? PG_NX : 0;
/* /*
* Layout of RW area after the kernel image: * Layout of RW area after the kernel image:
@ -895,7 +894,7 @@ xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
page < (vaddr_t)atdevbase + IOM_SIZE) { page < (vaddr_t)atdevbase + IOM_SIZE) {
pte[pl1_pi(page)] = pte[pl1_pi(page)] =
IOM_BEGIN + (page - (vaddr_t)atdevbase); IOM_BEGIN + (page - (vaddr_t)atdevbase);
pte[pl1_pi(page)] |= pg_nx; pte[pl1_pi(page)] |= xpmap_pg_nx;
} }
#endif #endif
@ -906,15 +905,15 @@ xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
} else if (page >= (vaddr_t)&__rodata_start && } else if (page >= (vaddr_t)&__rodata_start &&
page < (vaddr_t)&__data_start) { page < (vaddr_t)&__data_start) {
/* Map the kernel rodata R. */ /* Map the kernel rodata R. */
pte[pl1_pi(page)] |= PG_RO | pg_nx; pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
} else if (page >= old_pgd && } else if (page >= old_pgd &&
page < old_pgd + (old_count * PAGE_SIZE)) { page < old_pgd + (old_count * PAGE_SIZE)) {
/* Map the old page tables R. */ /* Map the old page tables R. */
pte[pl1_pi(page)] |= PG_RO | pg_nx; pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
} else if (page >= new_pgd && } else if (page >= new_pgd &&
page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) { page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
/* Map the new page tables R. */ /* Map the new page tables R. */
pte[pl1_pi(page)] |= PG_RO | pg_nx; pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
#ifdef i386 #ifdef i386
} else if (page == (vaddr_t)tmpgdt) { } else if (page == (vaddr_t)tmpgdt) {
/* /*
@ -928,10 +927,10 @@ xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
} else if (page >= (vaddr_t)&__data_start && } else if (page >= (vaddr_t)&__data_start &&
page < (vaddr_t)&__kernel_end) { page < (vaddr_t)&__kernel_end) {
/* Map the kernel data+bss RW. */ /* Map the kernel data+bss RW. */
pte[pl1_pi(page)] |= PG_RW | pg_nx; pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
} else { } else {
/* Map the page RW. */ /* Map the page RW. */
pte[pl1_pi(page)] |= PG_RW | pg_nx; pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
} }
page += PAGE_SIZE; page += PAGE_SIZE;
@ -962,7 +961,7 @@ xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
addr = (u_long)pde - KERNBASE; addr = (u_long)pde - KERNBASE;
for (i = 0; i < 3; i++, addr += PAGE_SIZE) { for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V | pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V |
pg_nx; xpmap_pg_nx;
} }
/* Mark tables RO, and pin L2 KERN SHADOW. */ /* Mark tables RO, and pin L2 KERN SHADOW. */
@ -978,11 +977,11 @@ xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
/* Recursive entry in pmap_kernel(). */ /* Recursive entry in pmap_kernel(). */
bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE) bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
| PG_k | PG_RO | PG_V | pg_nx; | PG_k | PG_RO | PG_V | xpmap_pg_nx;
#ifdef __x86_64__ #ifdef __x86_64__
/* Recursive entry in higher-level per-cpu PD. */ /* Recursive entry in higher-level per-cpu PD. */
bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE) bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
| PG_k | PG_RO | PG_V | pg_nx; | PG_k | PG_RO | PG_V | xpmap_pg_nx;
#endif #endif
/* Mark tables RO */ /* Mark tables RO */
@ -1061,23 +1060,16 @@ xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
xpq_flush_queue(); xpq_flush_queue();
} }
/* /*
* Bootstrap helper functions * Mark a page read-only, assuming vaddr = paddr + KERNBASE.
*/ */
/*
* Mark a page readonly
* XXX: assuming vaddr = paddr + KERNBASE
*/
static void static void
xen_bt_set_readonly(vaddr_t page) xen_bt_set_readonly(vaddr_t page)
{ {
pt_entry_t entry; pt_entry_t entry;
entry = xpmap_ptom_masked(page - KERNBASE); entry = xpmap_ptom_masked(page - KERNBASE);
entry |= PG_k | PG_V; entry |= PG_k | PG_V | xpmap_pg_nx;
HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG); HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
} }