Introduce a UVM_KMF_EXEC flag for uvm_km_alloc() which enforces an
executable mapping. Up to now, only R+W was requested from
pmap_kenter_pa. On most CPUs we get an executable mapping anyway, due
to lack of hardware support or due to laziness in the pmap
implementation; only alpha obeys VM_PROT_EXECUTE, as far as I can see.
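
A minimal usage sketch (not part of this commit): a caller that needs
wired, writable and executable kernel memory, e.g. for generated
trampoline code, would now pass the new flag. The size "sz" and the
error path are illustrative only.

	vaddr_t va;

	va = uvm_km_alloc(kernel_map, round_page(sz), 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_EXEC | UVM_KMF_CANFAIL);
	if (va == 0)
		return ENOMEM;	/* with UVM_KMF_CANFAIL, failure returns 0 */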
commit ef8848c74a (parent e4f8241b5f)
Author: drochner
Date:   2006-07-05 14:26:42 +00:00

2 changed files with 9 additions and 5 deletions

sys/uvm/uvm_extern.h

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.114 2006/05/19 15:08:14 yamt Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.115 2006/07/05 14:26:42 drochner Exp $ */
 
 /*
  *
@@ -167,6 +167,7 @@ typedef off_t voff_t; /* XXX: offset within a uvm_object */
 #define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED)
 #define UVM_KMF_CANFAIL 0x8 /* caller handles failure */
 #define UVM_KMF_ZERO 0x10 /* want zero filled memory */
+#define UVM_KMF_EXEC 0x20 /* need executable mapping */
 #define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK /* try locking only */
 #define UVM_KMF_NOWAIT UVM_FLAG_NOWAIT /* not allowed to sleep */
 #define UVM_KMF_WAITVA UVM_FLAG_WAITVA /* sleep for va */
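
Note that UVM_KMF_EXEC is a modifier bit like UVM_KMF_CANFAIL and
UVM_KMF_ZERO, not a new allocation type in UVM_KMF_TYPEMASK. A hedged
sketch of releasing such an allocation, assuming it was made with
UVM_KMF_WIRED as in the example above:

	/* Pass the type bit used at allocation time; the modifier
	   bits (CANFAIL, ZERO, EXEC) are not repeated on free. */
	uvm_km_free(kernel_map, va, round_page(sz), UVM_KMF_WIRED);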

sys/uvm/uvm_km.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.88 2006/05/25 14:27:28 yamt Exp $ */
+/* $NetBSD: uvm_km.c,v 1.89 2006/07/05 14:26:42 drochner Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.88 2006/05/25 14:27:28 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.89 2006/07/05 14:26:42 drochner Exp $");
 
 #include "opt_uvmhist.h"
@@ -534,6 +534,7 @@ uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
 	struct vm_page *pg;
 	struct uvm_object *obj;
 	int pgaflags;
+	vm_prot_t prot;
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	KASSERT(vm_map_pmap(map) == pmap_kernel());
@@ -591,6 +592,9 @@ uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
 	pgaflags = UVM_PGA_USERESERVE;
 	if (flags & UVM_KMF_ZERO)
 		pgaflags |= UVM_PGA_ZERO;
+	prot = VM_PROT_READ | VM_PROT_WRITE;
+	if (flags & UVM_KMF_EXEC)
+		prot |= VM_PROT_EXECUTE;
 	while (loopsize) {
 		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));
@@ -620,8 +624,7 @@ uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
 		/*
 		 * map it in
 		 */
-		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
-		    VM_PROT_READ | VM_PROT_WRITE);
+		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), prot);
 		loopva += PAGE_SIZE;
 		offset += PAGE_SIZE;
 		loopsize -= PAGE_SIZE;
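
The commit message notes that most pmaps ignore VM_PROT_EXECUTE in
pmap_kenter_pa(). For illustration, a pmap on hardware with a per-page
no-execute bit might honor the protection now being passed down roughly
as follows; PTE_V, PTE_W, PTE_NX and pte_store() are invented names for
this sketch, not NetBSD APIs:

	void
	pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
	{
		pt_entry_t pte;

		pte = pa | PTE_V;		/* valid mapping */
		if (prot & VM_PROT_WRITE)
			pte |= PTE_W;		/* allow stores */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pte |= PTE_NX;		/* deny instruction fetch */
		pte_store(va, pte);		/* hypothetical PTE write */
	}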