add support for non-executable mappings (where the hardware allows this)

and make the stack and heap non-executable by default.  the changes
fall into two basic categories (a userland sketch of the net effect
follows the list):

 - pmap and trap-handler changes.  these are all MD:
   = alpha: we already track per-page execute permission with the (software)
	PG_EXEC bit, so just have the trap handler pay attention to it.
   = i386: use a new GDT segment for %cs for processes that have no
	executable mappings above a certain threshold (currently the
	bottom of the stack).  track per-page execute permission with
	the last unused PTE bit.
   = powerpc/ibm4xx: just use the hardware exec bit.
   = powerpc/oea: we already track per-page exec bits, but the hardware only
	implements non-exec mappings at the segment level.  so track the
	number of executable mappings in each segment and turn on the no-exec
	segment bit iff the count is 0.  adjust the trap handler to deal.
   = sparc (sun4m): fix our use of the hardware protection bits.
	fix the trap handler to recognize text faults.
   = sparc64: split the existing unified TSB into data and instruction TSBs,
	and only load TTEs into the appropriate TSB(s) for the permissions.
	fix the trap handler to check for execute permission.
   = not yet implemented: amd64, hppa, sh5

 - changes in all the emulations that put a signal trampoline on the stack.
   instead, we now put the trampoline into a uvm_aobj and map that into
   the process separately (see the mapping sketch below).
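
to illustrate the user-visible effect of the first category: code
planted in a writable heap (or stack) mapping should now fault instead
of running.  the following is a minimal userland sketch, not part of
this commit; the 0xc3 ("ret") opcode assumes an i386-class CPU, and the
printf() calls are for demonstration only.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
segv_handler(int sig)
{
	/* expected once the heap is non-executable */
	printf("SIGSEGV: heap is not executable\n");
	exit(0);
}

int
main(void)
{
	unsigned char *buf = malloc(1);
	void (*fn)(void);

	signal(SIGSEGV, segv_handler);
	buf[0] = 0xc3;				/* i386 "ret" */
	fn = (void (*)(void))buf;
	(*fn)();				/* faults on a non-exec heap */
	printf("heap was executable\n");	/* pre-change behavior */
	return 1;
}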

originally from openbsd, adapted for netbsd by me.
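
the second category's mapping boils down to the pattern below.  this is
a hedged sketch, not the literal commit code: uao_create(), uvm_map(),
UVM_MAPFLAG() and UVM_PROT_RX are real UVM interfaces, but the function
name and error handling here are illustrative.  the aobj itself would
be created once with uao_create(PAGE_SIZE, 0) and filled with the MD
sigcode before any process maps it.

#include <sys/param.h>
#include <sys/proc.h>
#include <uvm/uvm_extern.h>

/*
 * Sketch: map the one-page anonymous object holding the signal
 * trampoline into a process, readable and executable but never
 * writable, so the stack itself can stay non-exec.
 */
int
map_sigcode(struct proc *p, struct uvm_object *sigobject, vaddr_t *vap)
{
	vaddr_t va = 0;		/* let uvm_map() pick the address */
	int error;

	error = uvm_map(&p->p_vmspace->vm_map, &va, PAGE_SIZE,
	    sigobject, 0, 0,
	    UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX,
		UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error)
		return (error);
	*vap = va;
	return (0);
}
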
commit 939df36e55 (parent 4b28d28d70)
chs, 2003-08-24 17:52:28 +00:00
52 changed files with 842 additions and 663 deletions


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.202 2003/08/07 16:26:32 agc Exp $ */
/* $NetBSD: pmap.c,v 1.203 2003/08/24 17:52:28 chs Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@ -145,7 +145,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.202 2003/08/07 16:26:32 agc Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.203 2003/08/24 17:52:28 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -1554,9 +1554,6 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
return;
}
if (prot & VM_PROT_WRITE)
return;
PMAP_LOCK(pmap);
bits = pte_prot(pmap, prot);
@ -2518,39 +2515,23 @@ alpha_protection_init(void)
up = protection_codes[1];
for (prot = 0; prot < 8; prot++) {
kp[prot] = 0; up[prot] = 0;
switch (prot) {
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
kp[prot] |= PG_ASM;
up[prot] |= 0;
break;
kp[prot] = PG_ASM;
up[prot] = 0;
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
kp[prot] |= PG_EXEC; /* software */
up[prot] |= PG_EXEC; /* software */
/* FALLTHROUGH */
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
kp[prot] |= PG_ASM | PG_KRE;
up[prot] |= PG_URE | PG_KRE;
break;
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
kp[prot] |= PG_ASM | PG_KWE;
up[prot] |= PG_UWE | PG_KWE;
break;
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
kp[prot] |= PG_EXEC; /* software */
up[prot] |= PG_EXEC; /* software */
/* FALLTHROUGH */
case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
kp[prot] |= PG_ASM | PG_KWE | PG_KRE;
up[prot] |= PG_UWE | PG_URE | PG_KWE | PG_KRE;
break;
if (prot & VM_PROT_READ) {
kp[prot] |= PG_KRE;
up[prot] |= PG_KRE | PG_URE;
}
if (prot & VM_PROT_WRITE) {
kp[prot] |= PG_KWE;
up[prot] |= PG_KWE | PG_UWE;
}
if (prot & VM_PROT_EXECUTE) {
kp[prot] |= PG_EXEC | PG_KRE;
up[prot] |= PG_EXEC | PG_KRE | PG_URE;
} else {
kp[prot] |= PG_FOE;
up[prot] |= PG_FOE;
}
}
}
@ -2722,20 +2703,24 @@ pmap_changebit(struct vm_page *pg, u_long set, u_long mask, long cpu_id)
* pmap_emulate_reference:
*
* Emulate reference and/or modified bit hits.
* Return 1 if this was an execute fault on a non-exec mapping,
* otherwise return 0.
*/
void
pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int write)
int
pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int type)
{
struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
pt_entry_t faultoff, *pte;
struct vm_page *pg;
paddr_t pa;
boolean_t didlock = FALSE;
boolean_t exec = FALSE;
long cpu_id = cpu_number();
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_emulate_reference: %p, 0x%lx, %d, %d\n",
l, v, user, write);
l, v, user, type);
#endif
/*
@ -2755,13 +2740,19 @@ pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int write)
if (l->l_proc->p_vmspace == NULL)
panic("pmap_emulate_reference: bad p_vmspace");
#endif
PMAP_LOCK(l->l_proc->p_vmspace->vm_map.pmap);
PMAP_LOCK(pmap);
didlock = TRUE;
pte = pmap_l3pte(l->l_proc->p_vmspace->vm_map.pmap, v, NULL);
pte = pmap_l3pte(pmap, v, NULL);
/*
* We'll unlock below where we're done with the PTE.
*/
}
exec = pmap_pte_exec(pte);
if (!exec && type == ALPHA_MMCSR_FOE) {
if (didlock)
PMAP_UNLOCK(pmap);
return (1);
}
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
printf("\tpte = %p, ", pte);
@ -2777,7 +2768,7 @@ pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int write)
* pmap_emulate_reference(), and the bits aren't guaranteed,
* for them...
*/
if (write) {
if (type == ALPHA_MMCSR_FOW) {
if (!(*pte & (user ? PG_UWE : PG_UWE | PG_KWE)))
panic("pmap_emulate_reference: write but unwritable");
if (!(*pte & PG_FOW))
@ -2798,7 +2789,7 @@ pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int write)
* it now.
*/
if (didlock)
PMAP_UNLOCK(l->l_proc->p_vmspace->vm_map.pmap);
PMAP_UNLOCK(pmap);
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
@ -2806,7 +2797,8 @@ pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int write)
#endif
#ifdef DIAGNOSTIC
if (!PAGE_IS_MANAGED(pa))
panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): pa 0x%lx not managed", l, v, user, write, pa);
panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): "
"pa 0x%lx not managed", l, v, user, type, pa);
#endif
/*
@ -2822,17 +2814,21 @@ pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int write)
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
if (write) {
if (type == ALPHA_MMCSR_FOW) {
pg->mdpage.pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
faultoff = PG_FOR | PG_FOW | PG_FOE;
faultoff = PG_FOR | PG_FOW;
} else {
pg->mdpage.pvh_attrs |= PGA_REFERENCED;
faultoff = PG_FOR | PG_FOE;
faultoff = PG_FOR;
if (exec) {
faultoff |= PG_FOE;
}
}
pmap_changebit(pg, 0, ~faultoff, cpu_id);
simple_unlock(&pg->mdpage.pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
return (0);
}
#ifdef DEBUG


@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.80 2003/06/23 11:01:00 martin Exp $ */
/* $NetBSD: trap.c,v 1.81 2003/08/24 17:52:29 chs Exp $ */
/*-
* Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
@ -100,7 +100,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.80 2003/06/23 11:01:00 martin Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.81 2003/08/24 17:52:29 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -232,17 +232,16 @@ void
trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
struct trapframe *framep)
{
register struct lwp *l;
register int i;
struct lwp *l;
u_int64_t ucode;
int user;
vm_prot_t ftype;
int i, user;
#if defined(DDB)
int call_debugger = 1;
#endif
l = curlwp;
uvmexp.traps++; /* XXXSMP: NOT ATOMIC */
ucode = 0;
user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0;
if (user)
@ -360,8 +359,10 @@ trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
else
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
pmap_emulate_reference(l, a0, user,
a1 == ALPHA_MMCSR_FOW ? 1 : 0);
if (pmap_emulate_reference(l, a0, user, a1)) {
ftype = VM_PROT_EXECUTE;
goto do_fault;
}
if (user)
KERNEL_PROC_UNLOCK(l);
@ -372,12 +373,23 @@ trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
case ALPHA_MMCSR_INVALTRANS:
case ALPHA_MMCSR_ACCESS:
{
register vaddr_t va;
register struct vmspace *vm = NULL;
register struct vm_map *map;
vm_prot_t ftype;
vaddr_t va;
struct vmspace *vm = NULL;
struct vm_map *map;
int rv;
switch (a2) {
case -1: /* instruction fetch fault */
ftype = VM_PROT_EXECUTE;
break;
case 0: /* load instruction */
ftype = VM_PROT_READ;
break;
case 1: /* store instruction */
ftype = VM_PROT_WRITE;
break;
}
if (user)
KERNEL_PROC_LOCK(l);
else {
@ -427,6 +439,7 @@ trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
* The last can occur during an exec() copyin where the
* argument space is lazy-allocated.
*/
do_fault:
if (user == 0 && (a0 >= VM_MIN_KERNEL_ADDRESS ||
l->l_addr->u_pcb.pcb_onfault == 0))
map = kernel_map;
@ -435,28 +448,11 @@ trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
map = &vm->vm_map;
}
switch (a2) {
case -1: /* instruction fetch fault */
case 0: /* load instruction */
ftype = VM_PROT_READ;
break;
case 1: /* store instruction */
ftype = VM_PROT_WRITE;
break;
#ifdef DIAGNOSTIC
default: /* XXX gcc -Wuninitialized */
if (user)
KERNEL_PROC_UNLOCK(l);
else
KERNEL_UNLOCK();
goto dopanic;
#endif
}
va = trunc_page((vaddr_t)a0);
rv = uvm_fault(map, va,
(a1 == ALPHA_MMCSR_INVALTRANS) ?
VM_FAULT_INVALID : VM_FAULT_PROTECT, ftype);
/*
* If this was a stack access we keep track of the
* maximum accessed stack size. Also, if vm_fault
@ -614,7 +610,7 @@ alpha_enable_fp(struct lwp *l, int check)
void
ast(struct trapframe *framep)
{
register struct lwp *l;
struct lwp *l;
/*
* We may not have a current process to do AST processing


@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.62 2003/08/07 16:26:33 agc Exp $ */
/* $NetBSD: pmap.h,v 1.63 2003/08/24 17:52:30 chs Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@ -258,9 +258,8 @@ boolean_t pmap_pageidlezero(paddr_t);
paddr_t vtophys(vaddr_t);
/* Machine-specific functions. */
void pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids);
void pmap_emulate_reference(struct lwp *p, vaddr_t v,
int user, int write);
void pmap_bootstrap(paddr_t, u_int, u_long);
int pmap_emulate_reference(struct lwp *, vaddr_t, int, int);
#ifdef _PMAP_MAY_USE_PROM_CONSOLE
int pmap_uses_prom_console(void);
#endif


@ -1,4 +1,4 @@
/* $NetBSD: pte.h,v 1.27 2002/10/14 05:11:23 chs Exp $ */
/* $NetBSD: pte.h,v 1.28 2003/08/24 17:52:30 chs Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -98,12 +98,12 @@ typedef alpha_pt_entry_t pt_entry_t;
#define PG_URE ALPHA_PTE_UR
#define PG_KWE ALPHA_PTE_KW
#define PG_UWE ALPHA_PTE_UW
#define PG_PROT ALPHA_PTE_PROT
#define PG_PROT (ALPHA_PTE_PROT | PG_EXEC | PG_FOE)
#define PG_RSVD 0x000000000000cc80 /* Reserved for hardware */
#define PG_WIRED 0x0000000000010000 /* Wired. [SOFTWARE] */
#define PG_PVLIST 0x0000000000020000 /* on pv list [SOFTWARE] */
#define PG_EXEC 0x0000000000040000 /* execute perms [SOFTWARE] */
#define PG_FRAME ALPHA_PTE_FRAME
#define PG_FRAME ALPHA_PTE_PFN
#define PG_SHIFT 32
#define PG_PFNUM(x) ALPHA_PTE_TO_PFN(x)


@ -1,4 +1,4 @@
/* $NetBSD: freebsd_machdep.c,v 1.37 2003/08/20 21:48:36 fvdl Exp $ */
/* $NetBSD: freebsd_machdep.c,v 1.38 2003/08/24 17:52:30 chs Exp $ */
/*-
* Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: freebsd_machdep.c,v 1.37 2003/08/20 21:48:36 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: freebsd_machdep.c,v 1.38 2003/08/24 17:52:30 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_vm86.h"
@ -181,7 +181,7 @@ freebsd_sendsig(sig, mask, code)
tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_eip = (int)p->p_sigctx.ps_sigcode;
tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
tf->tf_cs = GSEL(GUCODEBIG_SEL, SEL_UPL);
tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
tf->tf_esp = (int)fp;
tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);


@ -1,4 +1,4 @@
/* $NetBSD: ibcs2_machdep.c,v 1.22 2003/06/23 11:01:18 martin Exp $ */
/* $NetBSD: ibcs2_machdep.c,v 1.23 2003/08/24 17:52:30 chs Exp $ */
/*-
* Copyright (c) 1997, 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ibcs2_machdep.c,v 1.22 2003/06/23 11:01:18 martin Exp $");
__KERNEL_RCSID(0, "$NetBSD: ibcs2_machdep.c,v 1.23 2003/08/24 17:52:30 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_vm86.h"
@ -85,6 +85,7 @@ ibcs2_setregs(l, epp, stack)
pcb->pcb_savefpu.sv_87.sv_env.en_cw = __iBCS2_NPXCW__;
tf = l->l_md.md_regs;
tf->tf_eax = 0x2000000; /* XXX base of heap */
tf->tf_cs = GSEL(LUCODEBIG_SEL, SEL_UPL);
}
/*
@ -188,7 +189,7 @@ ibcs2_sendsig(sig, mask, code)
tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_eip = (int)catcher;
tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
tf->tf_cs = GSEL(GUCODEBIG_SEL, SEL_UPL);
tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
tf->tf_esp = (int)fp;
tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.532 2003/08/20 21:48:38 fvdl Exp $ */
/* $NetBSD: machdep.c,v 1.533 2003/08/24 17:52:30 chs Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
@ -72,7 +72,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.532 2003/08/20 21:48:38 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.533 2003/08/24 17:52:30 chs Exp $");
#include "opt_cputype.h"
#include "opt_ddb.h"
@ -600,6 +600,7 @@ sendsig(sig, mask, code)
{
struct lwp *l = curlwp;
struct proc *p = l->l_proc;
struct pmap *pmap = vm_map_pmap(&p->p_vmspace->vm_map);
struct sigacts *ps = p->p_sigacts;
struct trapframe *tf;
struct sigframe *fp, frame;
@ -716,7 +717,8 @@ sendsig(sig, mask, code)
tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_eip = (int)catcher;
tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
GSEL(GUCODEBIG_SEL, SEL_UPL) : GSEL(GUCODE_SEL, SEL_UPL);
tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
tf->tf_esp = (int)fp;
tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
@ -730,6 +732,7 @@ sendsig(sig, mask, code)
void
cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, void *sas, void *ap, void *sp, sa_upcall_t upcall)
{
struct pmap *pmap = vm_map_pmap(&l->l_proc->p_vmspace->vm_map);
struct saframe *sf, frame;
struct trapframe *tf;
@ -757,7 +760,8 @@ cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, void *sas, vo
tf->tf_fs = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
GSEL(GUCODEBIG_SEL, SEL_UPL) : GSEL(GUCODE_SEL, SEL_UPL);
tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
}
@ -1227,6 +1231,7 @@ setregs(l, pack, stack)
struct exec_package *pack;
u_long stack;
{
struct pmap *pmap = vm_map_pmap(&l->l_proc->p_vmspace->vm_map);
struct pcb *pcb = &l->l_addr->u_pcb;
struct trapframe *tf;
@ -1260,7 +1265,8 @@ setregs(l, pack, stack)
tf->tf_ecx = 0;
tf->tf_eax = 0;
tf->tf_eip = pack->ep_entry;
tf->tf_cs = LSEL(LUCODE_SEL, SEL_UPL);
tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
LSEL(LUCODEBIG_SEL, SEL_UPL) : LSEL(LUCODE_SEL, SEL_UPL);
tf->tf_eflags = PSL_USERSET;
tf->tf_esp = stack;
tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL);
@ -1460,7 +1466,9 @@ initgdt(union descriptor *tgdt)
/* make gdt gates and memory segments */
setsegment(&gdt[GCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 1, 1);
setsegment(&gdt[GDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 1, 1);
setsegment(&gdt[GUCODE_SEL].sd, 0, x86_btop(VM_MAXUSER_ADDRESS) - 1,
setsegment(&gdt[GUCODE_SEL].sd, 0, x86_btop(I386_MAX_EXE_ADDR) - 1,
SDT_MEMERA, SEL_UPL, 1, 1);
setsegment(&gdt[GUCODEBIG_SEL].sd, 0, x86_btop(VM_MAXUSER_ADDRESS) - 1,
SDT_MEMERA, SEL_UPL, 1, 1);
setsegment(&gdt[GUDATA_SEL].sd, 0, x86_btop(VM_MAXUSER_ADDRESS) - 1,
SDT_MEMRWA, SEL_UPL, 1, 1);
@ -1980,6 +1988,7 @@ init386(first_avail)
SDT_SYS386CGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
ldt[LUCODE_SEL] = gdt[GUCODE_SEL];
ldt[LUCODEBIG_SEL] = gdt[GUCODEBIG_SEL];
ldt[LUDATA_SEL] = gdt[GUDATA_SEL];
ldt[LSOL26CALLS_SEL] = ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.156 2003/07/22 13:55:31 yamt Exp $ */
/* $NetBSD: pmap.c,v 1.157 2003/08/24 17:52:31 chs Exp $ */
/*
*
@ -60,7 +60,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.156 2003/07/22 13:55:31 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.157 2003/08/24 17:52:31 chs Exp $");
#include "opt_cputype.h"
#include "opt_user_ldt.h"
@ -703,6 +703,68 @@ pmap_unmap_ptes(pmap)
}
}
__inline static void
pmap_exec_account(struct pmap *pm, vaddr_t va, pt_entry_t opte, pt_entry_t npte)
{
if (curproc == NULL || curproc->p_vmspace == NULL ||
pm != vm_map_pmap(&curproc->p_vmspace->vm_map))
return;
if ((opte ^ npte) & PG_X)
pmap_update_pg(va);
/*
* Executability was removed on the last executable change.
* Reset the code segment to something conservative and
* let the trap handler deal with setting the right limit.
* We can't do that because of locking constraints on the vm map.
*/
if ((opte & PG_X) && (npte & PG_X) == 0 && va == pm->pm_hiexec) {
struct trapframe *tf = curlwp->l_md.md_regs;
struct pcb *pcb = &curlwp->l_addr->u_pcb;
pcb->pcb_cs = tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
pm->pm_hiexec = I386_MAX_EXE_ADDR;
}
}
/*
* Fixup the code segment to cover all potential executable mappings.
* returns 0 if no changes to the code segment were made.
*/
int
pmap_exec_fixup(struct vm_map *map, struct trapframe *tf, struct pcb *pcb)
{
struct vm_map_entry *ent;
struct pmap *pm = vm_map_pmap(map);
vaddr_t va = 0;
vm_map_lock_read(map);
for (ent = (&map->header)->next; ent != &map->header; ent = ent->next) {
/*
* This entry has greater va than the entries before.
* We need to make it point to the last page, not past it.
*/
if (ent->protection & VM_PROT_EXECUTE)
va = trunc_page(ent->end) - PAGE_SIZE;
}
vm_map_unlock_read(map);
if (va == pm->pm_hiexec)
return (0);
pm->pm_hiexec = va;
if (pm->pm_hiexec > I386_MAX_EXE_ADDR) {
pcb->pcb_cs = tf->tf_cs = GSEL(GUCODEBIG_SEL, SEL_UPL);
} else {
pcb->pcb_cs = tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
}
return (1);
}
/*
* p m a p k e n t e r f u n c t i o n s
*
@ -836,13 +898,13 @@ pmap_bootstrap(kva_start)
*/
protection_codes[VM_PROT_NONE] = 0; /* --- */
protection_codes[VM_PROT_EXECUTE] = PG_RO; /* --x */
protection_codes[VM_PROT_EXECUTE] = PG_X; /* --x */
protection_codes[VM_PROT_READ] = PG_RO; /* -r- */
protection_codes[VM_PROT_READ|VM_PROT_EXECUTE] = PG_RO; /* -rx */
protection_codes[VM_PROT_READ|VM_PROT_EXECUTE] = PG_RO|PG_X;/* -rx */
protection_codes[VM_PROT_WRITE] = PG_RW; /* w-- */
protection_codes[VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;/* w-x */
protection_codes[VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW|PG_X;/* w-x */
protection_codes[VM_PROT_WRITE|VM_PROT_READ] = PG_RW; /* wr- */
protection_codes[VM_PROT_ALL] = PG_RW; /* wrx */
protection_codes[VM_PROT_ALL] = PG_RW|PG_X; /* wrx */
/*
* now we init the kernel's pmap
@ -1614,6 +1676,7 @@ pmap_create()
pmap->pm_stats.wired_count = 0;
pmap->pm_stats.resident_count = 1; /* count the PDP allocd below */
pmap->pm_ptphint = NULL;
pmap->pm_hiexec = 0;
pmap->pm_flags = 0;
/* init the LDT */
@ -2114,6 +2177,7 @@ pmap_remove_ptes(pmap, ptp, ptpva, startva, endva, cpumaskp, flags)
/* atomically save the old PTE and zap! it */
opte = x86_atomic_testset_ul(pte, 0);
pmap_exec_account(pmap, startva, opte, 0);
if (opte & PG_W)
pmap->pm_stats.wired_count--;
@ -2196,6 +2260,7 @@ pmap_remove_pte(pmap, ptp, pte, va, cpumaskp, flags)
/* atomically save the old PTE and zap! it */
opte = x86_atomic_testset_ul(pte, 0);
pmap_exec_account(pmap, va, opte, 0);
if (opte & PG_W)
pmap->pm_stats.wired_count--;
@ -3042,6 +3107,7 @@ enter_now:
if (ptp)
ptp->wire_count += ptpdelta;
npte = pa | protection_codes[prot] | PG_V;
pmap_exec_account(pmap, va, opte, npte);
if (pvh)
npte |= PG_PVLIST;
if (wired)


@ -1,4 +1,4 @@
/* $NetBSD: svr4_machdep.c,v 1.68 2003/08/20 21:48:41 fvdl Exp $ */
/* $NetBSD: svr4_machdep.c,v 1.69 2003/08/24 17:52:32 chs Exp $ */
/*-
* Copyright (c) 1994, 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: svr4_machdep.c,v 1.68 2003/08/20 21:48:41 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: svr4_machdep.c,v 1.69 2003/08/24 17:52:32 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_vm86.h"
@ -125,12 +125,14 @@ svr4_setregs(l, epp, stack)
u_long stack;
{
register struct pcb *pcb = &l->l_addr->u_pcb;
struct trapframe *tf = l->l_md.md_regs;
setregs(l, epp, stack);
if (i386_use_fxsave)
pcb->pcb_savefpu.sv_xmm.sv_env.en_cw = __SVR4_NPXCW__;
else
pcb->pcb_savefpu.sv_87.sv_env.en_cw = __SVR4_NPXCW__;
tf->tf_cs = GSEL(LUCODEBIG_SEL, SEL_UPL);
}
void *
@ -436,7 +438,7 @@ svr4_sendsig(sig, mask, code)
tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_eip = (int)p->p_sigctx.ps_sigcode;
tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
tf->tf_cs = GSEL(GUCODEBIG_SEL, SEL_UPL);
tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
tf->tf_esp = (int)fp;
tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);


@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.184 2003/08/20 21:48:43 fvdl Exp $ */
/* $NetBSD: trap.c,v 1.185 2003/08/24 17:52:32 chs Exp $ */
/*-
* Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
@ -75,7 +75,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.184 2003/08/20 21:48:43 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.185 2003/08/24 17:52:32 chs Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
@ -393,6 +393,12 @@ copyfault:
goto out;
}
#endif
/* If pmap_exec_fixup does something, let's retry the trap. */
if (pmap_exec_fixup(&p->p_vmspace->vm_map, frame,
&l->l_addr->u_pcb)) {
goto out;
}
case T_TSSFLT|T_USER:
case T_SEGNPFLT|T_USER:
case T_STKFLT|T_USER:


@ -1,4 +1,4 @@
/* $NetBSD: pcb.h,v 1.34 2003/08/07 16:27:59 agc Exp $ */
/* $NetBSD: pcb.h,v 1.35 2003/08/24 17:52:33 chs Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -95,6 +95,7 @@ struct pcb {
#define pcb_cr3 pcb_tss.tss_cr3
#define pcb_esp pcb_tss.tss_esp
#define pcb_ebp pcb_tss.tss_ebp
#define pcb_cs pcb_tss.__tss_cs
#define pcb_ldt_sel pcb_tss.tss_ldt
int pcb_cr0; /* saved image of CR0 */
int pcb_cr2; /* page fault address (CR2) */


@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.74 2003/07/22 13:55:33 yamt Exp $ */
/* $NetBSD: pmap.h,v 1.75 2003/08/24 17:52:33 chs Exp $ */
/*
*
@ -206,7 +206,7 @@
#define PG_W PG_AVAIL1 /* "wired" mapping */
#define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
/* PG_AVAIL3 not used */
#define PG_X PG_AVAIL3 /* executable mapping */
/*
* Number of PTE's per cache line. 4 byte pte, 32-byte cache line
@ -247,6 +247,7 @@ struct pmap {
struct vm_page *pm_ptphint; /* pointer to a PTP in our pmap */
struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
vaddr_t pm_hiexec; /* highest executable mapping */
int pm_flags; /* see below */
union descriptor *pm_ldt; /* user-set LDT */
@ -346,6 +347,8 @@ void pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t pmap_test_attrs __P((struct vm_page *, int));
void pmap_write_protect __P((struct pmap *, vaddr_t,
vaddr_t, vm_prot_t));
int pmap_exec_fixup(struct vm_map *, struct trapframe *,
struct pcb *);
vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */


@ -1,4 +1,4 @@
/* $NetBSD: pte.h,v 1.13 2003/04/02 07:35:59 thorpej Exp $ */
/* $NetBSD: pte.h,v 1.14 2003/08/24 17:52:33 chs Exp $ */
/*
*
@ -163,7 +163,7 @@ typedef u_int32_t pt_entry_t; /* PTE */
#define PG_RO 0x00000000 /* read-only page */
#define PG_RW 0x00000002 /* read-write page */
#define PG_u 0x00000004 /* user accessible page */
#define PG_PROT 0x00000006 /* all protection bits */
#define PG_PROT 0x00000806 /* all protection bits */
#define PG_N 0x00000018 /* non-cacheable */
#define PG_U 0x00000020 /* has been used */
#define PG_M 0x00000040 /* has been modified */


@ -1,4 +1,4 @@
/* $NetBSD: segments.h,v 1.37 2003/08/07 16:28:00 agc Exp $ */
/* $NetBSD: segments.h,v 1.38 2003/08/24 17:52:33 chs Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -288,7 +288,8 @@ void idt_vec_free __P((int));
#define GPNPBIOSTRAMP_SEL 17
#define GTRAPTSS_SEL 18
#define GIPITSS_SEL 19
#define NGDT 20
#define GUCODEBIG_SEL 20 /* User code with executable stack */
#define NGDT 21
/*
* Entries in the Local Descriptor Table (LDT)
@ -298,6 +299,7 @@ void idt_vec_free __P((int));
#define LUCODE_SEL 2 /* User code descriptor */
#define LUDATA_SEL 3 /* User data descriptor */
#define LSOL26CALLS_SEL 4 /* Solaris 2.6 system call gate */
#define LUCODEBIG_SEL 5 /* User code with executable stack */
#define LBSDICALLS_SEL 16 /* BSDI system call gate */
#define NLDT 17


@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.53 2003/08/07 16:28:00 agc Exp $ */
/* $NetBSD: vmparam.h,v 1.54 2003/08/24 17:52:33 chs Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -74,6 +74,14 @@
#define MAXSSIZ (32*1024*1024) /* max stack size */
#endif
/*
* IA-32 can't do per-page execute permission, so instead we implement
* two executable segments for %cs, one that covers everything and one
* that excludes some of the address space (currently just the stack).
* I386_MAX_EXE_ADDR is the upper boundary for the smaller segment.
*/
#define I386_MAX_EXE_ADDR (USRSTACK - MAXSSIZ)
/*
* Size of shared memory map
*/
@ -107,7 +115,7 @@
#define __HAVE_TOPDOWN_VM
#ifdef USE_TOPDOWN_VM
#define VM_DEFAULT_ADDRESS(da, sz) \
trunc_page(VM_MAXUSER_ADDRESS - MAXSSIZ - (sz))
trunc_page(USRSTACK - MAXSSIZ - (sz))
#endif
/* XXX max. amount of KVM to be used by buffers. */


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.25 2003/08/12 05:06:58 matt Exp $ */
/* $NetBSD: pmap.c,v 1.26 2003/08/24 17:52:34 chs Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.25 2003/08/12 05:06:58 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.26 2003/08/24 17:52:34 chs Exp $");
#include <sys/param.h>
#include <sys/malloc.h>
@ -827,13 +827,8 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
/*
* Generate TTE.
*
* XXXX
*
* Since the kernel does not handle execution privileges properly,
* we will handle read and execute permissions together.
*/
tte = TTE_PA(pa) | TTE_EX;
tte = TTE_PA(pa);
/* XXXX -- need to support multiple page sizes. */
tte |= TTE_SZ_16K;
#ifdef DIAGNOSTIC
@ -859,6 +854,9 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if (flags & VM_PROT_WRITE)
tte |= TTE_WR;
if (flags & VM_PROT_EXECUTE)
tte |= TTE_EX;
/*
* Now record mapping for later back-translation.
*/
@ -1051,21 +1049,31 @@ void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
volatile u_int *ptp;
int s;
int s, bic;
if (prot & VM_PROT_READ) {
s = splvm();
while (sva < eva) {
if ((ptp = pte_find(pm, sva)) != NULL) {
*ptp &= ~TTE_WR;
ppc4xx_tlb_flush(sva, pm->pm_ctx);
}
sva += PAGE_SIZE;
}
splx(s);
if ((prot & VM_PROT_READ) == 0) {
pmap_remove(pm, sva, eva);
return;
}
pmap_remove(pm, sva, eva);
bic = 0;
if ((prot & VM_PROT_WRITE) == 0) {
bic |= TTE_WR;
}
if ((prot & VM_PROT_EXECUTE) == 0) {
bic |= TTE_EX;
}
if (bic == 0) {
return;
}
s = splvm();
while (sva < eva) {
if ((ptp = pte_find(pm, sva)) != NULL) {
*ptp &= ~bic;
ppc4xx_tlb_flush(sva, pm->pm_ctx);
}
sva += PAGE_SIZE;
}
splx(s);
}
boolean_t
@ -1118,14 +1126,14 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
pm = pv->pv_pm;
va = pv->pv_va;
pmap_protect(pm, va, va+PAGE_SIZE, prot);
pmap_protect(pm, va, va + PAGE_SIZE, prot);
}
/* Now check the head pv */
if (pvh->pv_pm) {
pv = pvh;
pm = pv->pv_pm;
va = pv->pv_va;
pmap_protect(pm, va, va+PAGE_SIZE, prot);
pmap_protect(pm, va, va + PAGE_SIZE, prot);
}
}
@ -1165,7 +1173,6 @@ pmap_procwr(struct proc *p, vaddr_t va, size_t len)
struct pmap *pm = p->p_vmspace->vm_map.pmap;
int msr, ctx, opid, step;
step = CACHELINESIZE;
/*
@ -1207,7 +1214,8 @@ ppc4xx_tlb_flush(vaddr_t va, int pid)
u_long msr;
/* If there's no context then it can't be mapped. */
if (!pid) return;
if (!pid)
return;
asm("mfpid %1;" /* Save PID */
"mfmsr %2;" /* Save MSR */


@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.14 2003/07/15 02:54:44 lukem Exp $ */
/* $NetBSD: trap.c,v 1.15 2003/08/24 17:52:34 chs Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.14 2003/07/15 02:54:44 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.15 2003/08/24 17:52:34 chs Exp $");
#include "opt_altivec.h"
#include "opt_ddb.h"
@ -246,13 +246,13 @@ trap(struct trapframe *frame)
}
KERNEL_PROC_UNLOCK(l);
break;
case EXC_ITMISS|EXC_USER:
case EXC_ISI|EXC_USER:
KERNEL_PROC_LOCK(l);
ftype = VM_PROT_READ | VM_PROT_EXECUTE;
ftype = VM_PROT_EXECUTE;
DBPRINTF(TDB_ALL,
("trap(EXC_ISI|EXC_USER) at %lx %s fault on %lx tf %p\n",
frame->srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
frame->srr0, frame));
rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0),
0, ftype);


@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.4 2003/04/09 22:37:32 matt Exp $ */
/* $NetBSD: pmap.h,v 1.5 2003/08/24 17:52:34 chs Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -42,6 +42,7 @@
*/
struct pmap {
register_t pm_sr[16]; /* segments used in this pmap */
int pm_exec[16]; /* counts of exec mappings */
int pm_refs; /* ref count */
struct pmap_statistics pm_stats; /* pmap statistics */
unsigned int pm_evictions; /* pvo's not in page table */
@ -74,7 +75,7 @@ pmap_remove_all(struct pmap *pmap)
/* Nothing. */
}
void pmap_bootstrap (vaddr_t kernelstart, vaddr_t kernelend);
void pmap_bootstrap (vaddr_t, vaddr_t);
boolean_t pmap_extract (struct pmap *, vaddr_t, paddr_t *);
boolean_t pmap_query_bit (struct vm_page *, int);
boolean_t pmap_clear_bit (struct vm_page *, int);
@ -84,16 +85,15 @@ boolean_t pmap_pageidlezero (paddr_t);
void pmap_syncicache (paddr_t, psize_t);
#define PMAP_NEED_PROCWR
void pmap_procwr (struct proc *, vaddr_t, size_t);
void pmap_procwr(struct proc *, vaddr_t, size_t);
int pmap_pte_spill(struct pmap *, vaddr_t);
int pmap_pte_spill(struct pmap *, vaddr_t, boolean_t);
#define PMAP_NC 0x1000
#define PMAP_STEAL_MEMORY
static __inline paddr_t vtophys (vaddr_t);
#if 1
/*
* Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
*
@ -103,10 +103,8 @@ static __inline paddr_t vtophys (vaddr_t);
*/
#define PMAP_MAP_POOLPAGE(pa) (pa)
#define PMAP_UNMAP_POOLPAGE(pa) (pa)
#endif
#define POOL_VTOPHYS(va) vtophys((vaddr_t) va)
static __inline paddr_t
vtophys(vaddr_t va)
{


@ -1,4 +1,4 @@
/* $NetBSD: pte.h,v 1.2 2003/02/05 07:05:19 matt Exp $ */
/* $NetBSD: pte.h,v 1.3 2003/08/24 17:52:34 chs Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -77,7 +77,7 @@ struct pteg {
#define PTE_RW PTE_BW
#define PTE_RO PTE_BR
#define PTE_EXEC 0x00000200 /* pseudo bit in attrs; page is exec */
#define PTE_EXEC 0x00000200 /* pseudo bit; page is exec */
/*
* Extract bits from address


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.13 2003/08/12 05:06:57 matt Exp $ */
/* $NetBSD: pmap.c,v 1.14 2003/08/24 17:52:35 chs Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.13 2003/08/12 05:06:57 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.14 2003/08/24 17:52:35 chs Exp $");
#include "opt_altivec.h"
#include "opt_pmap.h"
@ -245,6 +245,9 @@ STATIC int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
STATIC void pmap_pvo_remove(struct pvo_entry *, int);
STATIC struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
STATIC volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
#define pmap_pvo_reclaim(pm) NULL
STATIC void pvo_set_exec(struct pvo_entry *);
STATIC void pvo_clear_exec(struct pvo_entry *);
STATIC void tlbia(void);
@ -760,8 +763,9 @@ pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
* kernel's pte entries. In either case, interrupts are already
* disabled.
*/
int
pmap_pte_spill(struct pmap *pm, vaddr_t addr)
pmap_pte_spill(struct pmap *pm, vaddr_t addr, boolean_t exec)
{
struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
struct pvo_entry *pvo;
@ -846,6 +850,9 @@ pmap_pte_spill(struct pmap *pm, vaddr_t addr)
return 1;
}
source_pvo = pvo;
if (exec && !PVO_ISEXECUTABLE(source_pvo)) {
return 0;
}
if (victim_pvo != NULL)
break;
}
@ -970,14 +977,12 @@ pmap_real_memory(paddr_t *start, psize_t *size)
void
pmap_init(void)
{
int s;
#ifdef __HAVE_PMAP_PHYSSEG
struct pvo_tqhead *pvoh;
int bank;
long sz;
char *attr;
s = splvm();
pvoh = pmap_physseg.pvoh;
attr = pmap_physseg.attrs;
for (bank = 0; bank < vm_nphysseg; bank++) {
@ -989,10 +994,8 @@ pmap_init(void)
*attr = 0;
}
}
splx(s);
#endif
s = splvm();
pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
&pmap_pool_mallocator);
@ -1000,7 +1003,6 @@ pmap_init(void)
pool_setlowat(&pmap_mpvo_pool, 1008);
pmap_initialized = 1;
splx(s);
#ifdef PMAPCOUNTERS
evcnt_attach_static(&pmap_evcnt_mappings);
@ -1138,7 +1140,8 @@ pmap_pinit(pmap_t pm)
hash &= PTE_VSID >> (PTE_VSID_SHFT + SR_KEY_LEN);
pmap_vsid_bitmap[n] |= mask;
for (i = 0; i < 16; i++)
pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY;
pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
SR_NOEXEC;
return;
}
panic("pmap_pinit: out of segments");
@ -1493,10 +1496,8 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
pvo = pool_get(pl, poolflags);
msr = pmap_interrupts_off();
if (pvo == NULL) {
#if 0
pvo = pmap_pvo_reclaim(pm);
if (pvo == NULL) {
#endif
if ((flags & PMAP_CANFAIL) == 0)
panic("pmap_pvo_enter: failed");
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
@ -1504,16 +1505,14 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
#endif
pmap_interrupts_restore(msr);
return ENOMEM;
#if 0
}
#endif
}
pvo->pvo_vaddr = va;
pvo->pvo_pmap = pm;
pvo->pvo_vaddr &= ~ADDR_POFF;
if (flags & VM_PROT_EXECUTE) {
PMAPCOUNT(exec_mappings);
pvo->pvo_vaddr |= PVO_EXECUTABLE;
pvo_set_exec(pvo);
}
if (flags & PMAP_WIRED)
pvo->pvo_vaddr |= PVO_WIRED;
@ -1559,7 +1558,7 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
* If this is a kernel page, make sure it's active.
*/
if (pm == pmap_kernel()) {
i = pmap_pte_spill(pm, va);
i = pmap_pte_spill(pm, va, FALSE);
KASSERT(i);
}
}
@ -1611,7 +1610,13 @@ pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
}
/*
* Update our statistics
* Account for executable mappings.
*/
if (PVO_ISEXECUTABLE(pvo))
pvo_clear_exec(pvo);
/*
* Update our statistics.
*/
pvo->pvo_pmap->pm_stats.resident_count--;
if (pvo->pvo_pte.pte_lo & PVO_WIRED)
@ -1644,6 +1649,48 @@ pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
#endif
}
/*
* Mark a mapping as executable.
* If this is the first executable mapping in the segment,
* clear the noexec flag.
*/
STATIC void
pvo_set_exec(struct pvo_entry *pvo)
{
struct pmap *pm = pvo->pvo_pmap;
int sr;
if (pm == pmap_kernel() || PVO_ISEXECUTABLE(pvo)) {
return;
}
pvo->pvo_vaddr |= PVO_EXECUTABLE;
sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
if (pm->pm_exec[sr]++ == 0) {
pm->pm_sr[sr] &= ~SR_NOEXEC;
}
}
/*
* Mark a mapping as non-executable.
* If this was the last executable mapping in the segment,
* set the noexec flag.
*/
STATIC void
pvo_clear_exec(struct pvo_entry *pvo)
{
struct pmap *pm = pvo->pvo_pmap;
int sr;
if (pm == pmap_kernel() || !PVO_ISEXECUTABLE(pvo)) {
return;
}
pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
if (--pm->pm_exec[sr] == 0) {
pm->pm_sr[sr] |= SR_NOEXEC;
}
}
/*
* Insert physical page at pa into the given pmap at virtual address va.
*/
@ -1655,7 +1702,6 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
struct vm_page *pg;
struct pool *pl;
register_t pte_lo;
int s;
int error;
u_int pvo_flags;
u_int was_exec = 0;
@ -1714,17 +1760,6 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if (flags & (VM_PROT_READ|VM_PROT_WRITE))
pte_lo |= PTE_REF;
#if 0
if (pm == pmap_kernel()) {
if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ)
printf("pmap_pvo_enter: Kernel RO va %#lx pa %#lx\n",
va, pa);
if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_NONE)
printf("pmap_pvo_enter: Kernel N/A va %#lx pa %#lx\n",
va, pa);
}
#endif
/*
* We need to know if this page can be executable
*/
@ -1734,9 +1769,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
* Record mapping for later back-translation and pte spilling.
* This will overwrite any existing mapping.
*/
s = splvm();
error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
splx(s);
/*
* Flush the real page from the instruction cache if this page is
@ -1774,9 +1807,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
struct mem_region *mp;
register_t pte_lo;
register_t msr;
int error;
int s;
if (va < VM_MIN_KERNEL_ADDRESS)
panic("pmap_kenter_pa: attempt to enter "
@ -1808,12 +1839,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
/*
* We don't care about REF/CHG on PVOs on the unmanaged list.
*/
s = splvm();
msr = pmap_interrupts_off();
error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
&pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
pmap_interrupts_restore(msr);
splx(s);
if (error != 0)
panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d",
@ -1840,18 +1867,15 @@ pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
struct pvo_entry *pvo;
register_t msr;
int pteidx;
int s;
msr = pmap_interrupts_off();
for (; va < endva; va += PAGE_SIZE) {
s = splvm();
msr = pmap_interrupts_off();
pvo = pmap_pvo_find_va(pm, va, &pteidx);
if (pvo != NULL) {
pmap_pvo_remove(pvo, pteidx);
}
pmap_interrupts_restore(msr);
splx(s);
}
pmap_interrupts_restore(msr);
}
/*
@ -1862,7 +1886,6 @@ pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
{
struct pvo_entry *pvo;
register_t msr;
int s;
/*
* If this is a kernel pmap lookup, also check the battable
@ -1884,7 +1907,6 @@ pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
return FALSE;
}
s = splvm();
msr = pmap_interrupts_off();
pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
if (pvo != NULL) {
@ -1892,15 +1914,11 @@ pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
*pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
}
pmap_interrupts_restore(msr);
splx(s);
return pvo != NULL;
}
/*
* Lower the protection on the specified range of this pmap.
*
* There are only two cases: either the protection is going to 0,
* or it is going to read-only.
*/
void
pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
@ -1908,14 +1926,13 @@ pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
struct pvo_entry *pvo;
volatile struct pte *pt;
register_t msr;
int s;
int pteidx;
/*
* Since this routine only downgrades protection, we should
* always be called without WRITE permisison.
* always be called with at least one bit not set.
*/
KASSERT((prot & VM_PROT_WRITE) == 0);
KASSERT(prot != VM_PROT_ALL);
/*
* If there is no protection, this is equivalent to
@ -1926,9 +1943,7 @@ pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
return;
}
s = splvm();
msr = pmap_interrupts_off();
for (; va < endva; va += PAGE_SIZE) {
pvo = pmap_pvo_find_va(pm, va, &pteidx);
if (pvo == NULL)
@ -1939,7 +1954,7 @@ pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
* Revoke executable if asked to do so.
*/
if ((prot & VM_PROT_EXECUTE) == 0)
pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
pvo_clear_exec(pvo);
#if 0
/*
@ -1972,9 +1987,7 @@ pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
PMAP_PVO_CHECK(pvo); /* sanity check */
}
pmap_interrupts_restore(msr);
splx(s);
}
void
@ -1982,11 +1995,8 @@ pmap_unwire(pmap_t pm, vaddr_t va)
{
struct pvo_entry *pvo;
register_t msr;
int s;
s = splvm();
msr = pmap_interrupts_off();
pvo = pmap_pvo_find_va(pm, va, NULL);
if (pvo != NULL) {
if (pvo->pvo_vaddr & PVO_WIRED) {
@ -1995,16 +2005,11 @@ pmap_unwire(pmap_t pm, vaddr_t va)
}
PMAP_PVO_CHECK(pvo); /* sanity check */
}
pmap_interrupts_restore(msr);
splx(s);
}
/*
* Lower the protection on the specified physical page.
*
* There are only two cases: either the protection is going to 0,
* or it is going to read-only.
*/
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
@ -2013,18 +2018,8 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
struct pvo_entry *pvo, *next_pvo;
volatile struct pte *pt;
register_t msr;
int s;
/*
* Since this routine only downgrades protection, if the
* maximal protection is desired, there isn't any change
* to be made.
*/
KASSERT((prot & VM_PROT_WRITE) == 0);
if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE))
return;
s = splvm();
KASSERT(prot != VM_PROT_ALL);
msr = pmap_interrupts_off();
/*
@ -2059,7 +2054,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
* flag in the PVO.
*/
if ((prot & VM_PROT_EXECUTE) == 0)
pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
pvo_clear_exec(pvo);
/*
* If this entry is already RO, don't diddle with the
@ -2085,9 +2080,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
}
PMAP_PVO_CHECK(pvo); /* sanity check */
}
pmap_interrupts_restore(msr);
splx(s);
}
/*
@ -2123,11 +2116,10 @@ pmap_query_bit(struct vm_page *pg, int ptebit)
struct pvo_entry *pvo;
volatile struct pte *pt;
register_t msr;
int s;
if (pmap_attr_fetch(pg) & ptebit)
return TRUE;
s = splvm();
msr = pmap_interrupts_off();
LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
PMAP_PVO_CHECK(pvo); /* sanity check */
@ -2139,7 +2131,6 @@ pmap_query_bit(struct vm_page *pg, int ptebit)
pmap_attr_save(pg, ptebit);
PMAP_PVO_CHECK(pvo); /* sanity check */
pmap_interrupts_restore(msr);
splx(s);
return TRUE;
}
}
@ -2163,13 +2154,11 @@ pmap_query_bit(struct vm_page *pg, int ptebit)
pmap_attr_save(pg, ptebit);
PMAP_PVO_CHECK(pvo); /* sanity check */
pmap_interrupts_restore(msr);
splx(s);
return TRUE;
}
}
}
pmap_interrupts_restore(msr);
splx(s);
return FALSE;
}
@ -2181,9 +2170,7 @@ pmap_clear_bit(struct vm_page *pg, int ptebit)
volatile struct pte *pt;
register_t msr;
int rv = 0;
int s;
s = splvm();
msr = pmap_interrupts_off();
/*
@ -2231,7 +2218,7 @@ pmap_clear_bit(struct vm_page *pg, int ptebit)
PMAP_PVO_CHECK(pvo); /* sanity check */
}
pmap_interrupts_restore(msr);
splx(s);
/*
* If we are clearing the modify bit and this page was marked EXEC
* and the user of the page thinks the page was modified, then we


@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.84 2003/08/12 05:06:56 matt Exp $ */
/* $NetBSD: trap.c,v 1.85 2003/08/24 17:52:35 chs Exp $ */
/*
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.84 2003/08/12 05:06:56 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.85 2003/08/24 17:52:35 chs Exp $");
#include "opt_altivec.h"
#include "opt_ddb.h"
@ -122,7 +122,7 @@ trap(struct trapframe *frame)
if ((frame->dsisr & DSISR_NOTFOUND) &&
vm_map_pmap(map)->pm_evictions > 0 &&
pmap_pte_spill(vm_map_pmap(map),
trunc_page(va))) {
trunc_page(va), FALSE)) {
/* KERNEL_PROC_UNLOCK(l); */
KERNEL_UNLOCK();
return;
@ -179,6 +179,7 @@ trap(struct trapframe *frame)
ftype = VM_PROT_WRITE;
else
ftype = VM_PROT_READ;
/*
* Try to spill an evicted pte into the page table
* if this wasn't a protection fault and the pmap
@ -187,7 +188,8 @@ trap(struct trapframe *frame)
map = &p->p_vmspace->vm_map;
if ((frame->dsisr & DSISR_NOTFOUND) &&
vm_map_pmap(map)->pm_evictions > 0 &&
pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->dar))) {
pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->dar),
FALSE)) {
KERNEL_PROC_UNLOCK(l);
break;
}
@ -232,20 +234,20 @@ trap(struct trapframe *frame)
case EXC_ISI|EXC_USER:
KERNEL_PROC_LOCK(l);
ci->ci_ev_isi.ev_count++;
/*
* Try to spill an evicted pte into the page table
* if this wasn't a protection fault and the pmap
* has some evicted pte's.
*/
map = &p->p_vmspace->vm_map;
if ((frame->srr1 & DSISR_NOTFOUND) &&
vm_map_pmap(map)->pm_evictions > 0 &&
pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->srr0))) {
if (pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->srr0),
TRUE)) {
KERNEL_PROC_UNLOCK(l);
break;
}
ftype = VM_PROT_READ | VM_PROT_EXECUTE;
ftype = VM_PROT_EXECUTE;
rv = uvm_fault(map, trunc_page(frame->srr0), 0, ftype);
if (rv == 0) {
KERNEL_PROC_UNLOCK(l);
@ -254,7 +256,7 @@ trap(struct trapframe *frame)
ci->ci_ev_isi_fatal.ev_count++;
if (cpu_printfataltraps) {
printf("trap: pid %d.%d (%s): user ISI trap @ %#lx "
"(SSR1=%#lx)\n", p->p_pid, l->l_lid, p->p_comm,
"(SRR1=%#lx)\n", p->p_pid, l->l_lid, p->p_comm,
frame->srr0, frame->srr1);
}
trapsignal(l, SIGSEGV, EXC_ISI);


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.268 2003/08/21 09:36:28 pk Exp $ */
/* $NetBSD: pmap.c,v 1.269 2003/08/24 17:52:36 chs Exp $ */
/*
* Copyright (c) 1996
@ -56,7 +56,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.268 2003/08/21 09:36:28 pk Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.269 2003/08/24 17:52:36 chs Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
@ -891,6 +891,59 @@ setpte4m(va, pte)
setpgt4m(sp->sg_pte + VA_SUN4M_VPG(va), pte);
}
/*
* Translation table for kernel vs. PTE protection bits.
*/
u_int protection_codes[2][8];
#define pte_prot4m(pm, prot) \
(protection_codes[(pm) == pmap_kernel() ? 0 : 1][(prot)])
static void
sparc_protection_init4m(void)
{
u_int prot, *kp, *up;
kp = protection_codes[0];
up = protection_codes[1];
for (prot = 0; prot < 8; prot++) {
switch (prot) {
case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
kp[prot] = PPROT_N_RWX;
up[prot] = PPROT_RWX_RWX;
break;
case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
kp[prot] = PPROT_N_RWX;
up[prot] = PPROT_RW_RW;
break;
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
kp[prot] = PPROT_N_RX;
up[prot] = PPROT_RX_RX;
break;
case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
kp[prot] = PPROT_N_RX;
up[prot] = PPROT_R_R;
break;
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
kp[prot] = PPROT_N_RWX;
up[prot] = PPROT_RWX_RWX;
break;
case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
kp[prot] = PPROT_N_RWX;
up[prot] = PPROT_RW_RW;
break;
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
kp[prot] = PPROT_N_RX;
up[prot] = PPROT_X_X;
break;
case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
kp[prot] = PPROT_N_RX;
up[prot] = PPROT_N_RX;
break;
}
}
}
/*
* Page table pool back-end.
*/
@ -3875,6 +3928,7 @@ pmap_bootstrap4m(top)
* Now switch to kernel pagetables (finally!)
*/
mmu_install_tables(&cpuinfo);
sparc_protection_init4m();
}
static u_long prom_ctxreg;
@ -5389,11 +5443,6 @@ out:
/*
* Lower (make more strict) the protection on the specified
* range of this pmap.
*
* There are only two cases: either the protection is going to 0
* (in which case we call pmap_remove to do the dirty work), or
* it is going from read/write to read-only. The latter is
* fairly easy.
*/
void
pmap_protect4m(pm, sva, eva, prot)
@ -5405,6 +5454,12 @@ pmap_protect4m(pm, sva, eva, prot)
int s, vr, vs;
struct regmap *rp;
struct segmap *sp;
int newprot;
/* XXX noexec stuff gets "Level 15 Interrupt" without this */
if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) {
prot = VM_PROT_NONE;
}
if ((prot & VM_PROT_READ) == 0) {
pmap_remove(pm, sva, eva);
@ -5414,11 +5469,14 @@ pmap_protect4m(pm, sva, eva, prot)
#ifdef DEBUG
if (pmapdebug & PDB_CHANGEPROT)
printf("pmap_protect[%d][curpid %d, ctx %d,%d](%lx, %lx, %x)\n",
cpu_number(), getcontext4m(),
curproc==NULL ? -1 : curproc->p_pid,
pm->pm_ctx ? pm->pm_ctxnum : -1, sva, eva, prot);
cpu_number(), curproc->p_pid,
getcontext4m(), pm->pm_ctx ? pm->pm_ctxnum : -1,
sva, eva, prot);
#endif
newprot = pte_prot4m(pm, prot);
write_user_windows();
s = splvm();
PMAP_MAP_TO_HEAD_LOCK();
simple_lock(&pm->pm_lock);
@ -5428,56 +5486,50 @@ pmap_protect4m(pm, sva, eva, prot)
vs = VA_VSEG(va);
rp = &pm->pm_regmap[vr];
nva = VSTOVA(vr,vs + 1);
if (nva == 0) /* XXX */
panic("pmap_protect: last segment"); /* cannot happen(why?)*/
if (nva > eva)
nva = eva;
if (rp->rg_nsegmap == 0) {
va = nva;
continue;
}
#ifdef DEBUG
if (rp->rg_segmap == NULL)
panic("pmap_protect: no segments");
#endif
sp = &rp->rg_segmap[vs];
if (sp->sg_npte == 0) {
va = nva;
continue;
}
#ifdef DEBUG
if (sp->sg_pte == NULL)
panic("pmap_protect: no pages");
#endif
/*
* pages loaded: take away write bits from MMU PTEs
*/
pmap_stats.ps_npg_prot_all += (nva - va) >> PGSHIFT;
for (; va < nva; va += NBPG) {
int tpte;
int tpte, npte;
tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
if ((tpte & SRMMU_PGTYPE) != PG_SUN4M_OBMEM)
continue;
npte = (tpte & ~SRMMU_PROT_MASK) | newprot;
if (npte == tpte)
continue;
/*
* Flush cache so that any existing cache
* tags are updated. This is really only
* needed for PTEs that lose PG_W.
* tags are updated.
*/
if ((tpte & (PPROT_WRITE|SRMMU_PGTYPE)) ==
(PPROT_WRITE|PG_SUN4M_OBMEM)) {
pmap_stats.ps_npg_prot_actual++;
if (pm->pm_ctx) {
cache_flush_page(va, pm->pm_ctxnum);
pmap_stats.ps_npg_prot_actual++;
if (pm->pm_ctx) {
cache_flush_page(va, pm->pm_ctxnum);
#if !defined(MULTIPROCESSOR)
/* Flush TLB entry */
tlb_flush_page(va, pm->pm_ctxnum,
PMAP_CPUSET(pm));
/* Flush TLB entry */
tlb_flush_page(va, pm->pm_ctxnum,
PMAP_CPUSET(pm));
#endif
}
updatepte4m(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
PPROT_WRITE, 0, pm->pm_ctxnum,
PMAP_CPUSET(pm));
}
updatepte4m(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
SRMMU_PROT_MASK, newprot, pm->pm_ctxnum,
PMAP_CPUSET(pm));
}
}
simple_unlock(&pm->pm_lock);
@ -5507,10 +5559,7 @@ pmap_changeprot4m(pm, va, prot, flags)
cpu_number(), pm, va, prot, flags);
#endif
if (pm == pmap_kernel())
newprot = prot & VM_PROT_WRITE ? PPROT_N_RWX : PPROT_N_RX;
else
newprot = prot & VM_PROT_WRITE ? PPROT_RWX_RWX : PPROT_RX_RX;
newprot = pte_prot4m(pm, prot);
pmap_stats.ps_changeprots++;
@ -6189,12 +6238,10 @@ pmap_enter4m(pm, va, pa, prot, flags)
" MicroSPARC");
}
#endif
pteproto |= SRMMU_TEPTE;
pteproto |= PMAP_T2PTE_SRMMU(pa);
/* Make sure we get a pte with appropriate perms! */
pteproto |= SRMMU_TEPTE | PPROT_RX_RX;
pa &= ~PMAP_TNC_SRMMU;
/*
* Set up prototype for new PTE. Cannot set PG_NC from PV_NC yet
* since the pvlist no-cache bit might change as a result of the
@ -6202,8 +6249,8 @@ pmap_enter4m(pm, va, pa, prot, flags)
*/
pteproto |= (atop(pa) << SRMMU_PPNSHIFT);
if (prot & VM_PROT_WRITE)
pteproto |= PPROT_WRITE;
/* Make sure we get a pte with appropriate perms! */
pteproto |= pte_prot4m(pm, prot);
if (pm == pmap_kernel())
error = pmap_enk4m(pm, va, prot, flags, pg, pteproto | PPROT_S);
@ -6524,12 +6571,10 @@ pmap_kenter_pa4m(va, pa, prot)
/* Initialise pteproto with cache bit */
pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
pteproto |= SRMMU_TEPTE | PPROT_S;
pteproto |= PMAP_T2PTE_SRMMU(pa);
pteproto |= SRMMU_TEPTE | PPROT_RX_RX;
pteproto |= (atop(pa & ~PMAP_TNC_SRMMU) << SRMMU_PPNSHIFT);
if (prot & VM_PROT_WRITE)
pteproto |= PPROT_WRITE;
pteproto |= PPROT_S;
pteproto |= pte_prot4m(pm, prot);
vr = VA_VREG(va);
vs = VA_VSEG(va);
@ -6641,7 +6686,7 @@ pmap_kprotect4m(vaddr_t va, vsize_t size, vm_prot_t prot)
struct segmap *sp;
size = roundup(size,NBPG);
newprot = prot & VM_PROT_WRITE ? PPROT_N_RWX : PPROT_N_RX;
newprot = pte_prot4m(pm, prot);
while (size > 0) {
rp = &pm->pm_regmap[VA_VREG(va)];
@ -6739,7 +6784,6 @@ pmap_extract4_4c(pm, va, pap)
return (FALSE);
}
sp = &rp->rg_segmap[vs];
ptep = sp->sg_pte;
if (ptep == NULL) {
#ifdef DEBUG
@ -7544,11 +7588,6 @@ pmap_deactivate(l)
pmap_t pm;
struct proc *p;
#ifdef DIAGNOSTIC
if (l == NULL)
panic("pmap_deactivate: l==NULL");
#endif
p = l->l_proc;
if (p->p_vmspace &&
(pm = p->p_vmspace->vm_map.pmap) != pmap_kernel()) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.138 2003/08/12 15:34:32 pk Exp $ */
/* $NetBSD: trap.c,v 1.139 2003/08/24 17:52:37 chs Exp $ */
/*
* Copyright (c) 1996
@ -49,7 +49,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.138 2003/08/12 15:34:32 pk Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.139 2003/08/24 17:52:37 chs Exp $");
#include "opt_ddb.h"
#include "opt_ktrace.h"
@ -1170,7 +1170,17 @@ mem_access_fault4m(type, sfsr, sfva, tf)
}
/* Now munch on protections... */
atype = sfsr & SFSR_AT_STORE ? VM_PROT_WRITE : VM_PROT_READ;
if (sfsr & SFSR_AT_STORE) {
/* stores are never text faults. */
atype = VM_PROT_WRITE;
} else {
if ((sfsr & SFSR_AT_TEXT) || type == T_TEXTFAULT) {
atype = VM_PROT_EXECUTE;
} else {
atype = VM_PROT_READ;
}
}
if (psr & PSR_PS) {
extern char Lfsbail[];
if (sfsr & SFSR_AT_TEXT || type == T_TEXTFAULT) {


@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.28 2003/08/10 02:30:51 chs Exp $ */
/* $NetBSD: pmap.h,v 1.29 2003/08/24 17:52:37 chs Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -199,6 +199,7 @@ int pmap_dumpmmu __P((int (*)__P((dev_t, daddr_t, caddr_t, size_t)),
daddr_t));
int pmap_pa_exists __P((paddr_t));
void switchexit __P((struct lwp *, int));
void pmap_kprotect(vaddr_t, vm_prot_t);
/* SPARC64 specific */
int ctx_alloc __P((struct pmap *));


@ -1,4 +1,4 @@
/* $NetBSD: clock.c,v 1.60 2003/07/15 03:36:08 lukem Exp $ */
/* $NetBSD: clock.c,v 1.61 2003/08/24 17:52:38 chs Exp $ */
/*
* Copyright (c) 1992, 1993
@ -55,7 +55,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.60 2003/07/15 03:36:08 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.61 2003/08/24 17:52:38 chs Exp $");
#include "opt_multiprocessor.h"
@ -260,9 +260,9 @@ clockattach_sbus(parent, self, aux)
if (sbus_bus_map(bt,
sa->sa_slot,
(sa->sa_offset & ~PAGE_SIZE),
(sa->sa_offset & ~(PAGE_SIZE - 1)),
sz,
BUS_SPACE_MAP_LINEAR|BUS_SPACE_MAP_READONLY,
BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_READONLY,
&ci.ci_bh) != 0) {
printf("%s: can't map register\n", self->dv_xname);
return;
@ -284,30 +284,28 @@ clock_wenable(handle, onoff)
struct todr_chip_handle *handle;
int onoff;
{
register int s, err = 0;
register int prot;/* nonzero => change prot */
struct clock_info *ci;
vm_prot_t prot;
vaddr_t va;
int s, err = 0;
static int writers;
s = splhigh();
if (onoff)
prot = writers++ == 0 ?
VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED : 0;
prot = writers++ == 0 ? VM_PROT_READ|VM_PROT_WRITE : 0;
else
prot = --writers == 0 ?
VM_PROT_READ|PMAP_WIRED : 0;
prot = --writers == 0 ? VM_PROT_READ : 0;
splx(s);
if (prot) {
struct clock_info *ci =
(struct clock_info *)handle->bus_cookie;
vaddr_t vaddr =
(vaddr_t)bus_space_vaddr(ci->ci_bt, ci->ci_bh);
if (vaddr)
pmap_protect(pmap_kernel(), vaddr, vaddr+PAGE_SIZE,
prot);
else
printf("clock_wenable: WARNING -- cannot get va\n");
if (prot == VM_PROT_NONE) {
return 0;
}
ci = (struct clock_info *)handle->bus_cookie;
va = (vaddr_t)bus_space_vaddr(ci->ci_bt, ci->ci_bh);
if (va == NULL) {
printf("clock_wenable: WARNING -- cannot get va\n");
return EIO;
}
pmap_kprotect(va, prot);
return (err);
}
@ -440,7 +438,7 @@ clockattach_rtc(parent, self, aux)
#endif
printf(": %s\n", model);
/*
/*
* Turn interrupts off, just in case. (Although they shouldn't
* be wired to an interrupt controller on sparcs).
*/


@ -1,4 +1,4 @@
/* $NetBSD: db_interface.c,v 1.70 2003/07/15 03:36:08 lukem Exp $ */
/* $NetBSD: db_interface.c,v 1.71 2003/08/24 17:52:38 chs Exp $ */
/*
* Copyright (c) 1996-2002 Eduardo Horvath. All rights reserved.
@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.70 2003/07/15 03:36:08 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.71 2003/08/24 17:52:38 chs Exp $");
#include "opt_ddb.h"
@ -556,35 +556,38 @@ int64_t pseg_get __P((struct pmap *, vaddr_t));
void
db_dump_pmap(pm)
struct pmap* pm;
struct pmap *pm;
{
/* print all valid pages in the kernel pmap */
long i, j, k, n;
unsigned long long i, j, k, n, data0, data1;
paddr_t *pdir, *ptbl;
/* Almost the same as pmap_collect() */
n = 0;
for (i=0; i<STSZ; i++) {
if((pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i], ASI_PHYS_CACHED))) {
db_printf("pdir %ld at %lx:\n", i, (long)pdir);
for (k=0; k<PDSZ; k++) {
if ((ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k], ASI_PHYS_CACHED))) {
db_printf("\tptable %ld:%ld at %lx:\n", i, k, (long)ptbl);
for (j=0; j<PTSZ; j++) {
int64_t data0, data1;
data0 = ldxa((vaddr_t)&ptbl[j], ASI_PHYS_CACHED);
j++;
data1 = ldxa((vaddr_t)&ptbl[j], ASI_PHYS_CACHED);
if (data0 || data1) {
db_printf("%llx: %llx\t",
(unsigned long long)(((u_int64_t)i<<STSHIFT)|(k<<PDSHIFT)|((j-1)<<PTSHIFT)),
(unsigned long long)(data0));
db_printf("%llx: %llx\n",
(unsigned long long)(((u_int64_t)i<<STSHIFT)|(k<<PDSHIFT)|(j<<PTSHIFT)),
(unsigned long long)(data1));
}
}
for (i = 0; i < STSZ; i++) {
pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i], ASI_PHYS_CACHED);
if (!pdir) {
continue;
}
db_printf("pdir %lld at %lx:\n", i, (long)pdir);
for (k = 0; k < PDSZ; k++) {
ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k], ASI_PHYS_CACHED);
if (!ptbl) {
continue;
}
db_printf("\tptable %lld:%lld at %lx:\n", i, k, (long)ptbl);
for (j = 0; j < PTSZ; j++) {
data0 = ldxa((vaddr_t)&ptbl[j], ASI_PHYS_CACHED);
j++;
data1 = ldxa((vaddr_t)&ptbl[j], ASI_PHYS_CACHED);
if (!data0 && !data1) {
continue;
}
db_printf("%016llx: %016llx\t",
(i << STSHIFT) | (k << PDSHIFT) | ((j - 1) << PTSHIFT),
data0);
db_printf("%016llx: %016llx\n",
(i << STSHIFT) | (k << PDSHIFT) | (j << PTSHIFT),
data1);
}
}
}
@ -725,22 +728,22 @@ db_dump_dtsb(addr, have_addr, count, modif)
db_expr_t count;
char *modif;
{
extern pte_t *tsb;
extern pte_t *tsb_dmmu;
extern int tsbsize;
#define TSBENTS (512<<tsbsize)
#define TSBENTS (512 << tsbsize)
int i;
db_printf("TSB:\n");
for (i=0; i<TSBENTS; i++) {
for (i = 0; i < TSBENTS; i++) {
db_printf("%4d:%4d:%08x %08x:%08x ", i,
(int)((tsb[i].tag&TSB_TAG_G)?-1:TSB_TAG_CTX(tsb[i].tag)),
(int)((i<<13)|TSB_TAG_VA(tsb[i].tag)),
(int)(tsb[i].data>>32), (int)tsb[i].data);
(int)((tsb_dmmu[i].tag&TSB_TAG_G)?-1:TSB_TAG_CTX(tsb_dmmu[i].tag)),
(int)((i<<13)|TSB_TAG_VA(tsb_dmmu[i].tag)),
(int)(tsb_dmmu[i].data>>32), (int)tsb_dmmu[i].data);
i++;
db_printf("%4d:%4d:%08x %08x:%08x\n", i,
(int)((tsb[i].tag&TSB_TAG_G)?-1:TSB_TAG_CTX(tsb[i].tag)),
(int)((i<<13)|TSB_TAG_VA(tsb[i].tag)),
(int)(tsb[i].data>>32), (int)tsb[i].data);
(int)((tsb_dmmu[i].tag&TSB_TAG_G)?-1:TSB_TAG_CTX(tsb_dmmu[i].tag)),
(int)((i<<13)|TSB_TAG_VA(tsb_dmmu[i].tag)),
(int)(tsb_dmmu[i].data>>32), (int)tsb_dmmu[i].data);
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.178 2003/07/08 22:09:26 cdi Exp $ */
/* $NetBSD: locore.s,v 1.179 2003/08/24 17:52:38 chs Exp $ */
/*
* Copyright (c) 1996-2002 Eduardo Horvath
@ -2296,6 +2296,7 @@ data_miss:
sll %g6, 3, %g6
brz,pn %g4, data_nfo ! NULL entry? check somewhere else
add %g6, %g4, %g6
1:
ldxa [%g6] ASI_PHYS_CACHED, %g4
brgez,pn %g4, data_nfo ! Entry invalid? Punt
@ -2308,9 +2309,9 @@ data_miss:
cmp %g4, %g7
bne,pn %xcc, 1b
or %g4, TTE_ACCESS, %g4 ! Update the access bit
1:
stx %g1, [%g2] ! Update TSB entry tag
stx %g4, [%g2+8] ! Update TSB entry data
#ifdef DEBUG
set DATA_START, %g6 ! debug
@ -3256,7 +3257,14 @@ instr_miss:
1:
ldxa [%g6] ASI_PHYS_CACHED, %g4
brgez,pn %g4, textfault
or %g4, TTE_ACCESS, %g7 ! Update accessed bit
nop
/* Check if it's an executable mapping. */
andcc %g4, TTE_EXEC, %g0
bz,pn %xcc, textfault
nop
or %g4, TTE_ACCESS, %g7 ! Update accessed bit
btst TTE_ACCESS, %g4 ! Need to update access bit?
bne,pt %xcc, 1f
nop
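Restated in C, the new instr_miss policy is roughly the following; tte_valid() and textfault() are stand-ins for the assembly tests above, not kernel functions:

	/*
	 * Sketch of the ITLB miss path: only a valid TTE that carries
	 * TTE_EXEC may be loaded for an instruction fetch; everything
	 * else is promoted to a text fault (a protection error).
	 */
	if (!tte_valid(tte))			/* brgez: invalid entry */
		return (textfault());
	if ((tte & TTE_EXEC) == 0)		/* new non-exec check */
		return (textfault());
	tte |= TTE_ACCESS;			/* then set the accessed bit */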
@ -5942,44 +5950,32 @@ _C_LABEL(cpu_initialize):
flushw
/*
* Step 7: change the trap base register, and install our TSB
*
* XXXX -- move this to CPUINFO_VA+32KB?
* Step 7: change the trap base register, and install our TSB pointers
*/
sethi %hi(0x1fff), %l2
set _C_LABEL(tsb), %l0
set _C_LABEL(tsb_dmmu), %l0
LDPTR [%l0], %l0
set _C_LABEL(tsbsize), %l1
or %l2, %lo(0x1fff), %l2
ld [%l1], %l1
andn %l0, %l2, %l0 ! Mask off size and split bits
or %l0, %l1, %l0 ! Make a TSB pointer
! srl %l0, 0, %l0 ! DEBUG -- make sure this is a valid pointer by zeroing the high bits
#ifdef DEBUG
set _C_LABEL(pmapdebug), %o1
ld [%o1], %o1
sethi %hi(0x40000), %o2
btst %o2, %o1
bz 0f
set 1f, %o0 ! Debug printf
srlx %l0, 32, %o1
call _C_LABEL(prom_printf)
srl %l0, 0, %o2
.data
1:
.asciz "Setting TSB pointer %08x %08x\r\n"
_ALIGN
.text
0:
#endif
set TSB, %l2
stxa %l0, [%l2] ASI_IMMU ! Install insn TSB pointer
membar #Sync ! We may need more membar #Sync in here
stxa %l0, [%l2] ASI_DMMU ! Install data TSB pointer
membar #Sync
sethi %hi(0x1fff), %l2
set _C_LABEL(tsb_immu), %l0
LDPTR [%l0], %l0
set _C_LABEL(tsbsize), %l1
or %l2, %lo(0x1fff), %l2
ld [%l1], %l1
andn %l0, %l2, %l0 ! Mask off size and split bits
or %l0, %l1, %l0 ! Make a TSB pointer
set TSB, %l2
stxa %l0, [%l2] ASI_IMMU ! Install instruction TSB pointer
membar #Sync
set _C_LABEL(trapbase), %l1
call _C_LABEL(prom_set_trap_table) ! Now we should be running 100% from our handlers
mov %l1, %o0
@ -5994,17 +5990,6 @@ _C_LABEL(cpu_initialize):
wrpr %g0, 0, %tstate
#endif
#ifdef NOTDEF_DEBUG
set 1f, %o0 ! Debug printf
srax %l0, 32, %o1
call _C_LABEL(prom_printf)
srl %l0, 0, %o2
.data
1:
.asciz "Our trap handler is enabled\r\n"
_ALIGN
.text
#endif
/*
* Call our startup routine.
*/

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.142 2003/07/15 03:36:09 lukem Exp $ */
/* $NetBSD: pmap.c,v 1.143 2003/08/24 17:52:39 chs Exp $ */
/*
*
* Copyright (C) 1996-1999 Eduardo Horvath.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.142 2003/07/15 03:36:09 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.143 2003/08/24 17:52:39 chs Exp $");
#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
#define HWREF
@ -139,7 +139,8 @@ u_int64_t first_phys_addr;
/*
* Here's the CPU TSB stuff. It's allocated in pmap_bootstrap.
*/
pte_t *tsb;
pte_t *tsb_dmmu;
pte_t *tsb_immu;
int tsbsize; /* tsbents = 512 * 2^^tsbsize */
#define TSBENTS (512<<tsbsize)
#define TSBSIZE (TSBENTS * 16)
@ -173,10 +174,29 @@ static int memh = 0, vmemh = 0; /* Handles to OBP devices */
paddr_t avail_start, avail_end; /* These are used by ps & family */
static int ptelookup_va __P((vaddr_t va)); /* sun4u */
#if notyet
static void tsb_enter __P((int ctx, int64_t va, int64_t data));
#endif
static int ptelookup_va __P((vaddr_t va));
static __inline void
clrx(void *addr)
{
__asm __volatile("clrx [%0]" : : "r" (addr) : "memory");
}
static __inline void
tsb_invalidate(int ctx, vaddr_t va)
{
int i;
int64_t tag;
i = ptelookup_va(va);
tag = TSB_TAG(0, ctx, va);
if (tsb_dmmu[i].tag == tag) {
clrx(&tsb_dmmu[i].data);
}
if (tsb_immu[i].tag == tag) {
clrx(&tsb_immu[i].data);
}
}
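The helper pairs with a TLB flush wherever a PTE changes; a minimal sketch of the caller pattern used throughout the routines below:

	/*
	 * After rewriting a PTE, drop any stale copies from both TSBs
	 * and from the TLB so the next miss reloads the new translation.
	 */
	if (pseg_set(pm, va, data, 0) == 0 &&
	    (pm->pm_ctx || pm == pmap_kernel())) {
		tsb_invalidate(pm->pm_ctx, va);
		tlb_flush_pte(va, pm->pm_ctx);
	}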
struct pmap_stats {
int ps_unlink_pvfirst; /* # of pv_unlinks on head */
@ -950,9 +970,10 @@ remap_data:
*/
BDPRINTF(PDB_BOOT1, ("firstaddr before TSB=%lx\r\n",
(u_long)firstaddr));
firstaddr = ((firstaddr + TSBSIZE - 1) & ~(TSBSIZE-1));
firstaddr = ((firstaddr + TSBSIZE - 1) & ~(TSBSIZE - 1));
#ifdef DEBUG
i = (firstaddr + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1); /* First, page align */
/* First, page align */
i = (firstaddr + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
if ((int)firstaddr < i) {
prom_printf("TSB alloc fixup failed\r\n");
prom_printf("frobbed i, firstaddr before TSB=%x, %lx\r\n",
@ -963,12 +984,14 @@ remap_data:
#endif
BDPRINTF(PDB_BOOT, ("frobbed i, firstaddr before TSB=%x, %lx\r\n",
(int)i, (u_long)firstaddr));
valloc(tsb, pte_t, TSBSIZE);
bzero(tsb, TSBSIZE);
valloc(tsb_dmmu, pte_t, TSBSIZE);
bzero(tsb_dmmu, TSBSIZE);
valloc(tsb_immu, pte_t, TSBSIZE);
bzero(tsb_immu, TSBSIZE);
BDPRINTF(PDB_BOOT1, ("firstaddr after TSB=%lx\r\n", (u_long)firstaddr));
BDPRINTF(PDB_BOOT1, ("TSB allocated at %p size %08x\r\n", (void*)tsb,
(int)TSBSIZE));
BDPRINTF(PDB_BOOT1, ("TSB allocated at %p/%p size %08x\r\n",
tsb_dmmu, tsb_immu, TSBSIZE));
first_phys_addr = mem->start;
BDPRINTF(PDB_BOOT1, ("firstaddr after pmap=%08lx\r\n",
@ -1202,13 +1225,13 @@ remap_data:
va += PAGE_SIZE;
msgbufsiz -= PAGE_SIZE;
phys_msgbuf += PAGE_SIZE;
} while (psize-=PAGE_SIZE);
} while (psize -= PAGE_SIZE);
}
BDPRINTF(PDB_BOOT1, ("Done inserting mesgbuf into pmap_kernel()\r\n"));
BDPRINTF(PDB_BOOT1, ("Inserting PROM mappings into pmap_kernel()\r\n"));
for (i = 0; i < prom_map_size; i++)
if (prom_map[i].vstart && ((prom_map[i].vstart>>32) == 0))
if (prom_map[i].vstart && ((prom_map[i].vstart >> 32) == 0))
for (j = 0; j < prom_map[i].vsize; j += PAGE_SIZE) {
int k;
@ -1225,7 +1248,7 @@ remap_data:
#endif
/* Enter PROM map into pmap_kernel() */
pmap_enter_kpage(prom_map[i].vstart + j,
(prom_map[i].tte + j)|
(prom_map[i].tte + j) | TLB_EXEC |
page_size_map[k].code);
}
BDPRINTF(PDB_BOOT1, ("Done inserting PROM mappings into pmap_kernel()\r\n"));
@ -1697,7 +1720,6 @@ pmap_kenter_pa(va, pa, prot)
if (prot & VM_PROT_WRITE)
tte.data |= TLB_REAL_W|TLB_W;
tte.data |= TLB_TSB_LOCK; /* wired */
KASSERT((tte.data & TLB_NFO) == 0);
ptp = 0;
retry:
@ -1721,19 +1743,19 @@ pmap_kenter_pa(va, pa, prot)
i = ptelookup_va(va);
if (pmapdebug & PDB_ENTER)
prom_printf("pmap_kenter_pa: va=%08x data=%08x:%08x "
"tsb[%d]=%08x\r\n", va, (int)(tte.data>>32),
(int)tte.data, i, &tsb[i]);
if (pmapdebug & PDB_MMU_STEAL && tsb[i].data) {
"tsb_dmmu[%d]=%08x\r\n", va, (int)(tte.data>>32),
(int)tte.data, i, &tsb_dmmu[i]);
if (pmapdebug & PDB_MMU_STEAL && tsb_dmmu[i].data) {
prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x "
"data=%08x:%08x tsb[%d]=%08x\r\n",
(int)(tsb[i].tag>>32), (int)tsb[i].tag,
(int)(tsb[i].data>>32), (int)tsb[i].data,
i, &tsb[i]);
prom_printf("with va=%08x data=%08x:%08x tsb[%d]=%08x\r\n",
va, (int)(tte.data>>32), (int)tte.data, i, &tsb[i]);
"data=%08x:%08x tsb_dmmu[%d]=%08x\r\n",
(int)(tsb_dmmu[i].tag>>32), (int)tsb_dmmu[i].tag,
(int)(tsb_dmmu[i].data>>32), (int)tsb_dmmu[i].data,
i, &tsb_dmmu[i]);
prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\r\n",
va, (int)(tte.data>>32), (int)tte.data, i,
&tsb_dmmu[i]);
}
#endif
KASSERT((tsb[i].data & TLB_NFO) == 0);
}
/*
@ -1752,7 +1774,6 @@ pmap_kremove(va, size)
vaddr_t flushva = va;
vsize_t flushsize = size;
paddr_t pa;
int i;
boolean_t flush = FALSE;
KASSERT(va < INTSTACK || va > EINTSTACK);
@ -1793,12 +1814,7 @@ pmap_kremove(va, size)
(int)va_to_pte(va)));
REMOVE_STAT(removes);
i = ptelookup_va(va);
if (tsb[i].tag > 0 &&
tsb[i].tag == TSB_TAG(0, pm->pm_ctx, va)) {
tsb[i].data = 0;
KASSERT((tsb[i].data & TLB_NFO) == 0);
}
tsb_invalidate(pm->pm_ctx, va);
REMOVE_STAT(tflushes);
/*
@ -1818,6 +1834,7 @@ pmap_kremove(va, size)
* Insert physical page at pa into the given pmap at virtual address va.
* Supports 64-bit pa so we can map I/O space.
*/
int
pmap_enter(pm, va, pa, prot, flags)
struct pmap *pm;
@ -1924,6 +1941,8 @@ pmap_enter(pm, va, pa, prot, flags)
#ifdef HWREF
if (prot & VM_PROT_WRITE)
tte.data |= TLB_REAL_W;
if (prot & VM_PROT_EXECUTE)
tte.data |= TLB_EXEC;
#else
/* If it needs ref accounting do nothing. */
if (!(flags & VM_PROT_READ)) {
@ -1939,7 +1958,6 @@ pmap_enter(pm, va, pa, prot, flags)
}
if (wired)
tte.data |= TLB_TSB_LOCK;
KASSERT((tte.data & TLB_NFO) == 0);
ptp = 0;
retry:
@ -2000,18 +2018,21 @@ pmap_enter(pm, va, pa, prot, flags)
i = ptelookup_va(va);
if (pmapdebug & PDB_ENTER)
prom_printf("pmap_enter: va=%08x data=%08x:%08x "
"tsb[%d]=%08x\r\n", va, (int)(tte.data>>32),
(int)tte.data, i, &tsb[i]);
if (pmapdebug & PDB_MMU_STEAL && tsb[i].data) {
"tsb_dmmu[%d]=%08x\r\n", va, (int)(tte.data>>32),
(int)tte.data, i, &tsb_dmmu[i]);
if (pmapdebug & PDB_MMU_STEAL && tsb_dmmu[i].data) {
prom_printf("pmap_enter: evicting entry tag=%x:%08x "
"data=%08x:%08x tsb[%d]=%08x\r\n",
(int)(tsb[i].tag>>32), (int)tsb[i].tag,
(int)(tsb[i].data>>32), (int)tsb[i].data, i, &tsb[i]);
prom_printf("with va=%08x data=%08x:%08x tsb[%d]=%08x\r\n",
va, (int)(tte.data>>32), (int)tte.data, i, &tsb[i]);
"data=%08x:%08x tsb_dmmu[%d]=%08x\r\n",
(int)(tsb_dmmu[i].tag>>32), (int)tsb_dmmu[i].tag,
(int)(tsb_dmmu[i].data>>32), (int)tsb_dmmu[i].data, i,
&tsb_dmmu[i]);
prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\r\n",
va, (int)(tte.data>>32), (int)tte.data, i,
&tsb_dmmu[i]);
}
#endif
if (flags & (VM_PROT_READ | VM_PROT_WRITE)) {
if (flags & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) {
/*
* preload the TSB with the new entry,
@ -2021,9 +2042,16 @@ pmap_enter(pm, va, pa, prot, flags)
i = ptelookup_va(va);
tte.tag = TSB_TAG(0, pm->pm_ctx, va);
s = splhigh();
tsb[i].tag = tte.tag;
__asm __volatile("" : : : "memory");
tsb[i].data = tte.data;
if (flags & (VM_PROT_READ | VM_PROT_WRITE)) {
tsb_dmmu[i].tag = tte.tag;
__asm __volatile("" : : : "memory");
tsb_dmmu[i].data = tte.data;
}
if (flags & VM_PROT_EXECUTE) {
tsb_immu[i].tag = tte.tag;
__asm __volatile("" : : : "memory");
tsb_immu[i].data = tte.data;
}
/*
* it's only necessary to flush the TLB if this page was
@ -2034,14 +2062,9 @@ pmap_enter(pm, va, pa, prot, flags)
tlb_flush_pte(va, pm->pm_ctx);
splx(s);
} else if (wasmapped && (pm->pm_ctx || pm == pmap_kernel())) {
i = ptelookup_va(va);
if (tsb[i].tag > 0 &&
tsb[i].tag == TSB_TAG(0, pm->pm_ctx, va)) {
tsb[i].data = 0;
}
/* Force reload -- protections may be changed */
tsb_invalidate(pm->pm_ctx, va);
tlb_flush_pte(va, pm->pm_ctx);
KASSERT((tsb[i].data & TLB_NFO) == 0);
}
/* We will let the fast mmu miss interrupt load the new translation */
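The preload rule above, restated as a sketch (preload() is illustrative, not a kernel function): the access type that triggered pmap_enter() decides which TSB(s) are warmed with the new entry:

	if (flags & (VM_PROT_READ | VM_PROT_WRITE))
		preload(&tsb_dmmu[i], &tte);	/* data accesses */
	if (flags & VM_PROT_EXECUTE)
		preload(&tsb_immu[i], &tte);	/* instruction fetches */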
@ -2075,7 +2098,6 @@ pmap_remove(pm, va, endva)
paddr_t pa;
struct vm_page *pg;
pv_entry_t pv;
int i;
boolean_t flush = FALSE;
/*
@ -2135,22 +2157,22 @@ pmap_remove(pm, va, endva)
if (!pm->pm_ctx && pm != pmap_kernel())
continue;
i = ptelookup_va(va);
if (tsb[i].tag > 0 &&
tsb[i].tag == TSB_TAG(0, pm->pm_ctx, va)) {
DPRINTF(PDB_REMOVE, (" clearing TSB [%d]\n", i));
tsb[i].data = 0;
}
REMOVE_STAT(tflushes);
/*
* if the pmap is being torn down, don't bother flushing.
*/
if (!pm->pm_refs)
continue;
/*
* Here we assume nothing can get into the TLB
* unless it has a PTE.
*/
if (pm->pm_refs) {
tlb_flush_pte(va, pm->pm_ctx);
}
tsb_invalidate(pm->pm_ctx, va);
REMOVE_STAT(tflushes);
tlb_flush_pte(va, pm->pm_ctx);
}
simple_unlock(&pm->pm_lock);
if (flush && pm->pm_refs) {
@ -2174,14 +2196,10 @@ pmap_protect(pm, sva, eva, prot)
int64_t data;
struct vm_page *pg;
pv_entry_t pv;
int i;
KASSERT(pm != pmap_kernel() || eva < INTSTACK || sva > EINTSTACK);
KASSERT(pm != pmap_kernel() || eva < kdata || sva > ekdata);
if ((prot & (VM_PROT_WRITE|PMAP_WIRED)) == VM_PROT_WRITE)
return;
if (prot == VM_PROT_NONE) {
pmap_remove(pm, sva, eva);
return;
@ -2216,15 +2234,6 @@ pmap_protect(pm, sva, eva, prot)
(u_int)sva, (long long)pa, (int)va_to_seg(sva),
(int)va_to_pte(sva)));
#ifdef DEBUG
/* Catch this before the assertion */
if (data & TLB_NFO) {
printf("pmap_protect: pm=%p NFO mapping "
"va=%x data=%08llx\n",
pm, (u_int)sva, (long long)data);
Debugger();
}
#endif
pg = PHYS_TO_VM_PAGE(pa);
if (pg) {
/* Save REF/MOD info */
@ -2236,12 +2245,11 @@ pmap_protect(pm, sva, eva, prot)
}
/* Just do the pmap and TSB, not the pv_list */
data &= ~(TLB_W|TLB_REAL_W);
/* Turn *ON* write to wired mappings. */
if ((prot & (VM_PROT_WRITE|PMAP_WIRED)) ==
(VM_PROT_WRITE|PMAP_WIRED))
data |= (TLB_W|TLB_REAL_W);
KASSERT((data & TLB_NFO) == 0);
if ((prot & VM_PROT_WRITE) == 0)
data &= ~(TLB_W|TLB_REAL_W);
if ((prot & VM_PROT_EXECUTE) == 0)
data &= ~(TLB_EXEC);
if (pseg_set(pm, sva, data, 0)) {
printf("pmap_protect: gotten pseg empty!\n");
Debugger();
@ -2250,12 +2258,8 @@ pmap_protect(pm, sva, eva, prot)
if (!pm->pm_ctx && pm != pmap_kernel())
continue;
i = ptelookup_va(sva);
if (tsb[i].tag > 0 &&
tsb[i].tag == TSB_TAG(0, pm->pm_ctx, sva)) {
tsb[i].data = data;
KASSERT((tsb[i].data & TLB_NFO) == 0);
}
tsb_invalidate(pm->pm_ctx, sva);
tlb_flush_pte(sva, pm->pm_ctx);
}
simple_unlock(&pm->pm_lock);
@ -2329,6 +2333,31 @@ pmap_extract(pm, va, pap)
return (TRUE);
}
/*
* Change protection on a kernel address.
* This should only be called from MD code.
*/
void
pmap_kprotect(va, prot)
vaddr_t va;
vm_prot_t prot;
{
struct pmap *pm = pmap_kernel();
int64_t data;
simple_lock(&pm->pm_lock);
data = pseg_get(pm, va);
if (prot & VM_PROT_WRITE) {
data |= (TLB_W|TLB_REAL_W);
} else {
data &= ~(TLB_W|TLB_REAL_W);
}
(void) pseg_set(pm, va, data, 0);
tsb_invalidate(pm->pm_ctx, va);
tlb_flush_pte(va, pm->pm_ctx);
simple_unlock(&pm->pm_lock);
}
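A minimal usage sketch, mirroring the clock_wenable() change earlier in this commit: temporarily write-enable a wired kernel page, then re-protect it:

	pmap_kprotect(va, VM_PROT_READ | VM_PROT_WRITE);
	/* ... update the page ... */
	pmap_kprotect(va, VM_PROT_READ);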
/*
* Return the number of bytes that pmap_dumpmmu() will dump.
*/
@ -2508,33 +2537,6 @@ ptelookup_va(va)
return (tsbptr / sizeof(pte_t));
}
#if notyet
void
tsb_enter(ctx, va, data)
int ctx;
int64_t va;
int64_t data;
{
int i, s;
int64_t pa;
i = ptelookup_va(va);
s = splvm();
pa = tsb[i].data&TLB_PA_MASK;
/*
* If we use fast DMMU access fault handlers to track
* referenced and modified bits, we should save the
* TSB entry's state here. Since we don't, we don't.
*/
/* Do not use global entries */
tsb[i].tag = TSB_TAG(0,ctx,va);
tsb[i].data = data;
tlb_flush_pte(va, ctx); /* Force reload -- protections may be changed */
splx(s);
}
#endif
/*
* Do whatever is needed to sync the MOD/REF flags
*/
@ -2545,7 +2547,7 @@ pmap_clear_modify(pg)
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
pv_entry_t pv;
int i, changed = 0;
int changed = 0;
#ifdef DEBUG
int modified = 0;
@ -2587,16 +2589,13 @@ pmap_clear_modify(pg)
#else
data &= ~(TLB_MODIFY|TLB_W|TLB_REAL_W);
#endif
KASSERT((data & TLB_NFO) == 0);
if (pseg_set(pmap, va, data, 0)) {
printf("pmap_clear_modify: pseg empty!\n");
Debugger();
/* panic? */
}
if (pmap->pm_ctx || pmap == pmap_kernel()) {
i = ptelookup_va(va);
if (tsb[i].tag == TSB_TAG(0, pmap->pm_ctx, va))
tsb[i].data = 0;
tsb_invalidate(pmap->pm_ctx, va);
tlb_flush_pte(va, pmap->pm_ctx);
}
/* Then clear the mod bit in the pv */
@ -2638,7 +2637,7 @@ pmap_clear_reference(pg)
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
pv_entry_t pv;
int i, changed = 0;
int changed = 0;
#ifdef DEBUG
int referenced = 0;
#endif
@ -2649,10 +2648,6 @@ pmap_clear_reference(pg)
#endif
/* Clear all references */
pv = &pg->mdpage.mdpg_pvh;
#ifdef NOT_DEBUG
if (pv->pv_va & PV_MOD)
printf("pmap_clear_reference(): pg %p still modified\n", pg);
#endif
if (pv->pv_va & PV_REF)
changed |= 1;
pv->pv_va &= ~(PV_REF);
@ -2683,19 +2678,13 @@ pmap_clear_reference(pg)
changed |= 1;
data = 0;
#endif
KASSERT((data & TLB_NFO) == 0);
if (pseg_set(pmap, va, data, 0)) {
printf("pmap_clear_reference: pseg empty!\n");
Debugger();
/* panic? */
}
if (pmap->pm_ctx || pmap == pmap_kernel()) {
i = ptelookup_va(va);
/* Invalidate our TSB entry since ref info
is in the PTE */
if (tsb[i].tag == TSB_TAG(0, pmap->pm_ctx, va))
tsb[i].data = 0;
tsb_invalidate(pmap->pm_ctx, va);
tlb_flush_pte(va, pmap->pm_ctx);
}
if (pv->pv_va & PV_REF)
@ -2869,7 +2858,6 @@ pmap_page_protect(pg, prot)
pv_entry_t pv, npv, firstpv;
struct pmap *pmap;
vaddr_t va;
int i;
boolean_t needflush = FALSE;
DPRINTF(PDB_CHANGEPROT,
@ -2914,7 +2902,6 @@ pmap_page_protect(pg, prot)
data &= ~clear;
data |= set;
KASSERT((data & TLB_NFO) == 0);
if (pseg_set(pmap, va, data, 0)) {
printf("pmap_page_protect: "
"pseg empty!\n");
@ -2922,17 +2909,7 @@ pmap_page_protect(pg, prot)
/* panic? */
}
if (pmap->pm_ctx || pmap == pmap_kernel()) {
i = ptelookup_va(va);
/*
* since we already know the va
* for each mapping, we don't need to
* scan the entire TSB.
*/
if (tsb[i].tag ==
TSB_TAG(0, pmap->pm_ctx, va))
tsb[i].data = 0;
tsb_invalidate(pmap->pm_ctx, va);
tlb_flush_pte(va, pmap->pm_ctx);
}
simple_unlock(&pmap->pm_lock);
@ -2972,17 +2949,7 @@ pmap_page_protect(pg, prot)
/* panic? */
}
if (pmap->pm_ctx || pmap == pmap_kernel()) {
/* clear the entry in the TSB */
i = ptelookup_va(va);
/*
* since we already know the va for
* each mapping we don't need to scan
* the entire TSB.
*/
if (tsb[i].tag == TSB_TAG(0, pmap->pm_ctx, va))
tsb[i].data = 0;
tsb_invalidate(pmap->pm_ctx, va);
tlb_flush_pte(va, pmap->pm_ctx);
}
if (pmap->pm_refs > 0) {
@ -3027,16 +2994,7 @@ pmap_page_protect(pg, prot)
}
if (pv->pv_pmap->pm_ctx ||
pv->pv_pmap == pmap_kernel()) {
i = ptelookup_va(va);
/*
* since we already know the va for
* each mapping we don't need to scan
* the entire TSB.
*/
if (tsb[i].tag == TSB_TAG(0, pmap->pm_ctx, va))
tsb[i].data = 0;
tsb_invalidate(pmap->pm_ctx, va);
tlb_flush_pte(va, pmap->pm_ctx);
}
if (pmap->pm_refs > 0) {
@ -3169,7 +3127,6 @@ int
ctx_alloc(pm)
struct pmap *pm;
{
int64_t *tsbaddr;
int i, ctx;
KASSERT(pm != pmap_kernel());
@ -3188,10 +3145,11 @@ ctx_alloc(pm)
ctx_free(LIST_FIRST(&pmap_ctxlist));
}
for (i = TSBENTS - 1; i >= 0; i--) {
if (TSB_TAG_CTX(tsb[i].tag) != 0) {
tsbaddr = &tsb[i].data;
__asm __volatile("clrx [%0]"
: : "r" (tsbaddr) : "memory");
if (TSB_TAG_CTX(tsb_dmmu[i].tag) != 0) {
clrx(&tsb_dmmu[i].data);
}
if (TSB_TAG_CTX(tsb_immu[i].tag) != 0) {
clrx(&tsb_immu[i].data);
}
}
tlb_flush_all();
@ -3398,7 +3356,6 @@ pmap_page_cache(pm, pa, mode)
struct vm_page *pg;
pv_entry_t pv;
vaddr_t va;
int i;
DPRINTF(PDB_ENTER, ("pmap_page_uncache(%llx)\n",
(unsigned long long)pa));
@ -3437,13 +3394,9 @@ pmap_page_cache(pm, pa, mode)
if (pv->pv_pmap != pm)
simple_unlock(&pv->pv_pmap->pm_lock);
if (pv->pv_pmap->pm_ctx || pv->pv_pmap == pmap_kernel()) {
i = ptelookup_va(va);
if (tsb[i].tag > 0 && tsb[i].tag ==
TSB_TAG(0, pv->pv_pmap->pm_ctx, va)) {
tsb[i].data = 0;
}
/* Force reload -- cache bits have changed */
tlb_flush_pte(va, pv->pv_pmap->pm_ctx);
tsb_invalidate(pv->pv_pmap->pm_ctx, va);
tlb_flush_pte(va, pv->pv_pmap->pm_ctx);
}
pv = pv->pv_next;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: aout_exec.c,v 1.15 2002/11/01 19:26:21 jdolecek Exp $ */
/* $NetBSD: aout_exec.c,v 1.16 2003/08/24 17:52:40 chs Exp $ */
/*-
* Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aout_exec.c,v 1.15 2002/11/01 19:26:21 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: aout_exec.c,v 1.16 2003/08/24 17:52:40 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_syscall_debug.h"
@ -62,7 +62,9 @@ void syscall_intern __P((struct proc *));
void syscall __P((void));
#endif
struct emul emul_netbsd_aout = {
struct uvm_object *emul_netbsd_aout_object;
const struct emul emul_netbsd_aout = {
"netbsd",
"/emul/aout",
#ifndef __HAVE_MINIMAL_EMUL
@ -81,6 +83,7 @@ struct emul emul_netbsd_aout = {
trapsignal,
sigcode,
esigcode,
&emul_netbsd_aout_object,
setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: aoutm68k_exec.c,v 1.12 2002/11/10 20:59:04 jdolecek Exp $ */
/* $NetBSD: aoutm68k_exec.c,v 1.13 2003/08/24 17:52:40 chs Exp $ */
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aoutm68k_exec.c,v 1.12 2002/11/10 20:59:04 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: aoutm68k_exec.c,v 1.13 2003/08/24 17:52:40 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_syscall_debug.h"
@ -59,7 +59,9 @@ extern const char * const aoutm68k_syscallnames[];
extern char sigcode[], esigcode[];
void aoutm68k_syscall_intern __P((struct proc *));
struct emul emul_netbsd_aoutm68k = {
struct uvm_object *emul_netbsd_aoutm68k_object;
const struct emul emul_netbsd_aoutm68k = {
"aoutm68k",
"/emul/aout",
#ifndef __HAVE_MINIMAL_EMUL
@ -78,6 +80,7 @@ struct emul emul_netbsd_aoutm68k = {
trapsignal,
sigcode,
esigcode,
&emul_netbsd_aoutm68k_object,
setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: darwin_exec.c,v 1.16 2003/06/29 22:29:15 fvdl Exp $ */
/* $NetBSD: darwin_exec.c,v 1.17 2003/08/24 17:52:40 chs Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc.
@ -38,7 +38,7 @@
#include "opt_compat_darwin.h" /* For COMPAT_DARWIN in mach_port.h */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: darwin_exec.c,v 1.16 2003/06/29 22:29:15 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: darwin_exec.c,v 1.17 2003/08/24 17:52:40 chs Exp $");
#include "opt_syscall_debug.h"
@ -80,6 +80,8 @@ void syscall(void);
void mach_syscall_intern(struct proc *);
#endif
struct uvm_object *emul_darwin_object;
const struct emul emul_darwin = {
"darwin",
"/emul/darwin",
@ -99,6 +101,7 @@ const struct emul emul_darwin = {
trapsignal,
sigcode,
esigcode,
&emul_darwin_object,
setregs,
darwin_e_proc_exec,
darwin_e_proc_fork,

View File

@ -1,4 +1,4 @@
/* $NetBSD: freebsd_exec.c,v 1.19 2002/11/01 19:26:21 jdolecek Exp $ */
/* $NetBSD: freebsd_exec.c,v 1.20 2003/08/24 17:52:41 chs Exp $ */
/*
* Copyright (c) 1993, 1994 Christopher G. Demetriou
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: freebsd_exec.c,v 1.19 2002/11/01 19:26:21 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: freebsd_exec.c,v 1.20 2003/08/24 17:52:41 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -50,6 +50,8 @@ void syscall_intern __P((struct proc *));
void syscall __P((void));
#endif
struct uvm_object *emul_freebsd_object;
const struct emul emul_freebsd = {
"freebsd",
"/emul/freebsd",
@ -65,6 +67,7 @@ const struct emul emul_freebsd = {
trapsignal,
freebsd_sigcode,
freebsd_esigcode,
&emul_freebsd_object,
freebsd_setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: hpux_exec.c,v 1.37 2003/06/29 22:29:17 fvdl Exp $ */
/* $NetBSD: hpux_exec.c,v 1.38 2003/08/24 17:52:41 chs Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@ -70,7 +70,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hpux_exec.c,v 1.37 2003/06/29 22:29:17 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: hpux_exec.c,v 1.38 2003/08/24 17:52:41 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -108,6 +108,8 @@ void hpux_syscall_intern __P((struct proc *));
void syscall __P((void));
#endif
struct uvm_object *emul_hpux_object;
const struct emul emul_hpux = {
"hpux",
"/emul/hpux",
@ -123,6 +125,7 @@ const struct emul emul_hpux = {
trapsignal,
sigcode,
esigcode,
&emul_hpux_object,
hpux_setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: ibcs2_exec.c,v 1.52 2002/12/21 15:48:57 kristerw Exp $ */
/* $NetBSD: ibcs2_exec.c,v 1.53 2003/08/24 17:52:42 chs Exp $ */
/*
* Copyright (c) 1994, 1995, 1998 Scott Bartram
@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ibcs2_exec.c,v 1.52 2002/12/21 15:48:57 kristerw Exp $");
__KERNEL_RCSID(0, "$NetBSD: ibcs2_exec.c,v 1.53 2003/08/24 17:52:42 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -67,6 +67,8 @@ void syscall __P((void));
int ibcs2_debug = 1;
#endif
struct uvm_object *emul_ibcs2_object;
const struct emul emul_ibcs2 = {
"ibcs2",
"/emul/ibcs2",
@ -82,6 +84,7 @@ const struct emul emul_ibcs2 = {
trapsignal,
ibcs2_sigcode,
ibcs2_esigcode,
&emul_ibcs2_object,
ibcs2_setregs,
ibcs2_e_proc_exec,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: irix_exec.c,v 1.29 2003/06/28 08:31:16 he Exp $ */
/* $NetBSD: irix_exec.c,v 1.30 2003/08/24 17:52:42 chs Exp $ */
/*-
* Copyright (c) 2001-2002 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: irix_exec.c,v 1.29 2003/06/28 08:31:16 he Exp $");
__KERNEL_RCSID(0, "$NetBSD: irix_exec.c,v 1.30 2003/08/24 17:52:42 chs Exp $");
#ifdef _KERNEL_OPT
#include "opt_syscall_debug.h"
@ -98,6 +98,7 @@ const struct emul emul_irix = {
trapsignal,
NULL,
NULL,
NULL,
setregs,
irix_e_proc_exec,
irix_e_proc_fork,

View File

@ -1,4 +1,4 @@
/* $NetBSD: irix_exec.h,v 1.19 2003/06/29 22:29:22 fvdl Exp $ */
/* $NetBSD: irix_exec.h,v 1.20 2003/08/24 17:52:42 chs Exp $ */
/*-
* Copyright (c) 2001-2002 The NetBSD Foundation, Inc.
@ -106,7 +106,6 @@ int irix_elf64_probe __P((struct proc *, struct exec_package *, void *,
void irix_n32_setregs __P((struct lwp *, struct exec_package *, u_long));
extern const struct emul emul_irix;
extern const struct emul emul_irix;
#endif /* !_IRIX_EXEC_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: linux_machdep.c,v 1.94 2003/08/21 08:36:56 hannken Exp $ */
/* $NetBSD: linux_machdep.c,v 1.95 2003/08/24 17:52:43 chs Exp $ */
/*-
* Copyright (c) 1995, 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_machdep.c,v 1.94 2003/08/21 08:36:56 hannken Exp $");
__KERNEL_RCSID(0, "$NetBSD: linux_machdep.c,v 1.95 2003/08/24 17:52:43 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_vm86.h"
@ -174,7 +174,7 @@ linux_setregs(l, epp, stack)
tf->tf_ecx = 0;
tf->tf_eax = 0;
tf->tf_eip = epp->ep_entry;
tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
tf->tf_cs = GSEL(GUCODEBIG_SEL, SEL_UPL);
tf->tf_eflags = PSL_USERSET;
tf->tf_esp = stack;
tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
@ -413,7 +413,7 @@ linux_old_sendsig(sig, mask, code)
tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
tf->tf_eip = (int)p->p_sigctx.ps_sigcode;
tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
tf->tf_cs = GSEL(GUCODEBIG_SEL, SEL_UPL);
tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
tf->tf_esp = (int)fp;
tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);

View File

@ -1,4 +1,4 @@
/* $NetBSD: linux_exec.c,v 1.64 2003/06/29 22:29:26 fvdl Exp $ */
/* $NetBSD: linux_exec.c,v 1.65 2003/08/24 17:52:43 chs Exp $ */
/*-
* Copyright (c) 1994, 1995, 1998, 2000 The NetBSD Foundation, Inc.
@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_exec.c,v 1.64 2003/06/29 22:29:26 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: linux_exec.c,v 1.65 2003/08/24 17:52:43 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -111,6 +111,9 @@ linux_sys_execve(l, v, retval)
/*
* Emulation switch.
*/
struct uvm_object *emul_linux_object;
const struct emul emul_linux = {
"linux",
"/emul/linux",
@ -126,6 +129,7 @@ const struct emul emul_linux = {
linux_trapsignal,
linux_sigcode,
linux_esigcode,
&emul_linux_object,
linux_setregs,
linux_e_proc_exec,
linux_e_proc_fork,

View File

@ -1,4 +1,4 @@
/* $NetBSD: mach_exec.c,v 1.32 2003/06/29 22:29:34 fvdl Exp $ */
/* $NetBSD: mach_exec.c,v 1.33 2003/08/24 17:52:44 chs Exp $ */
/*-
* Copyright (c) 2001-2003 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mach_exec.c,v 1.32 2003/06/29 22:29:34 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: mach_exec.c,v 1.33 2003/08/24 17:52:44 chs Exp $");
#include "opt_syscall_debug.h"
@ -78,6 +78,8 @@ void syscall(void);
void mach_syscall_intern(struct proc *);
#endif
struct uvm_object *emul_mach_object;
const struct emul emul_mach = {
"mach",
"/emul/mach",
@ -97,6 +99,7 @@ const struct emul emul_mach = {
trapsignal,
sigcode,
esigcode,
&emul_mach_object,
setregs,
mach_e_proc_exec,
mach_e_proc_fork,

View File

@ -1,4 +1,4 @@
/* $NetBSD: netbsd32_netbsd.c,v 1.73 2003/06/29 22:29:39 fvdl Exp $ */
/* $NetBSD: netbsd32_netbsd.c,v 1.74 2003/08/24 17:52:44 chs Exp $ */
/*
* Copyright (c) 1998, 2001 Matthew R. Green
@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: netbsd32_netbsd.c,v 1.73 2003/06/29 22:29:39 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: netbsd32_netbsd.c,v 1.74 2003/08/24 17:52:44 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_ddb.h"
@ -107,6 +107,8 @@ void netbsd32_syscall_intern __P((struct proc *));
void syscall __P((void));
#endif
struct uvm_object *emul_netbsd32_object;
const struct emul emul_netbsd32 = {
"netbsd32",
"/emul/netbsd32",
@ -126,6 +128,7 @@ const struct emul emul_netbsd32 = {
trapsignal,
netbsd32_sigcode,
netbsd32_esigcode,
&emul_netbsd32_object,
netbsd32_setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: osf1_exec.c,v 1.31 2002/11/01 19:26:23 jdolecek Exp $ */
/* $NetBSD: osf1_exec.c,v 1.32 2003/08/24 17:52:44 chs Exp $ */
/*
* Copyright (c) 1999 Christopher G. Demetriou. All rights reserved.
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: osf1_exec.c,v 1.31 2002/11/01 19:26:23 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: osf1_exec.c,v 1.32 2003/08/24 17:52:44 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -52,6 +52,8 @@ void osf1_syscall_intern __P((struct proc *));
void syscall __P((void));
#endif
struct uvm_object *emul_osf1_object;
const struct emul emul_osf1 = {
"osf1",
"/emul/osf1",
@ -67,6 +69,7 @@ const struct emul emul_osf1 = {
trapsignal,
osf1_sigcode,
osf1_esigcode,
&emul_osf1_object,
setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: pecoff_emul.c,v 1.5 2003/06/23 15:10:03 martin Exp $ */
/* $NetBSD: pecoff_emul.c,v 1.6 2003/08/24 17:52:45 chs Exp $ */
/*
* Copyright (c) 2000 Masaru OKI
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pecoff_emul.c,v 1.5 2003/06/23 15:10:03 martin Exp $");
__KERNEL_RCSID(0, "$NetBSD: pecoff_emul.c,v 1.6 2003/08/24 17:52:45 chs Exp $");
/*#define DEBUG_PECOFF*/
@ -75,6 +75,8 @@ void syscall_intern(struct proc *);
void syscall(void);
#endif
struct uvm_object *emul_pecoff_object;
const struct emul emul_pecoff = {
"pecoff",
"/emul/pecoff",
@ -94,6 +96,7 @@ const struct emul emul_pecoff = {
trapsignal,
sigcode,
esigcode,
&emul_pecoff_object,
setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: sunos_exec.c,v 1.40 2002/11/10 20:59:03 jdolecek Exp $ */
/* $NetBSD: sunos_exec.c,v 1.41 2003/08/24 17:52:45 chs Exp $ */
/*
* Copyright (c) 1993 Theo de Raadt
@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunos_exec.c,v 1.40 2002/11/10 20:59:03 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: sunos_exec.c,v 1.41 2003/08/24 17:52:45 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_syscall_debug.h"
@ -57,7 +57,9 @@ void sunos_syscall_intern __P((struct proc *));
void syscall __P((void));
#endif
struct emul emul_sunos = {
struct uvm_object *emul_sunos_object;
const struct emul emul_sunos = {
"sunos",
"/emul/sunos",
#ifndef __HAVE_MINIMAL_EMUL
@ -76,6 +78,7 @@ struct emul emul_sunos = {
trapsignal,
sunos_sigcode,
sunos_esigcode,
&emul_sunos_object,
setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: sunos32_exec.c,v 1.14 2002/11/10 20:59:04 jdolecek Exp $ */
/* $NetBSD: sunos32_exec.c,v 1.15 2003/08/24 17:52:45 chs Exp $ */
/*
* Copyright (c) 2001 Matthew R. Green
@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunos32_exec.c,v 1.14 2002/11/10 20:59:04 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: sunos32_exec.c,v 1.15 2003/08/24 17:52:45 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_syscall_debug.h"
@ -51,6 +51,8 @@ extern const char * const sunos32_syscallnames[];
extern char sunos_sigcode[], sunos_esigcode[];
void syscall __P((void));
struct uvm_object *emul_sunos32_object;
const struct emul emul_sunos = {
"sunos32",
"/emul/sunos",
@ -70,6 +72,7 @@ const struct emul emul_sunos = {
trapsignal,
sunos_sigcode,
sunos_esigcode,
&emul_sunos32_object,
sunos32_setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: svr4_exec.c,v 1.49 2002/11/01 19:26:23 jdolecek Exp $ */
/* $NetBSD: svr4_exec.c,v 1.50 2003/08/24 17:52:46 chs Exp $ */
/*-
* Copyright (c) 1994, 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: svr4_exec.c,v 1.49 2002/11/01 19:26:23 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: svr4_exec.c,v 1.50 2003/08/24 17:52:46 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -58,6 +58,8 @@ extern const char * const svr4_syscallnames[];
void syscall __P((void));
#endif
struct uvm_object *emul_svr4_object;
const struct emul emul_svr4 = {
"svr4",
"/emul/svr4",
@ -73,6 +75,7 @@ const struct emul emul_svr4 = {
trapsignal,
svr4_sigcode,
svr4_esigcode,
&emul_svr4_object,
svr4_setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: svr4_32_exec.c,v 1.9 2002/11/01 19:26:24 jdolecek Exp $ */
/* $NetBSD: svr4_32_exec.c,v 1.10 2003/08/24 17:52:46 chs Exp $ */
/*-
* Copyright (c) 1994, 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: svr4_32_exec.c,v 1.9 2002/11/01 19:26:24 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: svr4_32_exec.c,v 1.10 2003/08/24 17:52:46 chs Exp $");
#define ELFSIZE 32 /* XXX should die */
@ -60,6 +60,8 @@ extern const char * const svr4_32_syscallnames[];
void syscall __P((void));
#endif
struct uvm_object *emul_svr4_32_object;
const struct emul emul_svr4_32 = {
"svr4_32",
"/emul/svr4_32",
@ -75,6 +77,7 @@ const struct emul emul_svr4_32 = {
trapsignal,
svr4_32_sigcode,
svr4_32_esigcode,
&emul_svr4_32_object,
svr4_32_setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: ultrix_misc.c,v 1.86 2003/08/07 16:30:47 agc Exp $ */
/* $NetBSD: ultrix_misc.c,v 1.87 2003/08/24 17:52:46 chs Exp $ */
/*
* Copyright (c) 1995, 1997 Jonathan Stone (hereinafter referred to as the author)
@ -76,7 +76,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ultrix_misc.c,v 1.86 2003/08/07 16:30:47 agc Exp $");
__KERNEL_RCSID(0, "$NetBSD: ultrix_misc.c,v 1.87 2003/08/24 17:52:46 chs Exp $");
#if defined(_KERNEL_OPT)
#include "opt_nfsserver.h"
@ -156,6 +156,8 @@ void syscall_intern(struct proc *);
void syscall __P((void));
#endif
struct uvm_object *emul_ultrix_object;
const struct emul emul_ultrix = {
"ultrix",
"/emul/ultrix",
@ -171,6 +173,7 @@ const struct emul emul_ultrix = {
trapsignal,
ultrix_sigcode,
ultrix_esigcode,
&emul_ultrix_object,
setregs,
NULL,
NULL,

View File

@ -1,4 +1,4 @@
/* $NetBSD: exec_subr.c,v 1.39 2003/08/21 15:17:03 yamt Exp $ */
/* $NetBSD: exec_subr.c,v 1.40 2003/08/24 17:52:47 chs Exp $ */
/*
* Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.39 2003/08/21 15:17:03 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.40 2003/08/24 17:52:47 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -121,7 +121,7 @@ kill_vmcmds(struct exec_vmcmd_set *evsp)
for (i = 0; i < evsp->evs_used; i++) {
vcp = &evsp->evs_cmds[i];
if (vcp->ev_vp != NULLVP)
if (vcp->ev_vp != NULL)
vrele(vcp->ev_vp);
}
evsp->evs_used = evsp->evs_cnt = 0;
@ -344,12 +344,11 @@ exec_setup_stack(struct proc *p, struct exec_package *epp)
access_size), noaccess_size);
if (noaccess_size > 0) {
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
noaccess_linear_min, NULLVP, 0, VM_PROT_NONE);
noaccess_linear_min, NULL, 0, VM_PROT_NONE);
}
KASSERT(access_size > 0);
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
access_linear_min, NULLVP, 0,
VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE);
return 0;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_exec.c,v 1.170 2003/07/16 22:42:48 dsl Exp $ */
/* $NetBSD: kern_exec.c,v 1.171 2003/08/24 17:52:47 chs Exp $ */
/*-
* Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou
@ -33,7 +33,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.170 2003/07/16 22:42:48 dsl Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.171 2003/08/24 17:52:47 chs Exp $");
#include "opt_ktrace.h"
#include "opt_syscall_debug.h"
@ -68,6 +68,8 @@ __KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.170 2003/07/16 22:42:48 dsl Exp $");
#include <machine/cpu.h>
#include <machine/reg.h>
static int exec_sigcode_map(struct proc *, const struct emul *);
#ifdef DEBUG_EXEC
#define DPRINTF(a) uprintf a
#else
@ -129,6 +131,8 @@ void syscall_intern(struct proc *);
void syscall(void);
#endif
struct uvm_object *emul_netbsd_object;
const struct emul emul_netbsd = {
"netbsd",
NULL, /* emulation path */
@ -148,6 +152,7 @@ const struct emul emul_netbsd = {
trapsignal,
sigcode,
esigcode,
&emul_netbsd_object,
setregs,
NULL,
NULL,
@ -629,7 +634,7 @@ sys_execve(struct lwp *l, void *v, register_t *retval)
* it's easy to decrement the stack pointer a little bit to
* allocate the space for these few words and pass the new
* stack pointer to _rtld. When the stack grows up, however,
* a few words before argc is part of the signal trampoline,
* a few words before argc is part of the signal trampoline, XXX
* so we have a problem.
*
* Instead of changing how _rtld works, we take the easy way
@ -664,21 +669,6 @@ sys_execve(struct lwp *l, void *v, register_t *retval)
goto exec_abort;
}
/* copy out the process's signal trampoline code */
if (szsigcode) {
p->p_sigctx.ps_sigcode = STACK_ALLOC(STACK_MAX(p->p_psstr,
sizeof(struct ps_strings)), szsigcode);
if ((error = copyout((char *)pack.ep_es->es_emul->e_sigcode,
p->p_sigctx.ps_sigcode, szsigcode)) != 0) {
DPRINTF(("execve: sig trampoline copyout failed\n"));
goto exec_abort;
}
#ifdef PMAP_NEED_PROCWR
/* This is code. Let the pmap do what is needed. */
pmap_procwr(p, (vaddr_t)p->p_sigctx.ps_sigcode, szsigcode);
#endif
}
stopprofclock(p); /* stop profiling */
fdcloseexec(p); /* handle close on exec */
execsigs(p); /* reset catched signals */
@ -766,6 +756,10 @@ sys_execve(struct lwp *l, void *v, register_t *retval)
if (pack.ep_es->es_setregs)
(*pack.ep_es->es_setregs)(l, &pack, (u_long) stack);
/* map the process's signal trampoline code */
if (exec_sigcode_map(p, pack.ep_es->es_emul))
goto exec_abort;
if (p->p_flag & P_TRACED)
psignal(p, SIGTRAP);
@ -1271,3 +1265,64 @@ exec_init(int init_boot)
}
#endif /* !LKM */
static int
exec_sigcode_map(struct proc *p, const struct emul *e)
{
vaddr_t va;
vsize_t sz;
int error;
struct uvm_object *uobj;
if (e->e_sigobject == NULL) {
return 0;
}
/*
* If we don't have a sigobject for this emulation, create one.
*
* sigobject is an anonymous memory object (just like SYSV shared
* memory) that we keep a permanent reference to and that we map
* in all processes that need this sigcode. The creation is simple,
* we create an object, add a permanent reference to it, map it in
* kernel space, copy out the sigcode to it and unmap it.
* Then we map it with PROT_READ|PROT_EXEC into the process just
* the way sys_mmap would map it.
*/
sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;
uobj = *e->e_sigobject;
if (uobj == NULL) {
uobj = uao_create(sz, 0);
uao_reference(uobj);
va = vm_map_min(kernel_map);
if ((error = uvm_map(kernel_map, &va, round_page(sz),
uobj, 0, 0,
UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
printf("kernel mapping failed %d\n", error);
(*uobj->pgops->pgo_detach)(uobj);
return (error);
}
memcpy((void *)va, e->e_sigcode, sz);
#ifdef PMAP_NEED_PROCWR
pmap_procwr(&proc0, va, sz);
#endif
uvm_unmap(kernel_map, va, va + round_page(sz));
*e->e_sigobject = uobj;
}
/* Just a hint to uvm_mmap where to put it. */
va = round_page((vaddr_t)p->p_vmspace->vm_daddr + MAXDSIZ);
(*uobj->pgops->pgo_reference)(uobj);
error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz),
uobj, 0, 0,
UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX, UVM_INH_SHARE,
UVM_ADV_RANDOM, 0));
if (error) {
(*uobj->pgops->pgo_detach)(uobj);
return (error);
}
p->p_sigctx.ps_sigcode = (void *)va;
return (0);
}
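With the trampoline in a shared read/execute mapping, an MD sendsig() no longer copies code onto the user stack; a sketch under that assumption (the frame field name is hypothetical):

	/* point the signal frame's return address at the shared sigcode */
	fp->sf_ra = (u_long)p->p_sigctx.ps_sigcode;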

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_resource.c,v 1.72 2003/08/07 16:31:48 agc Exp $ */
/* $NetBSD: kern_resource.c,v 1.73 2003/08/24 17:52:47 chs Exp $ */
/*-
* Copyright (c) 1982, 1986, 1991, 1993
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.72 2003/08/07 16:31:48 agc Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.73 2003/08/24 17:52:47 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -323,7 +323,7 @@ dosetrlimit(p, cred, which, limp)
vm_prot_t prot;
if (limp->rlim_cur > alimp->rlim_cur) {
prot = VM_PROT_ALL;
prot = VM_PROT_READ | VM_PROT_WRITE;
size = limp->rlim_cur - alimp->rlim_cur;
addr = USRSTACK - limp->rlim_cur;
} else {

View File

@ -1,4 +1,4 @@
/* $NetBSD: proc.h,v 1.168 2003/08/07 16:34:10 agc Exp $ */
/* $NetBSD: proc.h,v 1.169 2003/08/24 17:52:47 chs Exp $ */
/*-
* Copyright (c) 1986, 1989, 1991, 1993
@ -104,6 +104,7 @@ struct emul {
char *e_sigcode; /* Start of sigcode */
char *e_esigcode; /* End of sigcode */
struct uvm_object **e_sigobject;/* shared sigcode object */
/* Set registers before execution */
void (*e_setregs) __P((struct lwp *, struct exec_package *,
u_long));

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_unix.c,v 1.28 2003/05/25 13:00:40 simonb Exp $ */
/* $NetBSD: uvm_unix.c,v 1.29 2003/08/24 17:52:48 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -50,7 +50,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_unix.c,v 1.28 2003/05/25 13:00:40 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_unix.c,v 1.29 2003/08/24 17:52:48 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -95,12 +95,14 @@ sys_obreak(l, v, retval)
/*
* grow or shrink?
*/
if (new > old) {
error = uvm_map(&vm->vm_map, &old, new - old, NULL,
UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
UVM_ADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED|
UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
UVM_MAPFLAG(UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL,
UVM_INH_COPY,
UVM_ADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED|
UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
if (error) {
uprintf("sbrk: grow %ld failed, error = %d\n",
new - old, error);
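The heap is now entered with a current protection of read/write, but its maximum protection stays UVM_PROT_ALL, so a program that genuinely needs executable memory can still opt back in from userland; a hedged example (fragment, assuming <sys/mman.h>, <unistd.h>, and <err.h>):

	char *p = sbrk(len);
	if (p != (char *)-1 &&
	    mprotect(p, len, PROT_READ | PROT_WRITE | PROT_EXEC) == -1)
		err(1, "mprotect");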