Implement the scheduler lock protocol; this fixes PR arm/10863.
Also add correct locking when freeing pages in pmap_destroy (fix from potr).
This means that arm32 kernels can now be built with LOCKDEBUG enabled
(only tested on cats, though).
commit a9e806ee0c (parent ba77dc646c)
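The protocol in question: under LOCKDEBUG the MI scheduler (kern_synch.c)
exports sched_lock_idle() and sched_unlock_idle(), and the MD idle loop must
release sched_lock before sleeping and retake it before inspecting the run
queues. Below is a minimal C sketch of the discipline the assembly in this
commit implements; irq_enable(), irq_disable(), cpu_sleep() and
have_runnable_procs() are hypothetical stand-ins for IRQenable, IRQdisable,
the CF_SLEEP cpufunc hook and the whichqs test.

/* Sketch only: locking discipline of the LOCKDEBUG idle loop. */
extern void sched_lock_idle(void), sched_unlock_idle(void);	/* real MI hooks */
extern void irq_enable(void), irq_disable(void), cpu_sleep(void); /* stand-ins */
extern int have_runnable_procs(void);				/* stand-in */

static void
idle_sketch(void)
{
	for (;;) {
		sched_unlock_idle();	/* idle() is entered with sched_lock held */
		irq_enable();
		cpu_sleep();		/* wait for an interrupt */
		irq_disable();
		sched_lock_idle();	/* retake before reading the run queues */
		if (have_runnable_procs())
			return;		/* back to cpu_switch, lock still held */
	}
}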
--- cpuswitch.S
+++ cpuswitch.S
@@ -1,4 +1,4 @@
-/* $NetBSD: cpuswitch.S,v 1.6 2002/01/25 19:19:24 thorpej Exp $ */
+/* $NetBSD: cpuswitch.S,v 1.7 2002/05/14 19:22:34 chris Exp $ */
 
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -231,30 +231,35 @@ Lblock_userspace_access:
 /*
  * Idle loop, exercised while waiting for a process to wake up.
  */
 /* LINTSTUB: Ignore */
 ASENTRY_NP(idle)
+#if defined(LOCKDEBUG)
+	bl	_C_LABEL(sched_unlock_idle)
+#endif
 	/* Enable interrupts */
 	IRQenable
 
+	/* XXX - r1 needs to be preserved for cpu_switch */
+	mov	r7, r1
 	ldr	r3, Lcpufuncs
 	mov	r0, #0
 	add	lr, pc, #Lidle_slept - . - 8
 	ldr	pc, [r3, #CF_SLEEP]
 
 	/* should also call the uvm pageidlezero stuff */
 
 Lidle_slept:
+	mov	r1, r7
+
 	/* Disable interrupts while we check for an active queue */
 	IRQdisable
+#if defined(LOCKDEBUG)
+	bl	_C_LABEL(sched_lock_idle)
+#endif
 	ldr	r7, Lwhichqs
 	ldr	r3, [r7]
 	teq	r3, #0x00000000
-	bne	sw1
-
-	/* All processes are still asleep so idle a while longer */
-	b	_ASM_LABEL(idle)
+	beq	_ASM_LABEL(idle)
+	b	Lidle_ret
 
 /*
  * Find a new process to run, save the current context and
@@ -287,9 +292,15 @@ ENTRY(cpu_switch)
 	ldr	r7, Lcurpcb
 	str	r0, [r7]
 
-	/* Lower the spl level to spl0 and get the current spl level. */
+	/* stash the old proc */
+	mov	r7, r1
+
+#if defined(LOCKDEBUG)
+	/* release the sched_lock before handling interrupts */
+	bl	_C_LABEL(sched_unlock_idle)
+#endif
+
+	/* Lower the spl level to spl0 and get the current spl level. */
 #ifdef __NEWINTR
 	mov	r0, #(IPL_NONE)
 	bl	_C_LABEL(_spllower)
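The ordering in this hunk matters: cpu_switch() is entered with sched_lock
held, and the very next step lowers the spl to IPL_NONE, so the lock has to be
dropped first or an interrupt arriving at spl0 could try to take sched_lock
again. A sketch of the sequence in C, with the old-proc stash modelled
explicitly (helper names as in the real code, IPL_NONE value assumed):

/* Sketch only: entry sequence of cpu_switch() under LOCKDEBUG. */
struct proc;
extern void sched_unlock_idle(void);
extern int _spllower(int);		/* __NEWINTR interface used above */
#define IPL_NONE 0			/* assumption: actual value not shown here */

static int
cpu_switch_entry_sketch(struct proc *oldproc, struct proc **stash)
{
	*stash = oldproc;		/* "mov r7, r1": keep the old proc */
#if defined(LOCKDEBUG)
	sched_unlock_idle();		/* drop sched_lock before spl drops */
#endif
	return _spllower(IPL_NONE);	/* returns the previous spl level */
}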
@@ -305,14 +316,19 @@ ENTRY(cpu_switch)
 	/* Push the old spl level onto the stack */
 	str	r0, [sp, #-0x0004]!
 
-	mov	r1, r7
+	mov	r5, r7
 
 	/* First phase : find a new process */
 
-	/* rem: r1 = old proc */
+	/* rem: r5 = old proc */
 
-switch_search:
+Lswitch_search:
 	IRQdisable
+#if defined(LOCKDEBUG)
+	bl	_C_LABEL(sched_lock_idle)
+#endif
 
 	/* Do we have any active queues */
 	ldr	r7, Lwhichqs
@@ -321,8 +337,11 @@ switch_search:
 	/* If not we must idle until we do. */
 	teq	r3, #0x00000000
 	beq	_ASM_LABEL(idle)
+Lidle_ret:
+
+	/* restore old proc */
+	mov	r1, r5
 
 sw1:
 	/* rem: r1 = old proc */
 	/* rem: r3 = whichqs */
 	/* rem: interrupts are disabled */
@@ -407,6 +426,15 @@ sw1:
 	 */
 	str	r0, [r6, #(P_BACK)]
 
+#if defined(LOCKDEBUG)
+	/*
+	 * unlock the sched_lock, but leave interrupts off, for now.
+	 */
+	mov	r7, r1
+	bl	_C_LABEL(sched_unlock_idle)
+	mov	r1, r7
+#endif
+
 	/* p->p_cpu initialized in fork1() for single-processor */
 
 	/* Process is now on a processor. */
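Worth noting in the hunk above: sched_unlock_idle() is a C call, so r1 (the
old proc) has to be shuffled through the callee-saved r7 across it, since
r0-r3 are caller-saved under the ARM calling convention; and interrupts
deliberately stay masked until the context switch has actually happened. The
same step in sketch form, variables named after the registers they model:

/* Sketch only: drop sched_lock after dequeueing, interrupts still off. */
struct proc;
extern void sched_unlock_idle(void);

static void
dequeue_done_sketch(struct proc **r1)
{
	struct proc *r7 = *r1;	/* mov r7, r1: r1 is caller-saved */
	sched_unlock_idle();	/* the call may clobber r0-r3 */
	*r1 = r7;		/* mov r1, r7: restore the old proc */
	/* IRQs remain masked here until the switch completes */
}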
@@ -569,7 +597,13 @@ Lproc0:
 Lkernel_map:
 	.word	_C_LABEL(kernel_map)
 
+/*
+ * void switch_exit(struct proc *p);
+ * Switch to proc0's saved context and deallocate the address space and kernel
+ * stack for p.  Then jump into cpu_switch(), as if we were in proc0 all along.
+ */
+
 /* LINTSTUB: Func: void switch_exit(struct proc *p) */
 ENTRY(switch_exit)
 	/*
 	 * r0 = proc
@@ -633,9 +667,10 @@ Lse_context_switched:
 	mov	r0, #0x00000000
 	str	r0, [r1]
 
-	ldr	r1, Lproc0
-	b	switch_search
+	ldr	r5, Lproc0
+	b	Lswitch_search
 
 /* LINTSTUB: Func: void savectx(struct pcb *pcb) */
 ENTRY(savectx)
 	/*
 	 * r0 = pcb
--- pmap.c
+++ pmap.c
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.96 2002/04/24 17:35:10 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.97 2002/05/14 19:22:34 chris Exp $ */
 
 /*
  * Copyright (c) 2002 Wasabi Systems, Inc.
@@ -143,7 +143,7 @@
 #include <machine/param.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.96 2002/04/24 17:35:10 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97 2002/05/14 19:22:34 chris Exp $");
 #ifdef PMAP_DEBUG
 #define	PDEBUG(_lev_,_stat_) \
 	if (pmap_debug_level >= (_lev_)) \
@@ -1597,11 +1597,16 @@ pmap_destroy(struct pmap *pmap)
 	 * entries looking for pt's
 	 * taken from i386 pmap.c
 	 */
+	/*
+	 * vmobjlock must be held while freeing pages
+	 */
+	simple_lock(&pmap->pm_obj.vmobjlock);
 	while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
 		KASSERT((page->flags & PG_BUSY) == 0);
 		page->wire_count = 0;
 		uvm_pagefree(page);
 	}
+	simple_unlock(&pmap->pm_obj.vmobjlock);
 
 	/* Free the page dir */
 	pmap_freepagedir(pmap);
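This hunk is the pmap_destroy() fix from the commit message: uvm_pagefree()
manipulates the owning uvm_object's page list, and LOCKDEBUG checks that the
object lock is held, so pm_obj.vmobjlock must be taken around the whole
freeing loop rather than not at all. Restated as a stand-alone sketch against
the NetBSD-1.6-era UVM interfaces (header choice is an assumption):

/* Sketch only: freeing every page of a uvm_object under its lock. */
#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm.h>

static void
free_object_pages_sketch(struct uvm_object *uobj)
{
	struct vm_page *pg;

	simple_lock(&uobj->vmobjlock);	/* hold across every uvm_pagefree() */
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		KASSERT((pg->flags & PG_BUSY) == 0);
		pg->wire_count = 0;	/* uvm_pagefree() expects unwired pages */
		uvm_pagefree(pg);
	}
	simple_unlock(&uobj->vmobjlock);
}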
@@ -2999,6 +3004,7 @@ pmap_dump_pvlist(phys, m)
 	simple_lock(&pg->mdpage.pvh_slock);
 	printf("%s %08lx:", m, phys);
 	if (pg->mdpage.pvh_list == NULL) {
+		simple_unlock(&pg->mdpage.pvh_slock);
 		printf(" no mappings\n");
 		return;
 	}
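The one-line pmap_dump_pvlist() change fixes a classic early-return leak: the
lock taken at function entry was never released on the "no mappings" path,
which LOCKDEBUG flags the next time the lock is acquired. The general shape
of the fix, as a sketch:

/* Sketch only: unlock on every exit path, including early returns. */
#include <sys/lock.h>

static void
dump_sketch(struct simplelock *slock, int empty)
{
	simple_lock(slock);
	if (empty) {
		simple_unlock(slock);	/* the line the diff adds */
		return;
	}
	/* ... print the entries ... */
	simple_unlock(slock);
}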