Merge vmlocking2 to head.

ad 2008-01-02 11:48:20 +00:00
parent a41df38359
commit 4a780c9ae2
267 changed files with 7420 additions and 6904 deletions

@ -1,4 +1,4 @@
/* $NetBSD: fd.c,v 1.34 2007/10/25 12:48:11 yamt Exp $ */
/* $NetBSD: fd.c,v 1.35 2008/01/02 11:48:20 ad Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -89,7 +89,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.34 2007/10/25 12:48:11 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.35 2008/01/02 11:48:20 ad Exp $");
#include "opt_ddb.h"
@ -1513,11 +1513,11 @@ fdformat(dev, finfo, l)
struct buf *bp;
/* set up a buffer header for fdstrategy() */
bp = (struct buf *)malloc(sizeof(struct buf), M_TEMP, M_NOWAIT);
bp = getiobuf(NULL, false);
if(bp == 0)
return ENOBUFS;
memset((void *)bp, 0, sizeof(struct buf));
bp->b_flags = B_BUSY | B_PHYS | B_FORMAT;
bp->b_flags = B_PHYS | B_FORMAT;
bp->b_cflags |= BC_BUSY;
bp->b_proc = l->l_proc;
bp->b_dev = dev;
@ -1540,21 +1540,22 @@ fdformat(dev, finfo, l)
fdstrategy(bp);
/* ...and wait for it to complete */
s = splbio();
while(!(bp->b_flags & B_DONE)) {
rv = tsleep((void *)bp, PRIBIO, "fdform", 20 * hz);
/* XXX very dodgy */
mutex_enter(bp->b_objlock);
while (!(bp->b_oflags & BO_DONE)) {
rv = cv_timedwait(&bp->b_done, bp->b_objlock, 20 * hz);
if (rv == EWOULDBLOCK)
break;
}
splx(s);
mutex_exit(bp->b_objlock);
if (rv == EWOULDBLOCK) {
/* timed out */
rv = EIO;
biodone(bp);
} else if (bp->b_error != 0)
rv = bp->b_error;
free(bp, M_TEMP);
putiobuf(bp);
return rv;
}
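
The fdformat() hunk above is representative of a pattern repeated throughout this commit: the buffer "busy" state moves from b_flags (B_BUSY) to the new b_cflags field (BC_BUSY), completion moves from b_flags (B_DONE) to b_oflags (BO_DONE) and is now waited for on the buffer's condition variable under b_objlock, and temporary buffers come from getiobuf()/putiobuf() instead of malloc(M_TEMP)/free(). A minimal sketch of the new wait sequence, with error handling trimmed and variable names assumed:

    struct buf *bp;
    int rv;

    bp = getiobuf(NULL, false);          /* may return NULL: no waiting */
    if (bp == NULL)
            return ENOBUFS;
    bp->b_cflags |= BC_BUSY;             /* "busy" now lives in b_cflags */
    bp->b_flags = B_PHYS | B_FORMAT;

    fdstrategy(bp);                      /* start the transfer */

    mutex_enter(bp->b_objlock);          /* b_objlock guards b_oflags and b_done */
    while ((bp->b_oflags & BO_DONE) == 0) {
            rv = cv_timedwait(&bp->b_done, bp->b_objlock, 20 * hz);
            if (rv == EWOULDBLOCK)       /* timed out */
                    break;
    }
    mutex_exit(bp->b_objlock);

    putiobuf(bp);                        /* replaces free(bp, M_TEMP) */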

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.16 2007/10/17 19:52:54 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.17 2008/01/02 11:48:20 ad Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@ -29,7 +29,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.16 2007/10/17 19:52:54 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.17 2008/01/02 11:48:20 ad Exp $");
#include <sys/param.h>
#include <sys/buf.h>
@ -105,7 +105,7 @@ readdisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -219,7 +219,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
dlp = (struct disklabel *)((char*)bp->b_data + LABELOFFSET);
*dlp = *lp; /* struct assignment */
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.35 2007/10/17 19:52:55 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.36 2008/01/02 11:48:21 ad Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@ -29,7 +29,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.35 2007/10/17 19:52:55 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.36 2008/01/02 11:48:21 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -110,7 +110,7 @@ readdisklabel(dev, strat, lp, clp)
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~BO_DONE;
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -244,8 +244,9 @@ writedisklabel(dev, strat, lp, clp)
dp[63] = sum;
}
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_flags &= ~B_READ;
bp->b_flags |= B_WRITE;
bp->b_oflags &= ~BO_DONE;
(*strat)(bp);
error = biowait(bp);

@ -1,7 +1,7 @@
/* $NetBSD: pmap.c,v 1.228 2007/11/07 00:23:14 ad Exp $ */
/* $NetBSD: pmap.c,v 1.229 2008/01/02 11:48:21 ad Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
* Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -145,7 +145,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.228 2007/11/07 00:23:14 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.229 2008/01/02 11:48:21 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -154,14 +154,12 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.228 2007/11/07 00:23:14 ad Exp $");
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/buf.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <uvm/uvm.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#if defined(_PMAP_MAY_USE_PROM_CONSOLE) || defined(MULTIPROCESSOR)
#include <machine/rpb.h>
#endif
@ -326,11 +324,8 @@ static struct pmap_asn_info pmap_asn_info[ALPHA_MAXPROCS];
/*
* Locking:
*
* This pmap module uses two types of locks: `normal' (sleep)
* locks and `simple' (spin) locks. They are used as follows:
*
* READ/WRITE SPIN LOCKS
* ---------------------
* READ/WRITE LOCKS
* ----------------
*
* * pmap_main_lock - This lock is used to prevent deadlock and/or
* provide mutex access to the pmap module. Most operations lock
@ -343,10 +338,10 @@ static struct pmap_asn_info pmap_asn_info[ALPHA_MAXPROCS];
* the PV->pmap direction. Since only one thread can hold a write
* lock at a time, this provides the mutex.
*
* SIMPLE LOCKS
* ------------
* MUTEXES
* -------
*
* * pm_slock (per-pmap) - This lock protects all of the members
* * pm_lock (per-pmap) - This lock protects all of the members
* of the pmap structure itself. This lock will be asserted
* in pmap_activate() and pmap_deactivate() from a critical
* section of mi_switch(), and must never sleep. Note that
@ -354,27 +349,27 @@ static struct pmap_asn_info pmap_asn_info[ALPHA_MAXPROCS];
* memory allocation *must* be blocked while this lock is
* asserted.
*
* * pvh_slock (per-vm_page) - This lock protects the PV list
* * pvh_lock (per-vm_page) - This lock protects the PV list
* for a specified managed page.
*
* * pmap_all_pmaps_slock - This lock protects the global list of
* all pmaps. Note that a pm_slock must never be held while this
* * pmap_all_pmaps_lock - This lock protects the global list of
* all pmaps. Note that a pm_lock must never be held while this
* lock is held.
*
* * pmap_growkernel_slock - This lock protects pmap_growkernel()
* * pmap_growkernel_lock - This lock protects pmap_growkernel()
* and the virtual_end variable.
*
* There is a lock ordering constraint for pmap_growkernel_slock.
* There is a lock ordering constraint for pmap_growkernel_lock.
* pmap_growkernel() acquires the locks in the following order:
*
* pmap_growkernel_slock -> pmap_all_pmaps_slock ->
* pmap->pm_slock
* pmap_growkernel_lock -> pmap_all_pmaps_lock ->
* pmap->pm_lock
*
* But pmap_lev1map_create() is called with pmap->pm_slock held,
* and also needs to acquire the pmap_growkernel_slock. So,
* But pmap_lev1map_create() is called with pmap->pm_lock held,
* and also needs to acquire the pmap_growkernel_lock. So,
* we require that the caller of pmap_lev1map_create() (currently,
* the only caller is pmap_enter()) acquire pmap_growkernel_slock
* before acquring pmap->pm_slock.
* the only caller is pmap_enter()) acquire pmap_growkernel_lock
* before acquring pmap->pm_lock.
*
* Address space number management (global ASN counters and per-pmap
* ASN state) are not locked; they use arrays of values indexed
@ -384,14 +379,14 @@ static struct pmap_asn_info pmap_asn_info[ALPHA_MAXPROCS];
* with the pmap already locked by the caller (which will be
* an interface function).
*/
/* static struct lock pmap_main_lock; */
static struct simplelock pmap_all_pmaps_slock;
static struct simplelock pmap_growkernel_slock;
static krwlock_t pmap_main_lock;
static kmutex_t pmap_all_pmaps_lock;
static kmutex_t pmap_growkernel_lock;
#define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
#define PMAP_MAP_TO_HEAD_LOCK() rw_enter(&pmap_main_lock, RW_READER)
#define PMAP_MAP_TO_HEAD_UNLOCK() rw_exit(&pmap_main_lock)
#define PMAP_HEAD_TO_MAP_LOCK() rw_enter(&pmap_main_lock, RW_WRITER)
#define PMAP_HEAD_TO_MAP_UNLOCK() rw_exit(&pmap_main_lock)
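
The comment above fixes the ordering pmap_growkernel_lock -> pmap_all_pmaps_lock -> pmap->pm_lock. As a condensed illustration (not part of the diff itself), the pmap_growkernel() path shown further down takes the new kmutex_t locks in exactly that order when it propagates a fresh level 1 PTE to every user pmap:

    mutex_enter(&pmap_growkernel_lock);
    mutex_enter(&pmap_all_pmaps_lock);
    TAILQ_FOREACH(pm, &pmap_all_pmaps, pm_list) {
            if (pm == pmap_kernel())
                    continue;               /* kernel pmap handled separately */
            mutex_enter(&pm->pm_lock);      /* innermost lock in the order */
            pm->pm_lev1map[l1idx] = pte;    /* install the new L1 PTE */
            mutex_exit(&pm->pm_lock);
    }
    mutex_exit(&pmap_all_pmaps_lock);
    mutex_exit(&pmap_growkernel_lock);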
#if defined(MULTIPROCESSOR)
/*
@ -421,21 +416,9 @@ static struct pmap_tlb_shootdown_q {
int pq_pte; /* aggregate PTE bits */
int pq_count; /* number of pending requests */
int pq_tbia; /* pending global flush */
struct simplelock pq_slock; /* spin lock on queue */
kmutex_t pq_lock; /* spin lock on queue */
} pmap_tlb_shootdown_q[ALPHA_MAXPROCS];
#define PSJQ_LOCK(pq, s) \
do { \
s = splvm(); \
simple_lock(&(pq)->pq_slock); \
} while (/*CONSTCOND*/0)
#define PSJQ_UNLOCK(pq, s) \
do { \
simple_unlock(&(pq)->pq_slock); \
splx(s); \
} while (/*CONSTCOND*/0)
/* If we have more pending jobs than this, we just nail the whole TLB. */
#define PMAP_TLB_SHOOTDOWN_MAXJOBS 6
@ -511,16 +494,6 @@ static void pmap_physpage_free(paddr_t);
static int pmap_physpage_addref(void *);
static int pmap_physpage_delref(void *);
/*
* Define PMAP_NO_LAZY_LEV1MAP in order to have a lev1map allocated
* in pmap_create(), rather than when the first mapping is entered.
* This causes pmaps to use an extra page of memory if no mappings
* are entered in them, but in practice this is probably not going
* to be a problem, and it allows us to avoid locking pmaps in
* pmap_activate().
*/
#define PMAP_NO_LAZY_LEV1MAP
/*
* PMAP_ISACTIVE{,_TEST}:
*
@ -919,8 +892,8 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
(i*PAGE_SIZE*NPTEPG))] = pte;
}
/* Initialize the pmap_growkernel_slock. */
simple_lock_init(&pmap_growkernel_slock);
/* Initialize the pmap_growkernel_lock. */
mutex_init(&pmap_growkernel_lock, MUTEX_DEFAULT, IPL_NONE);
/*
* Set up level three page table (lev3map)
@ -952,8 +925,8 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
/*
* Initialize the locks.
*/
/* spinlockinit(&pmap_main_lock, "pmaplk", 0); */
simple_lock_init(&pmap_all_pmaps_slock);
rw_init(&pmap_main_lock);
mutex_init(&pmap_all_pmaps_lock, MUTEX_DEFAULT, IPL_NONE);
/*
* Initialize kernel pmap. Note that all kernel mappings
@ -970,7 +943,7 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
pmap_kernel()->pm_asni[i].pma_asngen =
pmap_asn_info[i].pma_asngen;
}
simple_lock_init(&pmap_kernel()->pm_slock);
mutex_init(&pmap_kernel()->pm_lock, MUTEX_DEFAULT, IPL_NONE);
TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
#if defined(MULTIPROCESSOR)
@ -982,7 +955,8 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
IPL_VM);
for (i = 0; i < ALPHA_MAXPROCS; i++) {
TAILQ_INIT(&pmap_tlb_shootdown_q[i].pq_head);
simple_lock_init(&pmap_tlb_shootdown_q[i].pq_slock);
mutex_init(&pmap_tlb_shootdown_q[i].pq_lock, MUTEX_DEFAULT,
IPL_VM);
}
#endif
@ -998,7 +972,7 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
/*
* Mark the kernel pmap `active' on this processor.
*/
atomic_setbits_ulong(&pmap_kernel()->pm_cpus,
atomic_or_ulong(&pmap_kernel()->pm_cpus,
(1UL << cpu_number()));
}
@ -1197,16 +1171,14 @@ pmap_create(void)
/* XXX Locking? */
pmap->pm_asni[i].pma_asngen = pmap_asn_info[i].pma_asngen;
}
simple_lock_init(&pmap->pm_slock);
mutex_init(&pmap->pm_lock, MUTEX_DEFAULT, IPL_NONE);
simple_lock(&pmap_all_pmaps_slock);
mutex_enter(&pmap_all_pmaps_lock);
TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
simple_unlock(&pmap_all_pmaps_slock);
mutex_exit(&pmap_all_pmaps_lock);
#ifdef PMAP_NO_LAZY_LEV1MAP
i = pmap_lev1map_create(pmap, cpu_number());
KASSERT(i == 0);
#endif
return (pmap);
}
@ -1220,30 +1192,23 @@ pmap_create(void)
void
pmap_destroy(pmap_t pmap)
{
int refs;
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_destroy(%p)\n", pmap);
#endif
PMAP_LOCK(pmap);
refs = --pmap->pm_count;
PMAP_UNLOCK(pmap);
if (refs > 0)
if (atomic_dec_uint_nv(&pmap->pm_count) > 0)
return;
/*
* Remove it from the global list of all pmaps.
*/
simple_lock(&pmap_all_pmaps_slock);
mutex_enter(&pmap_all_pmaps_lock);
TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list);
simple_unlock(&pmap_all_pmaps_slock);
mutex_exit(&pmap_all_pmaps_lock);
#ifdef PMAP_NO_LAZY_LEV1MAP
pmap_lev1map_destroy(pmap, cpu_number());
#endif
/*
* Since the pmap is supposed to contain no valid
@ -1252,6 +1217,7 @@ pmap_destroy(pmap_t pmap)
*/
KASSERT(pmap->pm_lev1map == kernel_lev1map);
mutex_destroy(&pmap->pm_lock);
pool_cache_put(&pmap_pmap_cache, pmap);
}
@ -1269,9 +1235,7 @@ pmap_reference(pmap_t pmap)
printf("pmap_reference(%p)\n", pmap);
#endif
PMAP_LOCK(pmap);
pmap->pm_count++;
PMAP_UNLOCK(pmap);
atomic_inc_uint(&pmap->pm_count);
}
/*
@ -1486,7 +1450,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
case VM_PROT_READ|VM_PROT_EXECUTE:
case VM_PROT_READ:
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
PMAP_LOCK(pv->pv_pmap);
if (*pv->pv_pte & (PG_KWE | PG_UWE)) {
@ -1499,7 +1463,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
}
PMAP_UNLOCK(pv->pv_pmap);
}
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
PMAP_HEAD_TO_MAP_UNLOCK();
PMAP_TLB_SHOOTNOW();
return;
@ -1510,7 +1474,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
}
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
for (pv = pg->mdpage.pvh_list; pv != NULL; pv = nextpv) {
nextpv = pv->pv_next;
pmap = pv->pv_pmap;
@ -1534,7 +1498,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
if (needkisync)
PMAP_SYNC_ISTREAM_KERNEL();
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
PMAP_HEAD_TO_MAP_UNLOCK();
}
@ -1688,40 +1652,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
panic("pmap_enter: user pmap, invalid va 0x%lx", va);
#endif
#ifdef PMAP_NO_LAZY_LEV1MAP
KASSERT(pmap->pm_lev1map != kernel_lev1map);
#else
/*
* If we're still referencing the kernel kernel_lev1map,
* create a new level 1 page table. A reference will be
* added to the level 1 table when the level 2 table is
* created.
*/
if (pmap->pm_lev1map == kernel_lev1map) {
/*
* XXX Yuck.
* We have to unlock the pmap, lock the
* pmap_growkernel_slock, and re-lock the
* pmap here, in order to avoid a deadlock
* with pmap_growkernel().
*
* Because we unlock, we have a window for
* someone else to add a mapping, thus creating
* a level 1 map; pmap_lev1map_create() checks
* for this condition.
*/
PMAP_UNLOCK(pmap);
simple_lock(&pmap_growkernel_slock);
PMAP_LOCK(pmap);
error = pmap_lev1map_create(pmap, cpu_id);
simple_unlock(&pmap_growkernel_slock);
if (error) {
if (flags & PMAP_CANFAIL)
goto out;
panic("pmap_enter: unable to create lev1map");
}
}
#endif /* PMAP_NO_LAZY_LEV1MAP */
/*
* Check to see if the level 1 PTE is valid, and
@ -1890,13 +1821,13 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if ((flags & VM_PROT_ALL) & ~prot)
panic("pmap_enter: access type exceeds prot");
#endif
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
if (flags & VM_PROT_WRITE)
pg->mdpage.pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
else if (flags & VM_PROT_ALL)
pg->mdpage.pvh_attrs |= PGA_REFERENCED;
attrs = pg->mdpage.pvh_attrs;
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
/*
* Set up referenced/modified emulation for new mapping.
@ -2258,21 +2189,13 @@ pmap_activate(struct lwp *l)
printf("pmap_activate(%p)\n", l);
#endif
#ifndef PMAP_NO_LAZY_LEV1MAP
PMAP_LOCK(pmap);
#endif
/* Mark the pmap in use by this processor. */
atomic_setbits_ulong(&pmap->pm_cpus, (1UL << cpu_id));
atomic_or_ulong(&pmap->pm_cpus, (1UL << cpu_id));
/* Allocate an ASN. */
pmap_asn_alloc(pmap, cpu_id);
PMAP_ACTIVATE(pmap, l, cpu_id);
#ifndef PMAP_NO_LAZY_LEV1MAP
PMAP_UNLOCK(pmap);
#endif
}
/*
@ -2298,7 +2221,7 @@ pmap_deactivate(struct lwp *l)
/*
* Mark the pmap no longer in use by this processor.
*/
atomic_clearbits_ulong(&pmap->pm_cpus, (1UL << cpu_number()));
atomic_and_ulong(&pmap->pm_cpus, ~(1UL << cpu_number()));
}
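
These hunks also show the atomics conversion that runs through the alpha pmap: the <machine/atomic.h> helpers atomic_setbits_ulong()/atomic_clearbits_ulong() are replaced by the <sys/atomic.h> operations, sketched here on the pm_cpus mask:

    /* was: atomic_setbits_ulong(&pmap->pm_cpus, 1UL << cpu_id);   */
    atomic_or_ulong(&pmap->pm_cpus, 1UL << cpu_id);

    /* was: atomic_clearbits_ulong(&pmap->pm_cpus, 1UL << cpu_id); */
    atomic_and_ulong(&pmap->pm_cpus, ~(1UL << cpu_id));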
#if defined(MULTIPROCESSOR)
@ -2448,7 +2371,7 @@ pmap_clear_modify(struct vm_page *pg)
#endif
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
if (pg->mdpage.pvh_attrs & PGA_MODIFIED) {
rv = true;
@ -2456,7 +2379,7 @@ pmap_clear_modify(struct vm_page *pg)
pg->mdpage.pvh_attrs &= ~PGA_MODIFIED;
}
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
PMAP_HEAD_TO_MAP_UNLOCK();
return (rv);
@ -2479,7 +2402,7 @@ pmap_clear_reference(struct vm_page *pg)
#endif
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
if (pg->mdpage.pvh_attrs & PGA_REFERENCED) {
rv = true;
@ -2487,7 +2410,7 @@ pmap_clear_reference(struct vm_page *pg)
pg->mdpage.pvh_attrs &= ~PGA_REFERENCED;
}
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
PMAP_HEAD_TO_MAP_UNLOCK();
return (rv);
@ -2835,7 +2758,7 @@ pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int type)
pg = PHYS_TO_VM_PAGE(pa);
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
if (type == ALPHA_MMCSR_FOW) {
pg->mdpage.pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
@ -2849,7 +2772,7 @@ pmap_emulate_reference(struct lwp *l, vaddr_t v, int user, int type)
}
pmap_changebit(pg, 0, ~faultoff, cpu_id);
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
PMAP_HEAD_TO_MAP_UNLOCK();
return (0);
}
@ -2868,7 +2791,7 @@ pmap_pv_dump(paddr_t pa)
pg = PHYS_TO_VM_PAGE(pa);
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
printf("pa 0x%lx (attrs = 0x%x):\n", pa, pg->mdpage.pvh_attrs);
for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next)
@ -2876,7 +2799,7 @@ pmap_pv_dump(paddr_t pa)
pv->pv_pmap, pv->pv_va);
printf("\n");
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
}
#endif
@ -2935,7 +2858,7 @@ pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
newpv->pv_pte = pte;
if (dolock)
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
#ifdef DEBUG
{
@ -2959,7 +2882,7 @@ pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
pg->mdpage.pvh_list = newpv;
if (dolock)
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
return 0;
}
@ -2975,7 +2898,7 @@ pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock)
pv_entry_t pv, *pvp;
if (dolock)
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
/*
* Find the entry to remove.
@ -2993,7 +2916,7 @@ pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock)
*pvp = pv->pv_next;
if (dolock)
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
pmap_pv_free(pv);
}
@ -3050,13 +2973,13 @@ pmap_physpage_alloc(int usage, paddr_t *pap)
pa = VM_PAGE_TO_PHYS(pg);
#ifdef DEBUG
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
if (pg->wire_count != 0) {
printf("pmap_physpage_alloc: page 0x%lx has "
"%d references\n", pa, pg->wire_count);
panic("pmap_physpage_alloc");
}
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
#endif
*pap = pa;
return (true);
@ -3078,10 +3001,10 @@ pmap_physpage_free(paddr_t pa)
panic("pmap_physpage_free: bogus physical page address");
#ifdef DEBUG
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
if (pg->wire_count != 0)
panic("pmap_physpage_free: page still has references");
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
#endif
uvm_pagefree(pg);
@ -3102,9 +3025,9 @@ pmap_physpage_addref(void *kva)
pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva));
pg = PHYS_TO_VM_PAGE(pa);
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
rval = ++pg->wire_count;
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
return (rval);
}
@ -3124,7 +3047,7 @@ pmap_physpage_delref(void *kva)
pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva));
pg = PHYS_TO_VM_PAGE(pa);
simple_lock(&pg->mdpage.pvh_slock);
mutex_enter(&pg->mdpage.pvh_lock);
#ifdef DIAGNOSTIC
/*
@ -3136,7 +3059,7 @@ pmap_physpage_delref(void *kva)
rval = --pg->wire_count;
simple_unlock(&pg->mdpage.pvh_slock);
mutex_exit(&pg->mdpage.pvh_lock);
return (rval);
}
@ -3161,7 +3084,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
if (maxkvaddr <= virtual_end)
goto out; /* we are OK */
simple_lock(&pmap_growkernel_slock);
mutex_enter(&pmap_growkernel_lock);
va = virtual_end;
@ -3194,7 +3117,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
l1idx = l1pte_index(va);
/* Update all the user pmaps. */
simple_lock(&pmap_all_pmaps_slock);
mutex_enter(&pmap_all_pmaps_lock);
for (pm = TAILQ_FIRST(&pmap_all_pmaps);
pm != NULL; pm = TAILQ_NEXT(pm, pm_list)) {
/* Skip the kernel pmap. */
@ -3209,7 +3132,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
pm->pm_lev1map[l1idx] = pte;
PMAP_UNLOCK(pm);
}
simple_unlock(&pmap_all_pmaps_slock);
mutex_exit(&pmap_all_pmaps_lock);
}
/*
@ -3235,7 +3158,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
virtual_end = va;
simple_unlock(&pmap_growkernel_slock);
mutex_exit(&pmap_growkernel_lock);
out:
return (virtual_end);
@ -3275,28 +3198,12 @@ pmap_lev1map_create(pmap_t pmap, long cpu_id)
panic("pmap_lev1map_create: pmap uses non-reserved ASN");
#endif
#ifdef PMAP_NO_LAZY_LEV1MAP
/* Being called from pmap_create() in this case; we can sleep. */
l1pt = pool_cache_get(&pmap_l1pt_cache, PR_WAITOK);
#else
l1pt = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT);
#endif
if (l1pt == NULL)
return (ENOMEM);
pmap->pm_lev1map = l1pt;
#ifndef PMAP_NO_LAZY_LEV1MAP /* guaranteed not to be active */
/*
* The page table base has changed; if the pmap was active,
* reactivate it.
*/
if (PMAP_ISACTIVE(pmap, cpu_id)) {
pmap_asn_alloc(pmap, cpu_id);
PMAP_ACTIVATE(pmap, curlwp, cpu_id);
}
PMAP_LEV1MAP_SHOOTDOWN(pmap, cpu_id);
#endif /* ! PMAP_NO_LAZY_LEV1MAP */
return (0);
}
@ -3322,31 +3229,6 @@ pmap_lev1map_destroy(pmap_t pmap, long cpu_id)
*/
pmap->pm_lev1map = kernel_lev1map;
#ifndef PMAP_NO_LAZY_LEV1MAP /* pmap is being destroyed */
/*
* The page table base has changed; if the pmap was active,
* reactivate it. Note that allocation of a new ASN is
* not necessary here:
*
* (1) We've gotten here because we've deleted all
* user mappings in the pmap, invalidating the
* TLB entries for them as we go.
*
* (2) kernel_lev1map contains only kernel mappings, which
* were identical in the user pmap, and all of
* those mappings have PG_ASM, so the ASN doesn't
* matter.
*
* We do, however, ensure that the pmap is using the
* reserved ASN, to ensure that no two pmaps never have
* clashing TLB entries.
*/
PMAP_INVALIDATE_ASN(pmap, cpu_id);
if (PMAP_ISACTIVE(pmap, cpu_id))
PMAP_ACTIVATE(pmap, curlwp, cpu_id);
PMAP_LEV1MAP_SHOOTDOWN(pmap, cpu_id);
#endif /* ! PMAP_NO_LAZY_LEV1MAP */
/*
* Free the old level 1 page table page.
*/
@ -3583,15 +3465,7 @@ pmap_l1pt_delref(pmap_t pmap, pt_entry_t *l1pte, long cpu_id)
panic("pmap_l1pt_delref: kernel pmap");
#endif
if (pmap_physpage_delref(l1pte) == 0) {
#ifndef PMAP_NO_LAZY_LEV1MAP
/*
* No more level 2 tables left, go back to the global
* kernel_lev1map.
*/
pmap_lev1map_destroy(pmap, cpu_id);
#endif /* ! PMAP_NO_LAZY_LEV1MAP */
}
(void)pmap_physpage_delref(l1pte);
}
/******************** Address Space Number management ********************/
@ -3623,7 +3497,6 @@ pmap_asn_alloc(pmap_t pmap, long cpu_id)
* have PG_ASM set. If the pmap eventually gets its own
* lev1map, an ASN will be allocated at that time.
*
* #ifdef PMAP_NO_LAZY_LEV1MAP
* Only the kernel pmap will reference kernel_lev1map. Do the
* same old fixups, but note that we no longer need the pmap
* to be locked if we're in this mode, since pm_lev1map will
@ -3747,7 +3620,7 @@ pmap_asn_alloc(pmap_t pmap, long cpu_id)
* Have a new ASN, so there's no need to sync the I-stream
* on the way back out to userspace.
*/
atomic_clearbits_ulong(&pmap->pm_needisync, (1UL << cpu_id));
atomic_and_ulong(&pmap->pm_needisync, ~(1UL << cpu_id));
}
#if defined(MULTIPROCESSOR)
@ -3768,10 +3641,8 @@ pmap_tlb_shootdown(pmap_t pmap, vaddr_t va, pt_entry_t pte, u_long *cpumaskp)
struct cpu_info *ci, *self = curcpu();
u_long cpumask;
CPU_INFO_ITERATOR cii;
int s;
LOCK_ASSERT((pmap == pmap_kernel()) ||
simple_lock_held(&pmap->pm_slock));
KASSERT((pmap == pmap_kernel()) || mutex_owned(&pmap->pm_lock));
cpumask = 0;
@ -3803,7 +3674,7 @@ pmap_tlb_shootdown(pmap_t pmap, vaddr_t va, pt_entry_t pte, u_long *cpumaskp)
pq = &pmap_tlb_shootdown_q[ci->ci_cpuid];
PSJQ_LOCK(pq, s);
mutex_spin_enter(&pq->pq_lock);
pq->pq_pte |= pte;
@ -3812,7 +3683,7 @@ pmap_tlb_shootdown(pmap_t pmap, vaddr_t va, pt_entry_t pte, u_long *cpumaskp)
* don't really have to do anything else.
*/
if (pq->pq_tbia) {
PSJQ_UNLOCK(pq, s);
mutex_spin_exit(&pq->pq_lock);
continue;
}
@ -3832,7 +3703,7 @@ pmap_tlb_shootdown(pmap_t pmap, vaddr_t va, pt_entry_t pte, u_long *cpumaskp)
cpumask |= 1UL << ci->ci_cpuid;
PSJQ_UNLOCK(pq, s);
mutex_spin_exit(&pq->pq_lock);
}
*cpumaskp |= cpumask;
@ -3863,9 +3734,8 @@ pmap_do_tlb_shootdown(struct cpu_info *ci, struct trapframe *framep)
u_long cpu_mask = (1UL << cpu_id);
struct pmap_tlb_shootdown_q *pq = &pmap_tlb_shootdown_q[cpu_id];
struct pmap_tlb_shootdown_job *pj;
int s;
PSJQ_LOCK(pq, s);
mutex_spin_enter(&pq->pq_lock);
if (pq->pq_tbia) {
if (pq->pq_pte & PG_ASM)
@ -3885,7 +3755,7 @@ pmap_do_tlb_shootdown(struct cpu_info *ci, struct trapframe *framep)
pq->pq_pte = 0;
}
PSJQ_UNLOCK(pq, s);
mutex_spin_exit(&pq->pq_lock);
}
/*

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.116 2007/10/17 19:52:56 garbled Exp $ */
/* $NetBSD: trap.c,v 1.117 2008/01/02 11:48:21 ad Exp $ */
/*-
* Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
@ -100,7 +100,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.116 2007/10/17 19:52:56 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.117 2008/01/02 11:48:21 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -263,9 +263,7 @@ trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
* and per-process unaligned-access-handling flags).
*/
if (user) {
KERNEL_LOCK(1, l);
i = unaligned_fixup(a0, a1, a2, l);
KERNEL_UNLOCK_LAST(l);
if (i == 0)
goto out;
@ -360,9 +358,7 @@ trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
break;
case ALPHA_IF_CODE_OPDEC:
KERNEL_LOCK(1, l);
i = handle_opdec(l, &ucode);
KERNEL_UNLOCK_LAST(l);
KSI_INIT_TRAP(&ksi);
if (i == 0)
goto out;
@ -392,20 +388,10 @@ trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
case ALPHA_MMCSR_FOR:
case ALPHA_MMCSR_FOE:
case ALPHA_MMCSR_FOW:
if (user)
KERNEL_LOCK(1, l);
else
KERNEL_LOCK(1, NULL);
if (pmap_emulate_reference(l, a0, user, a1)) {
ftype = VM_PROT_EXECUTE;
goto do_fault;
}
if (user)
KERNEL_UNLOCK_LAST(l);
else
KERNEL_UNLOCK_ONE(NULL);
goto out;
case ALPHA_MMCSR_INVALTRANS:
@ -435,9 +421,7 @@ trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
#endif
}
if (user)
KERNEL_LOCK(1, l);
else {
if (!user) {
struct cpu_info *ci = curcpu();
if (l == NULL) {
@ -472,8 +456,6 @@ trap(const u_long a0, const u_long a1, const u_long a2, const u_long entry,
*/
if (ci->ci_intrdepth != 0)
goto dopanic;
KERNEL_LOCK(1, NULL);
}
/*
@ -513,16 +495,10 @@ do_fault:
rv = EFAULT;
}
if (rv == 0) {
if (user)
KERNEL_UNLOCK_LAST(l);
else
KERNEL_UNLOCK_ONE(NULL);
goto out;
}
if (user == 0) {
KERNEL_UNLOCK_ONE(NULL);
/* Check for copyin/copyout fault */
if (l != NULL &&
l->l_addr->u_pcb.pcb_onfault != 0) {
@ -550,7 +526,6 @@ do_fault:
ksi.ksi_code = SEGV_ACCERR;
else
ksi.ksi_code = SEGV_MAPERR;
KERNEL_UNLOCK_LAST(l);
break;
}
@ -567,9 +542,7 @@ do_fault:
#ifdef DEBUG
printtrap(a0, a1, a2, entry, framep, 1, user);
#endif
KERNEL_LOCK(1, l);
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
out:
if (user)
userret(l);
@ -675,8 +648,6 @@ ast(struct trapframe *framep)
if (l == NULL)
return;
KERNEL_LOCK(1, l);
uvmexp.softs++;
l->l_md.md_tf = framep;
@ -692,7 +663,6 @@ ast(struct trapframe *framep)
preempt();
}
KERNEL_UNLOCK_LAST(l);
userret(l);
}
@ -1247,6 +1217,5 @@ startlwp(void *arg)
#endif
pool_put(&lwp_uc_pool, uc);
KERNEL_UNLOCK_LAST(l);
userret(l);
}

@ -1,7 +1,7 @@
/* $NetBSD: pmap.h,v 1.68 2007/02/21 22:59:37 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.69 2008/01/02 11:48:21 ad Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
* Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@ -117,7 +117,7 @@
#include "opt_multiprocessor.h"
#endif
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <machine/pte.h>
@ -144,7 +144,7 @@ struct pmap {
TAILQ_ENTRY(pmap) pm_list; /* list of all pmaps */
pt_entry_t *pm_lev1map; /* level 1 map */
int pm_count; /* pmap reference count */
struct simplelock pm_slock; /* lock on pmap */
kmutex_t pm_lock; /* lock on pmap */
struct pmap_statistics pm_stats; /* pmap statistics */
unsigned long pm_cpus; /* mask of CPUs using pmap */
unsigned long pm_needisync; /* mask of CPUs needing isync */
@ -343,8 +343,8 @@ pmap_l3pte(pmap, v, l2pte)
* operations, locking the kernel pmap is not necessary. Therefore,
* it is not necessary to block interrupts when locking pmap strucutres.
*/
#define PMAP_LOCK(pmap) simple_lock(&(pmap)->pm_slock)
#define PMAP_UNLOCK(pmap) simple_unlock(&(pmap)->pm_slock)
#define PMAP_LOCK(pmap) mutex_enter(&(pmap)->pm_lock)
#define PMAP_UNLOCK(pmap) mutex_exit(&(pmap)->pm_lock)
/*
* Macro for processing deferred I-stream synchronization.

@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.29 2005/12/11 12:16:16 christos Exp $ */
/* $NetBSD: vmparam.h,v 1.30 2008/01/02 11:48:21 ad Exp $ */
/*
* Copyright (c) 1992, 1993
@ -166,14 +166,14 @@
#define __HAVE_VM_PAGE_MD
struct vm_page_md {
struct pv_entry *pvh_list; /* pv_entry list */
struct simplelock pvh_slock; /* lock on this head */
kmutex_t pvh_lock; /* lock on this head */
int pvh_attrs; /* page attributes */
};
#define VM_MDPAGE_INIT(pg) \
do { \
(pg)->mdpage.pvh_list = NULL; \
simple_lock_init(&(pg)->mdpage.pvh_slock); \
mutex_init(&(pg)->mdpage.pvh_lock, MUTEX_DEFAULT, IPL_NONE); \
} while (/*CONSTCOND*/0)
#endif /* ! _ALPHA_VMPARAM_H_ */

@ -1,4 +1,4 @@
/* $NetBSD: fpu.c,v 1.20 2007/11/22 16:16:41 bouyer Exp $ */
/* $NetBSD: fpu.c,v 1.21 2008/01/02 11:48:21 ad Exp $ */
/*-
* Copyright (c) 1991 The Regents of the University of California.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.20 2007/11/22 16:16:41 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.21 2008/01/02 11:48:21 ad Exp $");
#include "opt_multiprocessor.h"
@ -179,9 +179,7 @@ fputrap(frame)
ksi.ksi_addr = (void *)frame->tf_rip;
ksi.ksi_code = x86fpflags_to_ksiginfo(statbits);
ksi.ksi_trap = statbits;
KERNEL_LOCK(1, l);
(*l->l_proc->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
}
static int

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.42 2008/01/01 21:28:40 yamt Exp $ */
/* $NetBSD: trap.c,v 1.43 2008/01/02 11:48:22 ad Exp $ */
/*-
* Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
@ -75,7 +75,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.42 2008/01/01 21:28:40 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.43 2008/01/02 11:48:22 ad Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
@ -439,7 +439,6 @@ copyfault:
goto copyefault;
cr2 = rcr2();
KERNEL_LOCK(1, NULL);
goto faultcommon;
case T_PAGEFLT|T_USER: { /* page fault */
@ -453,7 +452,6 @@ copyfault:
if (p->p_emul->e_usertrap != NULL &&
(*p->p_emul->e_usertrap)(l, cr2, frame) != 0)
return;
KERNEL_LOCK(1, l);
faultcommon:
vm = p->p_vmspace;
if (vm == NULL)
@ -496,8 +494,6 @@ faultcommon:
uvm_grow(p, va);
if (type == T_PAGEFLT) {
KERNEL_UNLOCK_ONE(NULL);
/*
* we need to switch pmap now if we're in
* the middle of copyin/out.
@ -511,7 +507,6 @@ faultcommon:
pmap_load();
return;
}
KERNEL_UNLOCK_LAST(l);
goto out;
}
KSI_INIT_TRAP(&ksi);
@ -524,10 +519,8 @@ faultcommon:
ksi.ksi_code = SEGV_MAPERR;
if (type == T_PAGEFLT) {
if (pcb->pcb_onfault != 0) {
KERNEL_UNLOCK_ONE(NULL);
if (pcb->pcb_onfault != 0)
goto copyfault;
}
printf("uvm_fault(%p, 0x%lx, %d) -> %x\n",
map, va, ftype, error);
goto we_re_toast;
@ -547,10 +540,6 @@ faultcommon:
ksi.ksi_signo = SIGSEGV;
}
(*p->p_emul->e_trapsignal)(l, &ksi);
if (type == T_PAGEFLT)
KERNEL_UNLOCK_ONE(NULL);
else
KERNEL_UNLOCK_LAST(l);
break;
}
@ -580,9 +569,7 @@ faultcommon:
ksi.ksi_code = TRAP_BRKPT;
else
ksi.ksi_code = TRAP_TRACE;
KERNEL_LOCK(1, l);
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
}
break;
@ -621,9 +608,7 @@ out:
userret(l);
return;
trapsignal:
KERNEL_LOCK(1, l);
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
userret(l);
}
@ -636,9 +621,6 @@ startlwp(void *arg)
err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
pool_put(&lwp_uc_pool, uc);
KERNEL_UNLOCK_LAST(l);
userret(l);
}

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.55 2007/10/17 19:53:12 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.56 2008/01/02 11:48:22 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.55 2007/10/17 19:53:12 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.56 2008/01/02 11:48:22 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -190,7 +190,7 @@ readdisklabel(dev, strat, lp, clp)
bp->b_blkno = nextb;
bp->b_cylinder = bp->b_blkno / lp->d_secpercyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
#ifdef SD_C_ADJUSTS_NR
bp->b_blkno *= (lp->d_secsize / DEV_BSIZE);
@ -307,7 +307,7 @@ readdisklabel(dev, strat, lp, clp)
bp->b_blkno = nextb;
bp->b_cylinder = bp->b_blkno / lp->d_secpercyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
#ifdef SD_C_ADJUSTS_NR
bp->b_blkno *= (lp->d_secsize / DEV_BSIZE);
@ -571,7 +571,8 @@ writedisklabel(dev, strat, lp, clp)
dlp = (struct disklabel *)((char*)bp->b_data + LABELOFFSET);
*dlp = *lp; /* struct assignment */
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: fd.c,v 1.77 2007/10/17 19:53:15 garbled Exp $ */
/* $NetBSD: fd.c,v 1.78 2008/01/02 11:48:22 ad Exp $ */
/*
* Copyright (c) 1994 Christian E. Hopps
@ -33,7 +33,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.77 2007/10/17 19:53:15 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.78 2008/01/02 11:48:22 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -912,7 +912,8 @@ fdputdisklabel(struct fd_softc *sc, dev_t dev)
bcopy(lp, dlp, sizeof(struct disklabel));
bp->b_blkno = 0;
bp->b_cylinder = 0;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_flags &= ~(B_READ);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_WRITE;
fdstrategy(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.27 2007/10/17 19:53:27 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.28 2008/01/02 11:48:22 ad Exp $ */
/* $OpenBSD: disksubr.c,v 1.14 1997/05/08 00:14:29 deraadt Exp $ */
/* NetBSD: disksubr.c,v 1.40 1999/05/06 15:45:51 christos Exp */
@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.27 2007/10/17 19:53:27 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.28 2008/01/02 11:48:22 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -270,7 +270,7 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -321,7 +321,7 @@ nombrpart:
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -475,7 +475,7 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -489,7 +489,8 @@ nombrpart:
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.19 2007/10/17 19:53:30 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.20 2008/01/02 11:48:23 ad Exp $ */
/*
* Copyright (c) 1998 Christopher G. Demetriou. All rights reserved.
@ -97,7 +97,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.19 2007/10/17 19:53:30 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.20 2008/01/02 11:48:23 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -224,7 +224,7 @@ readdisklabel(dev, strat, lp, osdep)
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -376,7 +376,7 @@ writedisklabel(dev, strat, lp, osdep)
bp->b_blkno = netbsdpartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -390,7 +390,8 @@ writedisklabel(dev, strat, lp, osdep)
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_flags &= ~(B_READ);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: disksubr_acorn.c,v 1.7 2007/10/17 19:53:30 garbled Exp $ */
/* $NetBSD: disksubr_acorn.c,v 1.8 2008/01/02 11:48:23 ad Exp $ */
/*
* Copyright (c) 1998 Christopher G. Demetriou. All rights reserved.
@ -97,7 +97,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr_acorn.c,v 1.7 2007/10/17 19:53:30 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr_acorn.c,v 1.8 2008/01/02 11:48:23 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -259,7 +259,7 @@ filecore_label_read(dev, strat, lp, osdep, msgp, cylp, netbsd_label_offp)
bp->b_blkno);*/
bp->b_cylinder = bp->b_blkno / lp->d_secpercyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.34 2007/10/17 19:53:45 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.35 2008/01/02 11:48:23 ad Exp $ */
/*
* Copyright (c) 1995 Leo Weppelman.
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.34 2007/10/17 19:53:45 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.35 2008/01/02 11:48:23 ad Exp $");
#ifndef DISKLABEL_NBDA
#define DISKLABEL_NBDA /* required */
@ -234,7 +234,8 @@ writedisklabel(dev, strat, lp, clp)
bb->bb_magic = (blk == 0) ? NBDAMAGIC : AHDIMAGIC;
BBSETLABEL(bb, lp);
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
bp->b_bcount = BBMINSIZE;
bp->b_blkno = blk;

@ -1,4 +1,4 @@
/* $NetBSD: hdfd.c,v 1.59 2007/10/17 19:53:47 garbled Exp $ */
/* $NetBSD: hdfd.c,v 1.60 2008/01/02 11:48:23 ad Exp $ */
/*-
* Copyright (c) 1996 Leo Weppelman
@ -91,7 +91,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hdfd.c,v 1.59 2007/10/17 19:53:47 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: hdfd.c,v 1.60 2008/01/02 11:48:23 ad Exp $");
#include "opt_ddb.h"
@ -1531,11 +1531,12 @@ fdformat(dev, finfo, p)
struct buf *bp;
/* set up a buffer header for fdstrategy() */
bp = (struct buf *)malloc(sizeof(struct buf), M_TEMP, M_NOWAIT);
bp = getiobuf(NULL, false);
if(bp == 0)
return ENOBUFS;
bzero((void *)bp, sizeof(struct buf));
bp->b_flags = B_BUSY | B_PHYS | B_FORMAT;
bp->b_flags = B_PHYS | B_FORMAT;
bp->b_cflags |= BC_BUSY;
bp->b_proc = p;
bp->b_dev = dev;
@ -1557,13 +1558,13 @@ fdformat(dev, finfo, p)
fdstrategy(bp);
/* ...and wait for it to complete */
s = splbio();
while(!(bp->b_flags & B_DONE)) {
rv = tsleep((void *)bp, PRIBIO, "fdform", 20 * hz);
mutex_enter(bp->b_objlock);
while(!(bp->b_oflags & BO_DONE)) {
rv = cv_timedwait(&bp->b_done, bp->b_objlock, 20 * hz);
if (rv == EWOULDBLOCK)
break;
}
splx(s);
mutex_exit(bp->b_objlock);
if (rv == EWOULDBLOCK) {
/* timed out */
@ -1572,7 +1573,7 @@ fdformat(dev, finfo, p)
} else if (bp->b_error != 0) {
rv = bp->b_error;
}
free(bp, M_TEMP);
putiobuf(bp);
return rv;
}

@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: md_root.c,v 1.24 2007/10/17 19:53:47 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: md_root.c,v 1.25 2008/01/02 11:48:24 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -157,7 +157,7 @@ struct md_conf *md;
dev_t ld_dev;
struct lwp *lwp;
{
struct buf buf;
struct buf *buf;
int error;
const struct bdevsw *bdp;
struct disklabel dl;
@ -170,17 +170,16 @@ struct lwp *lwp;
/*
* Initialize our buffer header:
*/
memset(&buf, 0, sizeof(buf));
buf.b_vnbufs.le_next = NOLIST;
buf.b_flags = B_BUSY;
buf.b_dev = ld_dev;
buf.b_error = 0;
buf.b_proc = lwp->l_proc;
buf = getiobuf(NULL, false);
buf->b_cflags = BC_BUSY;
buf->b_dev = ld_dev;
buf->b_error = 0;
buf->b_proc = lwp->l_proc;
/*
* Setup read_info:
*/
rs.bp = &buf;
rs.bp = buf;
rs.nbytes = md->md_size;
rs.offset = 0;
rs.bufp = md->md_addr;
@ -192,8 +191,10 @@ struct lwp *lwp;
/*
* Open device and try to get some statistics.
*/
if((error = bdp->d_open(ld_dev, FREAD | FNONBLOCK, 0, lwp)) != 0)
if((error = bdp->d_open(ld_dev, FREAD | FNONBLOCK, 0, lwp)) != 0) {
putiobuf(buf);
return(error);
}
if(bdp->d_ioctl(ld_dev, DIOCGDINFO, (void *)&dl, FREAD, lwp) == 0) {
/* Read on a cylinder basis */
rs.chunk = dl.d_secsize * dl.d_secpercyl;
@ -208,6 +209,7 @@ struct lwp *lwp;
error = ramd_norm_read(&rs);
bdp->d_close(ld_dev,FREAD | FNONBLOCK, 0, lwp);
putiobuf(buf);
return(error);
}
@ -218,7 +220,6 @@ struct read_info *rsp;
long bytes_left;
int done, error;
struct buf *bp;
int s;
int dotc = 0;
bytes_left = rsp->nbytes;
@ -226,9 +227,8 @@ struct read_info *rsp;
error = 0;
while(bytes_left > 0) {
s = splbio();
bp->b_flags = B_BUSY | B_PHYS | B_READ;
splx(s);
bp->b_cflags = BC_BUSY;
bp->b_flags = B_PHYS | B_READ;
bp->b_blkno = btodb(rsp->offset);
bp->b_bcount = rsp->chunk;
bp->b_data = rsp->bufp;
@ -238,10 +238,7 @@ struct read_info *rsp;
(*rsp->strat)(bp);
/* Wait for results */
s = splbio();
while ((bp->b_flags & B_DONE) == 0)
tsleep((void *) bp, PRIBIO + 1, "ramd_norm_read", 0);
splx(s);
biowait(bp);
error = bp->b_error;
/* Dot counter */
@ -300,7 +297,6 @@ int nbyte;
static int dotc = 0;
struct buf *bp;
int nread = 0;
int s;
int done, error;
@ -309,9 +305,8 @@ int nbyte;
nbyte &= ~(DEV_BSIZE - 1);
while(nbyte > 0) {
s = splbio();
bp->b_flags = B_BUSY | B_PHYS | B_READ;
splx(s);
bp->b_cflags = BC_BUSY;
bp->b_flags = B_PHYS | B_READ;
bp->b_blkno = btodb(rsp->offset);
bp->b_bcount = min(rsp->chunk, nbyte);
bp->b_data = buf;
@ -321,11 +316,7 @@ int nbyte;
(*rsp->strat)(bp);
/* Wait for results */
s = splbio();
while ((bp->b_flags & B_DONE) == 0)
tsleep((void *) bp, PRIBIO + 1, "ramd_norm_read", 0);
error = bp->b_error;
splx(s);
biowait(bp);
/* Dot counter */
printf(".");
@ -348,8 +339,6 @@ int nbyte;
rsp->offset = 0;
}
}
s = splbio();
splx(s);
return(nread);
}
#endif /* support_compression */
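
The md_root.c hunks above replace a stack-allocated struct buf and a hand-rolled B_DONE/tsleep() loop with a getiobuf() buffer whose completion is collected by biowait(). A compressed sketch of the per-chunk read after the change (names taken from the hunks, error handling and the dot counter omitted):

    bp = getiobuf(NULL, false);
    if (bp == NULL)
            return ENOMEM;

    while (bytes_left > 0) {
            bp->b_cflags = BC_BUSY;          /* BC_BUSY, not B_BUSY in b_flags */
            bp->b_flags  = B_PHYS | B_READ;
            bp->b_blkno  = btodb(rsp->offset);
            bp->b_bcount = rsp->chunk;
            bp->b_data   = rsp->bufp;

            (*rsp->strat)(bp);               /* start the read */
            error = biowait(bp);             /* replaces the B_DONE wait loop */
            if (error)
                    break;

            bytes_left  -= bp->b_bcount;
            rsp->offset += bp->b_bcount;
            rsp->bufp   += bp->b_bcount;     /* assumes bufp is a byte pointer */
    }

    putiobuf(bp);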

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.22 2007/10/17 19:54:08 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.23 2008/01/02 11:48:24 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.22 2007/10/17 19:54:08 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.23 2008/01/02 11:48:24 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -190,7 +190,7 @@ readdisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -227,7 +227,7 @@ readdisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -366,7 +366,7 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -380,7 +380,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.16 2007/10/17 19:54:15 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.17 2008/01/02 11:48:24 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.16 2007/10/17 19:54:15 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.17 2008/01/02 11:48:24 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -111,7 +111,7 @@ readdisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -216,7 +216,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
dlp = (struct disklabel *)((char *)bp->b_data + LABELOFFSET);
*dlp = *lp; /* struct assignment */
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.15 2007/10/17 19:54:17 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.16 2008/01/02 11:48:24 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.15 2007/10/17 19:54:17 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.16 2008/01/02 11:48:24 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -121,7 +121,7 @@ readdisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~BO_DONE;
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -246,7 +246,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags = B_BUSY | B_WRITE;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_WRITE;
(*strat)(bp);
error = biowait(bp);
goto done;

@ -1,4 +1,4 @@
/* $NetBSD: sector.c,v 1.5 2007/10/17 19:54:21 garbled Exp $ */
/* $NetBSD: sector.c,v 1.6 2008/01/02 11:48:24 ad Exp $ */
/*-
* Copyright (c) 2004 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sector.c,v 1.5 2007/10/17 19:54:21 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: sector.c,v 1.6 2008/01/02 11:48:24 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -98,7 +98,7 @@ sector_read(void *self, uint8_t *buf, daddr_t sector)
b->b_blkno = sector;
b->b_cylinder = sector / 100;
b->b_bcount = DEV_BSIZE;
b->b_flags &= ~(B_DONE);
b->b_oflags &= ~(BO_DONE);
b->b_flags |= B_READ;
rw->strategy(b);
@ -134,7 +134,8 @@ sector_write(void *self, uint8_t *buf, daddr_t sector)
b->b_blkno = sector;
b->b_cylinder = sector / 100;
b->b_bcount = DEV_BSIZE;
b->b_flags &= ~(B_READ | B_DONE);
b->b_flags &= ~(B_READ);
b->b_oflags &= ~(BO_DONE);
b->b_flags |= B_WRITE;
memcpy(b->b_data, buf, DEV_BSIZE);
rw->strategy(b);

@ -1,4 +1,4 @@
/* $NetBSD: ct.c,v 1.51 2007/10/17 19:54:22 garbled Exp $ */
/* $NetBSD: ct.c,v 1.52 2008/01/02 11:48:24 ad Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@ -82,7 +82,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ct.c,v 1.51 2007/10/17 19:54:22 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: ct.c,v 1.52 2008/01/02 11:48:24 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -468,7 +468,7 @@ ctcommand(dev_t dev, int cmd, int cnt)
}
while (cnt-- > 0) {
bp->b_flags = B_BUSY;
bp->b_cflags = BC_BUSY;
if (cmd == MTBSF) {
sc->sc_blkno = sc->sc_eofs[sc->sc_eofp];
sc->sc_eofp--;

@ -1,4 +1,4 @@
/* $NetBSD: mt.c,v 1.40 2007/10/17 19:54:23 garbled Exp $ */
/* $NetBSD: mt.c,v 1.41 2008/01/02 11:48:25 ad Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mt.c,v 1.40 2007/10/17 19:54:23 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: mt.c,v 1.41 2008/01/02 11:48:25 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -435,13 +435,14 @@ mtcommand(dev_t dev, int cmd, int cnt)
int error = 0;
#if 1
if (bp->b_flags & B_BUSY)
if (bp->b_cflags & BC_BUSY)
return EBUSY;
#endif
bp->b_cmd = cmd;
bp->b_dev = dev;
do {
bp->b_flags = B_BUSY | B_CMD;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_CMD;
mtstrategy(bp);
biowait(bp);
if (bp->b_error != 0) {
@ -450,9 +451,9 @@ mtcommand(dev_t dev, int cmd, int cnt)
}
} while (--cnt > 0);
#if 0
bp->b_flags = 0 /*&= ~B_BUSY*/;
bp->b_flags = 0 /*&= ~BC_BUSY*/;
#else
bp->b_flags &= ~B_BUSY;
bp->b_flags &= ~BC_BUSY;
#endif
return error;
}

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.26 2007/10/17 19:54:23 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.27 2008/01/02 11:48:25 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988, 1993
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.26 2007/10/17 19:54:23 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.27 2008/01/02 11:48:25 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -179,7 +179,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.23 2007/10/17 19:54:26 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.24 2008/01/02 11:48:25 ad Exp $ */
/* $OpenBSD: disksubr.c,v 1.6 2000/10/18 21:00:34 mickey Exp $ */
@ -68,7 +68,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.23 2007/10/17 19:54:26 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.24 2008/01/02 11:48:25 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -101,7 +101,8 @@ readbsdlabel(struct buf *bp, void (*strat)(struct buf *), int cyl, int sec,
bp->b_blkno = sec;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
(*strat)(bp);
/* if successful, locate disk label within block and validate */
@ -208,7 +209,8 @@ readliflabel(struct buf *bp, void (*strat)(struct buf *), struct disklabel *lp,
/* read LIF volume header */
bp->b_blkno = btodb(HP700_LIF_VOLSTART);
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
bp->b_cylinder = btodb(HP700_LIF_VOLSTART) / lp->d_secpercyl;
(*strat)(bp);
@ -231,7 +233,8 @@ readliflabel(struct buf *bp, void (*strat)(struct buf *), struct disklabel *lp,
/* read LIF directory */
dbp->b_blkno = btodb(HP700_LIF_DIRSTART);
dbp->b_bcount = lp->d_secsize;
dbp->b_flags = B_BUSY | B_READ;
dbp->b_cflags = BC_BUSY;
dbp->b_flags = B_READ;
dbp->b_cylinder = (HP700_LIF_DIRSTART) / lp->d_secpercyl;
(*strat)(dbp);
@ -367,7 +370,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
*(struct disklabel *)((char *)bp->b_data + labeloffset) = *lp;
bp->b_flags = B_BUSY | B_WRITE;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_WRITE;
(*strat)(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.19 2007/10/17 19:54:27 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.20 2008/01/02 11:48:25 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.19 2007/10/17 19:54:27 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.20 2008/01/02 11:48:25 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -239,7 +239,7 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -276,7 +276,7 @@ nombrpart:
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -424,7 +424,7 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -438,7 +438,8 @@ nombrpart:
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.230 2008/01/01 21:28:40 yamt Exp $ */
/* $NetBSD: trap.c,v 1.231 2008/01/02 11:48:25 ad Exp $ */
/*-
* Copyright (c) 1998, 2000, 2005 The NetBSD Foundation, Inc.
@ -75,7 +75,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.230 2008/01/01 21:28:40 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.231 2008/01/02 11:48:25 ad Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
@ -472,25 +472,21 @@ copyfault:
return;
case T_PROTFLT|T_USER: /* protection fault */
KERNEL_LOCK(1, l);
#ifdef VM86
if (frame->tf_eflags & PSL_VM) {
vm86_gpfault(l, type & ~T_USER);
KERNEL_UNLOCK_LAST(l);
goto out;
}
#endif
/* If pmap_exec_fixup does something, let's retry the trap. */
if (pmap_exec_fixup(&p->p_vmspace->vm_map, frame,
&l->l_addr->u_pcb)) {
KERNEL_UNLOCK_LAST(l);
goto out;
}
KSI_INIT_TRAP(&ksi);
ksi.ksi_signo = SIGSEGV;
ksi.ksi_addr = (void *)rcr2();
ksi.ksi_code = SEGV_ACCERR;
KERNEL_UNLOCK_LAST(l);
goto trapsignal;
case T_TSSFLT|T_USER:
@ -621,7 +617,6 @@ copyfault:
#endif /* defined(XEN) && !defined(XEN3) */
cr2 = FETCH_CR2;
KERNEL_LOCK(1, NULL);
goto faultcommon;
case T_PAGEFLT|T_USER: { /* page fault */
@ -632,7 +627,6 @@ copyfault:
extern struct vm_map *kernel_map;
cr2 = FETCH_CR2;
KERNEL_LOCK(1, l);
faultcommon:
vm = p->p_vmspace;
if (vm == NULL)
@ -673,8 +667,6 @@ copyfault:
uvm_grow(p, va);
if (type == T_PAGEFLT) {
KERNEL_UNLOCK_ONE(NULL);
/*
* we need to switch pmap now if we're in
* the middle of copyin/out.
@ -692,7 +684,6 @@ copyfault:
}
return;
}
KERNEL_UNLOCK_LAST(l);
goto out;
}
KSI_INIT_TRAP(&ksi);
@ -707,10 +698,8 @@ copyfault:
if (type == T_PAGEFLT) {
onfault = onfault_handler(pcb, frame);
if (onfault != NULL) {
KERNEL_UNLOCK_ONE(NULL);
if (onfault != NULL)
goto copyfault;
}
printf("uvm_fault(%p, %#lx, %d) -> %#x\n",
map, va, ftype, error);
goto we_re_toast;
@ -725,11 +714,6 @@ copyfault:
ksi.ksi_signo = SIGSEGV;
}
(*p->p_emul->e_trapsignal)(l, &ksi);
if (type != T_PAGEFLT) {
KERNEL_UNLOCK_LAST(l);
} else {
KERNEL_UNLOCK_ONE(NULL);
}
break;
}
@ -761,9 +745,7 @@ copyfault:
else
ksi.ksi_code = TRAP_TRACE;
ksi.ksi_addr = (void *)frame->tf_eip;
KERNEL_LOCK(1, l);
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
}
break;
@ -811,9 +793,7 @@ out:
return;
trapsignal:
ksi.ksi_trap = type & ~T_USER;
KERNEL_LOCK(1, l);
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
userret(l);
}
@ -835,7 +815,5 @@ startlwp(arg)
}
#endif
pool_put(&lwp_uc_pool, uc);
KERNEL_UNLOCK_LAST(l);
userret(l);
}

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.56 2007/10/17 19:55:14 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.57 2008/01/02 11:48:26 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -65,7 +65,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.56 2007/10/17 19:55:14 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.57 2008/01/02 11:48:26 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -553,7 +553,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.42 2007/10/17 19:55:32 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.43 2008/01/02 11:48:26 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -106,7 +106,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.42 2007/10/17 19:55:32 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.43 2008/01/02 11:48:26 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -695,8 +695,9 @@ writedisklabel(dev, strat, lp, osdep)
if (error != 0)
goto done;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_flags &= ~B_READ;
bp->b_flags |= B_WRITE;
bp->b_oflags &= ~BO_DONE;
memcpy((char *)bp->b_data + osdep->cd_labeloffset, (void *)lp,
sizeof *lp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.22 2007/12/24 15:06:38 ad Exp $ */
/* $NetBSD: disksubr.c,v 1.23 2008/01/02 11:48:26 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.22 2007/12/24 15:06:38 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.23 2008/01/02 11:48:26 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -226,7 +226,8 @@ writedisklabel(dev, strat, lp, clp)
goto ioerror;
/* Write MIPS RISC/os label to first sector */
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_flags &= ~(B_READ);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_WRITE;
(*strat)(bp);
if ((error = biowait(bp)) != 0)
@ -238,7 +239,8 @@ writedisklabel(dev, strat, lp, clp)
bp->b_blkno = LABELSECTOR;
bp->b_bcount = lp->d_secsize;
bp->b_cylinder = bp->b_blkno / lp->d_secpercyl;
bp->b_flags &= ~(B_READ | B_DONE);
bp->b_flags &= ~(B_READ);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.30 2007/10/17 19:55:53 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.31 2008/01/02 11:48:26 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.30 2007/10/17 19:55:53 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.31 2008/01/02 11:48:26 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -178,7 +178,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.27 2007/10/17 19:55:55 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.28 2008/01/02 11:48:26 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.27 2007/10/17 19:55:55 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.28 2008/01/02 11:48:26 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -178,7 +178,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_flags &= ~(B_READ);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.15 2007/10/17 19:56:14 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.16 2008/01/02 11:48:26 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.15 2007/10/17 19:56:14 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.16 2008/01/02 11:48:26 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -188,7 +188,7 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -224,7 +224,7 @@ nombrpart:
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -363,7 +363,7 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -376,7 +376,8 @@ nombrpart:
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.47 2007/10/17 19:56:15 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.48 2008/01/02 11:48:27 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.47 2007/10/17 19:56:15 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.48 2008/01/02 11:48:27 ad Exp $");
#include "opt_compat_ultrix.h"
@ -281,7 +281,8 @@ writedisklabel(dev, strat, lp, osdep)
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: intr.c,v 1.19 2007/12/03 15:34:11 ad Exp $ */
/* $NetBSD: intr.c,v 1.20 2008/01/02 11:48:27 ad Exp $ */
/*
* Copyright 2002 Wasabi Systems, Inc.
@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.19 2007/12/03 15:34:11 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.20 2008/01/02 11:48:27 ad Exp $");
#include <sys/param.h>
#include <sys/malloc.h>
@ -256,13 +256,15 @@ ext_intr(void)
disable_irq(i);
wrteei(1);
KERNEL_LOCK(1, NULL);
ih = intrs[i].is_head;
while (ih) {
if (ih->ih_level == IPL_VM)
KERNEL_LOCK(1, NULL);
(*ih->ih_fun)(ih->ih_arg);
ih = ih->ih_next;
if (ih->ih_level == IPL_VM)
KERNEL_UNLOCK_ONE(NULL);
}
KERNEL_UNLOCK_ONE(NULL);
mtmsr(msr);
if (intrs[i].is_type == IST_LEVEL)
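
The ext_intr() hunk above narrows the big-lock hold time: instead of wrapping the whole handler chain in KERNEL_LOCK, only handlers registered at IPL_VM run under it. A self-contained sketch of that loop shape, assuming the unlock is taken before stepping to the next handler and using a minimal stand-in for the machine-dependent struct intrhand:

#include <sys/systm.h>
#include <sys/intr.h>

/* Minimal stand-in for the MD interrupt handler record (fields as in the hunk). */
struct intrhand_sketch {
        int     (*ih_fun)(void *);
        void    *ih_arg;
        int     ih_level;
        struct intrhand_sketch *ih_next;
};

/* Run a handler chain, taking the big lock only around IPL_VM handlers. */
static void
run_handler_chain(struct intrhand_sketch *ih)
{
        while (ih != NULL) {
                if (ih->ih_level == IPL_VM)
                        KERNEL_LOCK(1, NULL);
                (*ih->ih_fun)(ih->ih_arg);
                if (ih->ih_level == IPL_VM)
                        KERNEL_UNLOCK_ONE(NULL);
                ih = ih->ih_next;
        }
}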

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.47 2007/11/28 12:22:28 simonb Exp $ */
/* $NetBSD: trap.c,v 1.48 2008/01/02 11:48:27 ad Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.47 2007/11/28 12:22:28 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.48 2008/01/02 11:48:27 ad Exp $");
#include "opt_altivec.h"
#include "opt_ddb.h"
@ -166,9 +166,7 @@ trap(struct trapframe *frame)
ksi.ksi_signo = SIGTRAP;
ksi.ksi_trap = EXC_TRC;
ksi.ksi_addr = (void *)frame->srr0;
KERNEL_LOCK(1, l);
trapsignal(l, &ksi);
KERNEL_UNLOCK_LAST(l);
break;
/*
@ -183,7 +181,6 @@ trap(struct trapframe *frame)
vaddr_t va;
struct faultbuf *fb = NULL;
KERNEL_LOCK(1, NULL);
va = frame->dar;
if (frame->tf_xtra[TF_PID] == KERNEL_PID) {
map = kernel_map;
@ -200,7 +197,6 @@ trap(struct trapframe *frame)
(ftype & VM_PROT_WRITE) ? "write" : "read",
(void *)va, frame->tf_xtra[TF_ESR]));
rv = uvm_fault(map, trunc_page(va), ftype);
KERNEL_UNLOCK_ONE(NULL);
if (rv == 0)
goto done;
if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
@ -221,8 +217,6 @@ trap(struct trapframe *frame)
case EXC_DSI|EXC_USER:
/* FALLTHROUGH */
case EXC_DTMISS|EXC_USER:
KERNEL_LOCK(1, l);
if (frame->tf_xtra[TF_ESR] & (ESR_DST|ESR_DIZ))
ftype = VM_PROT_WRITE;
@ -234,7 +228,6 @@ trap(struct trapframe *frame)
rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->dar),
ftype);
if (rv == 0) {
KERNEL_UNLOCK_LAST(l);
break;
}
KSI_INIT_TRAP(&ksi);
@ -250,12 +243,10 @@ trap(struct trapframe *frame)
ksi.ksi_signo = SIGKILL;
}
trapsignal(l, &ksi);
KERNEL_UNLOCK_LAST(l);
break;
case EXC_ITMISS|EXC_USER:
case EXC_ISI|EXC_USER:
KERNEL_LOCK(1, l);
ftype = VM_PROT_EXECUTE;
DBPRINTF(TDB_ALL,
("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
@ -263,7 +254,6 @@ trap(struct trapframe *frame)
rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0),
ftype);
if (rv == 0) {
KERNEL_UNLOCK_LAST(l);
break;
}
KSI_INIT_TRAP(&ksi);
@ -272,7 +262,6 @@ trap(struct trapframe *frame)
ksi.ksi_addr = (void *)frame->srr0;
ksi.ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
trapsignal(l, &ksi);
KERNEL_UNLOCK_LAST(l);
break;
case EXC_AST|EXC_USER:
@ -289,7 +278,6 @@ trap(struct trapframe *frame)
case EXC_ALI|EXC_USER:
KERNEL_LOCK(1, l);
if (fix_unaligned(l, frame) != 0) {
KSI_INIT_TRAP(&ksi);
ksi.ksi_signo = SIGBUS;
@ -298,7 +286,6 @@ trap(struct trapframe *frame)
trapsignal(l, &ksi);
} else
frame->srr0 += 4;
KERNEL_UNLOCK_LAST(l);
break;
case EXC_PGM|EXC_USER:
@ -320,9 +307,7 @@ trap(struct trapframe *frame)
ksi.ksi_signo = rv;
ksi.ksi_trap = EXC_PGM;
ksi.ksi_addr = (void *)frame->srr0;
KERNEL_LOCK(1, l);
trapsignal(l, &ksi);
KERNEL_UNLOCK_LAST(l);
}
break;

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.49 2007/12/15 00:39:23 perry Exp $ */
/* $NetBSD: pmap.c,v 1.50 2008/01/02 11:48:27 ad Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@ -70,7 +70,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.49 2007/12/15 00:39:23 perry Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.50 2008/01/02 11:48:27 ad Exp $");
#include "opt_ppcarch.h"
#include "opt_altivec.h"
@ -83,6 +83,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.49 2007/12/15 00:39:23 perry Exp $");
#include <sys/queue.h>
#include <sys/device.h> /* for evcnt */
#include <sys/systm.h>
#include <sys/atomic.h>
#if __NetBSD_Version__ < 105010000
#include <vm/vm.h>
@ -515,6 +516,8 @@ mfsrin(vaddr_t va)
extern void mfmsr64 (register64_t *result);
#endif /* PPC_OEA64_BRIDGE */
#define PMAP_LOCK() KERNEL_LOCK(1, NULL)
#define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL)
static inline register_t
pmap_interrupts_off(void)
@ -942,6 +945,8 @@ pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
volatile struct pteg *pteg;
volatile struct pte *pt;
PMAP_LOCK();
ptegidx = va_to_pteg(pm, addr);
/*
@ -1015,6 +1020,7 @@ pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
TAILQ_REMOVE(pvoh, pvo, pvo_olink);
TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
}
PMAP_UNLOCK();
return 1;
}
source_pvo = pvo;
@ -1040,6 +1046,7 @@ pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
if (source_pvo == NULL) {
PMAPCOUNT(ptes_unspilled);
PMAP_UNLOCK();
return 0;
}
@ -1112,6 +1119,8 @@ pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
PMAP_PVO_CHECK(victim_pvo);
PMAP_PVO_CHECK(source_pvo);
PMAP_UNLOCK();
return 1;
}
@ -1228,6 +1237,7 @@ pmap_pinit(pmap_t pm)
* Allocate some segment registers for this pmap.
*/
pm->pm_refs = 1;
PMAP_LOCK();
for (i = 0; i < NPMAPS; i += VSID_NBPW) {
static register_t pmap_vsidcontext;
register_t hash;
@ -1267,8 +1277,10 @@ pmap_pinit(pmap_t pm)
pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
SR_NOEXEC;
#endif
PMAP_UNLOCK();
return;
}
PMAP_UNLOCK();
panic("pmap_pinit: out of segments");
}
@ -1278,7 +1290,7 @@ pmap_pinit(pmap_t pm)
void
pmap_reference(pmap_t pm)
{
pm->pm_refs++;
atomic_inc_uint(&pm->pm_refs);
}
/*
@ -1288,7 +1300,7 @@ pmap_reference(pmap_t pm)
void
pmap_destroy(pmap_t pm)
{
if (--pm->pm_refs == 0) {
if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
pmap_release(pm);
pool_put(&pmap_pool, pm);
}
@ -1306,6 +1318,7 @@ pmap_release(pmap_t pm)
KASSERT(pm->pm_stats.resident_count == 0);
KASSERT(pm->pm_stats.wired_count == 0);
PMAP_LOCK();
if (pm->pm_sr[0] == 0)
panic("pmap_release");
idx = pm->pm_vsid & (NPMAPS-1);
@ -1314,6 +1327,7 @@ pmap_release(pmap_t pm)
KASSERT(pmap_vsid_bitmap[idx] & mask);
pmap_vsid_bitmap[idx] &= ~mask;
PMAP_UNLOCK();
}
/*
@ -1471,6 +1485,8 @@ pmap_pvo_check(const struct pvo_entry *pvo)
volatile struct pte *pt;
int failed = 0;
PMAP_LOCK();
if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
panic("pmap_pvo_check: pvo %p: invalid address", pvo);
@ -1563,6 +1579,8 @@ pmap_pvo_check(const struct pvo_entry *pvo)
if (failed)
panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
pvo->pvo_pmap);
PMAP_UNLOCK();
}
#endif /* DEBUG || PMAPCHECK */
@ -1942,6 +1960,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
u_int pvo_flags;
u_int was_exec = 0;
PMAP_LOCK();
if (__predict_false(!pmap_initialized)) {
pvo_head = &pmap_pvo_kunmanaged;
pl = &pmap_upvo_pool;
@ -2035,6 +2055,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
DPRINTFN(ENTER, (": error=%d\n", error));
PMAP_UNLOCK();
return error;
}
@ -2054,6 +2076,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
DPRINTFN(KENTER,
("pmap_kenter_pa(%#lx,%#lx,%#x)\n", va, pa, prot));
PMAP_LOCK();
/*
* Assume the page is cache inhibited and access is guarded unless
* it's in our available memory array. If it is in the memory array,
@ -2083,6 +2107,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
if (error != 0)
panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d",
va, pa, error);
PMAP_UNLOCK();
}
void
@ -2107,6 +2133,7 @@ pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
register_t msr;
int pteidx;
PMAP_LOCK();
LIST_INIT(&pvol);
msr = pmap_interrupts_off();
for (; va < endva; va += PAGE_SIZE) {
@ -2117,6 +2144,7 @@ pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
}
pmap_interrupts_restore(msr);
pmap_pvo_free_list(&pvol);
PMAP_UNLOCK();
}
/*
@ -2128,6 +2156,7 @@ pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
struct pvo_entry *pvo;
register_t msr;
PMAP_LOCK();
/*
* If this is a kernel pmap lookup, also check the battable
@ -2149,6 +2178,7 @@ pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
(~(batu & BAT_BL) << 15) & ~0x1ffffL;
if (pap)
*pap = (batl & mask) | (va & ~mask);
PMAP_UNLOCK();
return true;
}
} else {
@ -2161,14 +2191,17 @@ pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
(~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
if (pap)
*pap = (batl & mask) | (va & ~mask);
PMAP_UNLOCK();
return true;
} else if (SR601_VALID_P(sr) &&
SR601_PA_MATCH_P(sr, va)) {
if (pap)
*pap = va;
PMAP_UNLOCK();
return true;
}
}
PMAP_UNLOCK();
return false;
#elif defined (PPC_OEA64_BRIDGE)
panic("%s: pm: %s, va: 0x%08lx\n", __func__,
@ -2187,6 +2220,7 @@ pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
| (va & ADDR_POFF);
}
pmap_interrupts_restore(msr);
PMAP_UNLOCK();
return pvo != NULL;
}
@ -2216,6 +2250,8 @@ pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
return;
}
PMAP_LOCK();
msr = pmap_interrupts_off();
for (; va < endva; va += PAGE_SIZE) {
pvo = pmap_pvo_find_va(pm, va, &pteidx);
@ -2261,6 +2297,7 @@ pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
PMAP_PVO_CHECK(pvo); /* sanity check */
}
pmap_interrupts_restore(msr);
PMAP_UNLOCK();
}
void
@ -2269,6 +2306,7 @@ pmap_unwire(pmap_t pm, vaddr_t va)
struct pvo_entry *pvo;
register_t msr;
PMAP_LOCK();
msr = pmap_interrupts_off();
pvo = pmap_pvo_find_va(pm, va, NULL);
if (pvo != NULL) {
@ -2279,6 +2317,7 @@ pmap_unwire(pmap_t pm, vaddr_t va)
PMAP_PVO_CHECK(pvo); /* sanity check */
}
pmap_interrupts_restore(msr);
PMAP_UNLOCK();
}
/*
@ -2292,6 +2331,8 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
volatile struct pte *pt;
register_t msr;
PMAP_LOCK();
KASSERT(prot != VM_PROT_ALL);
LIST_INIT(&pvol);
msr = pmap_interrupts_off();
@ -2356,6 +2397,8 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
}
pmap_interrupts_restore(msr);
pmap_pvo_free_list(&pvol);
PMAP_UNLOCK();
}
/*
@ -2401,8 +2444,12 @@ pmap_query_bit(struct vm_page *pg, int ptebit)
volatile struct pte *pt;
register_t msr;
if (pmap_attr_fetch(pg) & ptebit)
PMAP_LOCK();
if (pmap_attr_fetch(pg) & ptebit) {
PMAP_UNLOCK();
return true;
}
msr = pmap_interrupts_off();
LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
@ -2415,6 +2462,7 @@ pmap_query_bit(struct vm_page *pg, int ptebit)
pmap_attr_save(pg, ptebit);
PMAP_PVO_CHECK(pvo); /* sanity check */
pmap_interrupts_restore(msr);
PMAP_UNLOCK();
return true;
}
}
@ -2438,11 +2486,13 @@ pmap_query_bit(struct vm_page *pg, int ptebit)
pmap_attr_save(pg, ptebit);
PMAP_PVO_CHECK(pvo); /* sanity check */
pmap_interrupts_restore(msr);
PMAP_UNLOCK();
return true;
}
}
}
pmap_interrupts_restore(msr);
PMAP_UNLOCK();
return false;
}
@ -2455,6 +2505,7 @@ pmap_clear_bit(struct vm_page *pg, int ptebit)
register_t msr;
int rv = 0;
PMAP_LOCK();
msr = pmap_interrupts_off();
/*
@ -2523,6 +2574,7 @@ pmap_clear_bit(struct vm_page *pg, int ptebit)
PMAPCOUNT(exec_synced_clear_modify);
}
}
PMAP_UNLOCK();
return (rv & ptebit) != 0;
}
@ -2533,6 +2585,7 @@ pmap_procwr(struct proc *p, vaddr_t va, size_t len)
size_t offset = va & ADDR_POFF;
int s;
PMAP_LOCK();
s = splvm();
while (len > 0) {
size_t seglen = PAGE_SIZE - offset;
@ -2549,6 +2602,7 @@ pmap_procwr(struct proc *p, vaddr_t va, size_t len)
offset = 0;
}
splx(s);
PMAP_UNLOCK();
}
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
@ -2822,15 +2876,19 @@ pmap_pool_ualloc(struct pool *pp, int flags)
{
struct pvo_page *pvop;
if (uvm.page_init_done != true) {
return (void *) uvm_pageboot_alloc(PAGE_SIZE);
}
PMAP_LOCK();
pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
if (pvop != NULL) {
pmap_upvop_free--;
SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
PMAP_UNLOCK();
return pvop;
}
if (uvm.page_init_done != true) {
return (void *) uvm_pageboot_alloc(PAGE_SIZE);
}
PMAP_UNLOCK();
return pmap_pool_malloc(pp, flags);
}
@ -2840,12 +2898,15 @@ pmap_pool_malloc(struct pool *pp, int flags)
struct pvo_page *pvop;
struct vm_page *pg;
PMAP_LOCK();
pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
if (pvop != NULL) {
pmap_mpvop_free--;
SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
PMAP_UNLOCK();
return pvop;
}
PMAP_UNLOCK();
again:
pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
@ -2870,11 +2931,13 @@ pmap_pool_ufree(struct pool *pp, void *va)
return;
}
#endif
PMAP_LOCK();
pvop = va;
SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
pmap_upvop_free++;
if (pmap_upvop_free > pmap_upvop_maxfree)
pmap_upvop_maxfree = pmap_upvop_free;
PMAP_UNLOCK();
}
void
@ -2882,11 +2945,13 @@ pmap_pool_mfree(struct pool *pp, void *va)
{
struct pvo_page *pvop;
PMAP_LOCK();
pvop = va;
SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
pmap_mpvop_free++;
if (pmap_mpvop_free > pmap_mpvop_maxfree)
pmap_mpvop_maxfree = pmap_mpvop_free;
PMAP_UNLOCK();
#if 0
uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
#endif
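
Two idioms recur throughout the oea pmap hunks above: a coarse PMAP_LOCK()/PMAP_UNLOCK() pair that simply wraps the big kernel lock, and lock-free reference counting on pm_refs via <sys/atomic.h>. A self-contained sketch of the reference-count half; the structure and function names are invented for illustration:

#include <sys/types.h>
#include <sys/atomic.h>

/* Illustrative object with an atomically managed reference count. */
struct refobj {
        unsigned int    ro_refs;
};

static void
refobj_reference(struct refobj *ro)
{
        /* No lock needed: the increment itself is atomic. */
        atomic_inc_uint(&ro->ro_refs);
}

static bool
refobj_release(struct refobj *ro)
{
        /*
         * atomic_dec_uint_nv() returns the new value, so exactly one
         * caller observes zero and becomes responsible for tear-down.
         */
        return atomic_dec_uint_nv(&ro->ro_refs) == 0;
}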

View File

@ -1,4 +1,4 @@
/* $NetBSD: syscall.c,v 1.36 2007/11/05 20:43:04 ad Exp $ */
/* $NetBSD: syscall.c,v 1.37 2008/01/02 11:48:27 ad Exp $ */
/*
* Copyright (C) 2002 Matt Thomas
@ -60,7 +60,7 @@
#define EMULNAME(x) (x)
#define EMULNAMEU(x) (x)
__KERNEL_RCSID(0, "$NetBSD: syscall.c,v 1.36 2007/11/05 20:43:04 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: syscall.c,v 1.37 2008/01/02 11:48:27 ad Exp $");
void
child_return(void *arg)
@ -135,11 +135,9 @@ EMULNAME(syscall_plain)(struct trapframe *frame)
if (argsize > n * sizeof(register_t)) {
memcpy(args, params, n * sizeof(register_t));
KERNEL_LOCK(1, l);
error = copyin(MOREARGS(frame->fixreg[1]),
args + n,
argsize - n * sizeof(register_t));
KERNEL_UNLOCK_LAST(l);
if (error)
goto bad;
params = args;

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.122 2007/10/24 14:50:39 ad Exp $ */
/* $NetBSD: trap.c,v 1.123 2008/01/02 11:48:27 ad Exp $ */
/*
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.122 2007/10/24 14:50:39 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.123 2008/01/02 11:48:27 ad Exp $");
#include "opt_altivec.h"
#include "opt_ddb.h"
@ -112,9 +112,7 @@ trap(struct trapframe *frame)
ksi.ksi_trap = EXC_TRC;
ksi.ksi_addr = (void *)frame->srr0;
ksi.ksi_code = TRAP_TRACE;
KERNEL_LOCK(1, l);
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
}
break;
case EXC_DSI: {
@ -126,7 +124,6 @@ trap(struct trapframe *frame)
* Only query UVM if no interrupts are active.
*/
if (ci->ci_intrdepth < 0) {
KERNEL_LOCK(1, NULL);
if ((va >> ADDR_SR_SHFT) == pcb->pcb_kmapsr) {
va &= ADDR_PIDX | ADDR_POFF;
va |= pcb->pcb_umapsr << ADDR_SR_SHFT;
@ -137,8 +134,6 @@ trap(struct trapframe *frame)
vm_map_pmap(map)->pm_ste_evictions > 0 &&
pmap_ste_spill(vm_map_pmap(map),
trunc_page(va), false)) {
/* KERNEL_UNLOCK_LAST(l); */
KERNEL_UNLOCK_ONE(NULL);
return;
}
#endif
@ -147,8 +142,6 @@ trap(struct trapframe *frame)
vm_map_pmap(map)->pm_evictions > 0 &&
pmap_pte_spill(vm_map_pmap(map),
trunc_page(va), false)) {
/* KERNEL_UNLOCK_LAST(l); */
KERNEL_UNLOCK_ONE(NULL);
return;
}
#if defined(DIAGNOSTIC) && (defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE))
@ -180,9 +173,7 @@ trap(struct trapframe *frame)
*/
if (rv == 0)
uvm_grow(p, trunc_page(va));
/* KERNEL_UNLOCK_LAST(l); */
}
KERNEL_UNLOCK_ONE(NULL);
if (rv == 0)
return;
if (rv == EACCES)
@ -210,7 +201,6 @@ trap(struct trapframe *frame)
goto brain_damage2;
}
case EXC_DSI|EXC_USER:
KERNEL_LOCK(1, l);
ci->ci_ev_udsi.ev_count++;
if (frame->dsisr & DSISR_STORE)
ftype = VM_PROT_WRITE;
@ -228,7 +218,6 @@ trap(struct trapframe *frame)
vm_map_pmap(map)->pm_ste_evictions > 0 &&
pmap_ste_spill(vm_map_pmap(map), trunc_page(frame->dar),
false)) {
KERNEL_UNLOCK_LAST(l);
break;
}
#endif
@ -237,7 +226,6 @@ trap(struct trapframe *frame)
vm_map_pmap(map)->pm_evictions > 0 &&
pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->dar),
false)) {
KERNEL_UNLOCK_LAST(l);
break;
}
@ -247,7 +235,6 @@ trap(struct trapframe *frame)
* Record any stack growth...
*/
uvm_grow(p, trunc_page(frame->dar));
KERNEL_UNLOCK_LAST(l);
break;
}
ci->ci_ev_udsi_fatal.ev_count++;
@ -273,7 +260,6 @@ trap(struct trapframe *frame)
ksi.ksi_signo = SIGKILL;
}
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
break;
case EXC_ISI:
@ -284,7 +270,6 @@ trap(struct trapframe *frame)
goto brain_damage2;
case EXC_ISI|EXC_USER:
KERNEL_LOCK(1, l);
ci->ci_ev_isi.ev_count++;
/*
@ -297,7 +282,6 @@ trap(struct trapframe *frame)
if (vm_map_pmap(map)->pm_ste_evictions > 0 &&
pmap_ste_spill(vm_map_pmap(map), trunc_page(frame->srr0),
true)) {
KERNEL_UNLOCK_LAST(l);
break;
}
#endif
@ -305,14 +289,12 @@ trap(struct trapframe *frame)
if (vm_map_pmap(map)->pm_evictions > 0 &&
pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->srr0),
true)) {
KERNEL_UNLOCK_LAST(l);
break;
}
ftype = VM_PROT_EXECUTE;
rv = uvm_fault(map, trunc_page(frame->srr0), ftype);
if (rv == 0) {
KERNEL_UNLOCK_LAST(l);
break;
}
ci->ci_ev_isi_fatal.ev_count++;
@ -327,7 +309,6 @@ trap(struct trapframe *frame)
ksi.ksi_addr = (void *)frame->srr0;
ksi.ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
break;
case EXC_FPU|EXC_USER:
@ -340,7 +321,6 @@ trap(struct trapframe *frame)
case EXC_AST|EXC_USER:
ci->ci_astpending = 0; /* we are about to do it */
KERNEL_LOCK(1, l);
uvmexp.softs++;
if (l->l_pflag & LP_OWEUPC) {
l->l_flag &= ~LP_OWEUPC;
@ -349,11 +329,9 @@ trap(struct trapframe *frame)
/* Check whether we are being preempted. */
if (ci->ci_want_resched)
preempt();
KERNEL_UNLOCK_LAST(l);
break;
case EXC_ALI|EXC_USER:
KERNEL_LOCK(1, l);
ci->ci_ev_ali.ev_count++;
if (fix_unaligned(l, frame) != 0) {
ci->ci_ev_ali_fatal.ev_count++;
@ -371,7 +349,6 @@ trap(struct trapframe *frame)
(*p->p_emul->e_trapsignal)(l, &ksi);
} else
frame->srr0 += 4;
KERNEL_UNLOCK_LAST(l);
break;
case EXC_PERF|EXC_USER:
@ -384,7 +361,6 @@ trap(struct trapframe *frame)
enable_vec();
break;
#else
KERNEL_LOCK(1, l);
if (cpu_printfataltraps) {
printf("trap: pid %d.%d (%s): user VEC trap @ %#lx "
"(SRR1=%#lx)\n",
@ -397,7 +373,6 @@ trap(struct trapframe *frame)
ksi.ksi_addr = (void *)frame->srr0;
ksi.ksi_code = ILL_ILLOPC;
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
break;
#endif
case EXC_MCHK|EXC_USER:
@ -412,15 +387,12 @@ trap(struct trapframe *frame)
ksi.ksi_trap = EXC_MCHK;
ksi.ksi_addr = (void *)frame->srr0;
ksi.ksi_code = BUS_OBJERR;
KERNEL_LOCK(1, l);
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
break;
case EXC_PGM|EXC_USER:
ci->ci_ev_pgm.ev_count++;
if (frame->srr1 & 0x00020000) { /* Bit 14 is set if trap */
KERNEL_LOCK(1, l);
if (p->p_raslist == NULL ||
ras_lookup(p, (void *)frame->srr0) == (void *) -1) {
KSI_INIT_TRAP(&ksi);
@ -433,7 +405,6 @@ trap(struct trapframe *frame)
/* skip the trap instruction */
frame->srr0 += 4;
}
KERNEL_UNLOCK_LAST(l);
} else {
KSI_INIT_TRAP(&ksi);
ksi.ksi_signo = SIGILL;
@ -454,9 +425,7 @@ trap(struct trapframe *frame)
printf("trap: pid %d.%d (%s): user PGM trap @"
" %#lx (SRR1=%#lx)\n", p->p_pid, l->l_lid,
p->p_comm, frame->srr0, frame->srr1);
KERNEL_LOCK(1, l);
(*p->p_emul->e_trapsignal)(l, &ksi);
KERNEL_UNLOCK_LAST(l);
}
break;
@ -918,6 +887,5 @@ startlwp(void *arg)
}
#endif
pool_put(&lwp_uc_pool, uc);
KERNEL_UNLOCK_LAST(l);
userret(l, frame);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.16 2007/10/17 19:56:59 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.17 2008/01/02 11:48:28 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.16 2007/10/17 19:56:59 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.17 2008/01/02 11:48:28 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -177,7 +177,8 @@ readdisklabel(dev, strat, lp, osdep)
/* read master boot record */
bp->b_blkno = MBR_BBSECTOR;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
bp->b_cylinder = MBR_BBSECTOR / lp->d_secpercyl;
(*strat)(bp);
@ -246,7 +247,8 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
(*strat)(bp);
/* if successful, locate disk label within block and validate */
@ -282,7 +284,8 @@ nombrpart:
i = 0;
do {
/* read a bad sector table */
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
bp->b_blkno *= lp->d_secsize / DEV_BSIZE;
@ -401,7 +404,8 @@ writedisklabel(dev, strat, lp, osdep)
/* read master boot record */
bp->b_blkno = MBR_BBSECTOR;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
bp->b_cylinder = MBR_BBSECTOR / lp->d_secpercyl;
(*strat)(bp);
@ -434,7 +438,8 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
(*strat)(bp);
/* if successful, locate disk label within block and validate */
@ -447,7 +452,8 @@ nombrpart:
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags = B_BUSY | B_WRITE;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_WRITE;
(*strat)(bp);
error = biowait(bp);
goto done;
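
In these readdisklabel()/writedisklabel() paths the ownership bit also moves: B_BUSY disappears from b_flags and the buffer is claimed through BC_BUSY in b_cflags, leaving b_flags to carry only the transfer bits. A short sketch of setting up a private buffer for a one-sector read under the new split; the helper and its parameters are illustrative only:

#include <sys/types.h>
#include <sys/buf.h>

/*
 * Sketch: prepare an idle, privately held buffer for a single-sector
 * read.  Ownership (BC_BUSY) is expressed in b_cflags; b_flags only
 * describes the transfer.  Hypothetical helper, not from the commit.
 */
static void
label_buf_setup_read(struct buf *bp, daddr_t blkno, int secsize)
{
        bp->b_cflags = BC_BUSY;         /* we own the buffer for this I/O */
        bp->b_flags = B_READ;           /* direction only */
        bp->b_blkno = blkno;
        bp->b_bcount = secsize;
}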

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.16 2007/10/17 19:57:02 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.17 2008/01/02 11:48:28 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.16 2007/10/17 19:57:02 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.17 2008/01/02 11:48:28 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -146,7 +146,8 @@ readdisklabel(dev, strat, lp, osdep)
/* read master boot record */
bp->b_blkno = MBR_BBSECTOR;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags |= B_READ;
bp->b_cylinder = MBR_BBSECTOR / lp->d_secpercyl;
(*strat)(bp);
@ -194,7 +195,8 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
(*strat)(bp);
/* if successful, locate disk label within block and validate */
@ -230,7 +232,8 @@ nombrpart:
i = 0;
do {
/* read a bad sector table */
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
bp->b_blkno *= lp->d_secsize / DEV_BSIZE;
@ -349,7 +352,8 @@ writedisklabel(dev, strat, lp, osdep)
/* read master boot record */
bp->b_blkno = MBR_BBSECTOR;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
bp->b_cylinder = MBR_BBSECTOR / lp->d_secpercyl;
(*strat)(bp);
@ -373,7 +377,8 @@ nombrpart:
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags = B_BUSY | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_READ;
(*strat)(bp);
/* if successful, locate disk label within block and validate */
@ -386,7 +391,8 @@ nombrpart:
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags = B_BUSY | B_WRITE;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_WRITE;
(*strat)(bp);
error = biowait(bp);
goto done;

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.22 2007/12/24 15:06:38 ad Exp $ */
/* $NetBSD: disksubr.c,v 1.23 2008/01/02 11:48:28 ad Exp $ */
/*
* Copyright (c) 2001 Christopher Sekiya
@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.22 2007/12/24 15:06:38 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.23 2008/01/02 11:48:28 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -197,7 +197,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp, str
goto ioerror;
/* Write sgimips label to first sector */
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
if ((error = biowait(bp)) != 0)
@ -209,7 +210,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp, str
bp->b_blkno = LABELSECTOR;
bp->b_bcount = lp->d_secsize;
bp->b_cylinder = bp->b_blkno / lp->d_secpercyl;
bp->b_flags &= ~(B_READ | B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.26 2007/10/17 19:57:07 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.27 2008/01/02 11:48:28 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.26 2007/10/17 19:57:07 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.27 2008/01/02 11:48:28 ad Exp $");
#include "opt_mbr.h"
@ -410,7 +410,7 @@ readdisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -485,7 +485,7 @@ readdisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEV_BSIZE)
@ -633,7 +633,7 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
bp->b_blkno = dospartoff + LABELSECTOR;
bp->b_cylinder = cyl;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
(*strat)(bp);
@ -681,7 +681,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp,
goto done;
found:
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: fd.c,v 1.138 2007/11/27 21:56:06 ad Exp $ */
/* $NetBSD: fd.c,v 1.139 2008/01/02 11:48:28 ad Exp $ */
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
@ -108,7 +108,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.138 2007/11/27 21:56:06 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.139 2008/01/02 11:48:28 ad Exp $");
#include "opt_ddb.h"
#include "opt_md.h"
@ -2139,12 +2139,13 @@ fdformat(dev_t dev, struct ne7_fd_formb *finfo, struct proc *p)
struct buf *bp;
/* set up a buffer header for fdstrategy() */
bp = getiobuf_nowait();
bp = getiobuf(NULL, false);
if (bp == NULL)
return (ENOBUFS);
bp->b_vp = NULL;
bp->b_flags = B_BUSY | B_PHYS | B_FORMAT;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_PHYS | B_FORMAT;
bp->b_proc = p;
bp->b_dev = dev;
@ -2322,14 +2323,13 @@ fd_read_md_image(size_t *sizep, void * *addrp)
bp->b_error = 0;
bp->b_resid = 0;
bp->b_proc = NULL;
bp->b_flags = B_BUSY | B_PHYS | B_RAW | B_READ;
bp->b_cflags |= BC_BUSY;
bp->b_flags = B_PHYS | B_RAW | B_READ;
bp->b_blkno = btodb(offset);
bp->b_bcount = DEV_BSIZE;
bp->b_data = addr;
fdstrategy(bp);
while ((bp->b_flags & B_DONE) == 0) {
tsleep((void *)bp, PRIBIO + 1, "physio", 0);
}
biowait(bp);
if (bp->b_error)
panic("fd: mountroot: fdread error %d", bp->b_error);
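
The fd.c hunks above drop the hand-rolled tsleep() loop on B_DONE: getiobuf() supplies the buffer and biowait() now does the sleeping, returning once the driver calls biodone(). A condensed sketch of the resulting synchronous-I/O shape; the function, its parameters and the error handling are simplified illustrations, not the driver's actual code:

#include <sys/param.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/errno.h>

/*
 * Sketch: synchronous raw read of one DEV_BSIZE block on a private
 * buffer.  getiobuf(NULL, false) returns a vnode-less buffer without
 * sleeping for memory; biowait() blocks until biodone() is called.
 */
static int
raw_read_one_block(dev_t dev, daddr_t blkno, void *addr,
    void (*strategy)(struct buf *))
{
        struct buf *bp;
        int error;

        bp = getiobuf(NULL, false);
        if (bp == NULL)
                return ENOBUFS;
        bp->b_cflags |= BC_BUSY;
        bp->b_flags = B_PHYS | B_RAW | B_READ;
        bp->b_dev = dev;
        bp->b_blkno = blkno;
        bp->b_bcount = DEV_BSIZE;
        bp->b_data = addr;

        (*strategy)(bp);                /* queue the transfer */
        error = biowait(bp);            /* sleep until the I/O completes */

        putiobuf(bp);
        return error;
}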

View File

@ -1,4 +1,4 @@
/* $NetBSD: mutex.h,v 1.6 2007/11/21 10:19:08 yamt Exp $ */
/* $NetBSD: mutex.h,v 1.7 2008/01/02 11:48:28 ad Exp $ */
/*-
* Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
@ -89,6 +89,8 @@ struct kmutex {
#else /* __MUTEX_PRIVATE */
#include <machine/lock.h>
struct kmutex {
union {
/* Adaptive mutex */

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.81 2007/10/17 19:57:13 garbled Exp $ */
/* $NetBSD: pmap.h,v 1.82 2008/01/02 11:48:29 ad Exp $ */
/*
* Copyright (c) 1996
@ -143,7 +143,6 @@ struct pmap {
union ctxinfo *pm_ctx; /* current context, if any */
int pm_ctxnum; /* current context's number */
u_int pm_cpuset; /* CPU's this pmap has context on */
struct simplelock pm_lock; /* spinlock */
int pm_refcount; /* just what it says */
struct mmuhd pm_reglist; /* MMU regions on this pmap (4/4c) */

View File

@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.38 2006/02/07 16:55:31 chs Exp $ */
/* $NetBSD: vmparam.h,v 1.39 2008/01/02 11:48:29 ad Exp $ */
/*
* Copyright (c) 1992, 1993
@ -142,7 +142,6 @@ struct vm_page_md {
vaddr_t pv_va; /* virtual address */
int pv_flags; /* flags (below) */
} pvlisthead;
struct simplelock pv_slock;
};
#define VM_MDPAGE_PVHEAD(pg) (&(pg)->mdpage.pvlisthead)
@ -151,7 +150,6 @@ struct vm_page_md {
(pg)->mdpage.pvlisthead.pv_pmap = NULL; \
(pg)->mdpage.pvlisthead.pv_va = 0; \
(pg)->mdpage.pvlisthead.pv_flags = 0; \
simple_lock_init(&(pg)->mdpage.pv_slock); \
} while(/*CONSTCOND*/0)
#endif /* _SPARC_VMPARAM_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.321 2007/11/16 23:46:20 martin Exp $ */
/* $NetBSD: pmap.c,v 1.322 2008/01/02 11:48:29 ad Exp $ */
/*
* Copyright (c) 1996
@ -56,12 +56,10 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.321 2007/11/16 23:46:20 martin Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.322 2008/01/02 11:48:29 ad Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_sparc_arch.h"
#include <sys/param.h>
@ -69,12 +67,12 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.321 2007/11/16 23:46:20 martin Exp $");
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
@ -176,58 +174,8 @@ paddr_t vm_first_phys = (paddr_t)-1;
paddr_t vm_last_phys = 0;
psize_t vm_num_phys;
/*
* Locking:
*
* This pmap module uses two types of locks: `normal' (sleep)
* locks and `simple' (spin) locks. They are used as follows:
*
* READ/WRITE SPIN LOCKS
* ---------------------
*
* * pmap_main_lock - This lock is used to prevent deadlock and/or
* provide mutex access to the pmap module. Most operations lock
* the pmap first, then PV lists as needed. However, some operations,
* such as pmap_page_protect(), lock the PV lists before locking
* the pmaps. To prevent deadlock, we require a mutex lock on the
* pmap module if locking in the PV->pmap direction. This is
* implemented by acquiring a (shared) read lock on pmap_main_lock
* if locking pmap->PV and a (exclusive) write lock if locking in
* the PV->pmap direction. Since only one thread can hold a write
* lock at a time, this provides the mutex.
*
* SIMPLE LOCKS
* ------------
*
* * pm_slock (per-pmap) - This lock protects all of the members
* of the pmap structure itself. Note that in the case of the
* kernel pmap, interrupts which cause memory allocation *must*
* be blocked while this lock is asserted.
*
* * pv_slock (per-vm_page) - This lock protects the PV list
* for a specified managed page.
*
* All internal functions which operate on a pmap are called
* with the pmap already locked by the caller (which will be
* an interface function).
*/
/* struct lock pmap_main_lock; */
#if 0 /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
#define PMAP_MAP_TO_HEAD_LOCK() \
spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
#define PMAP_MAP_TO_HEAD_UNLOCK() \
spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#define PMAP_HEAD_TO_MAP_LOCK() \
spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
#define PMAP_HEAD_TO_MAP_UNLOCK() \
spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#else
#define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
#define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
#define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
#endif /* MULTIPROCESSOR || LOCKDEBUG */
#define PMAP_LOCK() KERNEL_LOCK(1, NULL)
#define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL)
/*
* Flags in pvlist.pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2
@ -365,6 +313,8 @@ struct mmuq region_freelist, region_lru, region_locked;
int seginval; /* [4/4c] the invalid segment number */
int reginval; /* [4/3mmu] the invalid region number */
static kmutex_t demap_lock;
/*
* (sun4/4c)
* A context is simply a small number that dictates which set of 4096
@ -387,7 +337,7 @@ union ctxinfo {
struct pmap *c_pmap; /* pmap (if busy) */
};
static struct simplelock ctx_lock; /* lock for below */
static kmutex_t ctx_lock; /* lock for below */
union ctxinfo *ctxinfo; /* allocated at in pmap_bootstrap */
union ctxinfo *ctx_freelist; /* context free list */
int ctx_kick; /* allocation rover when none free */
@ -823,8 +773,6 @@ VA2PA(void *addr)
* PTE at the same time we are. This is the procedure that is
* recommended in the SuperSPARC user's manual.
*/
static struct simplelock demap_lock = SIMPLELOCK_INITIALIZER;
int
updatepte4m(vaddr_t va, int *pte, int bic, int bis, int ctx, u_int cpuset)
{
@ -835,7 +783,7 @@ updatepte4m(vaddr_t va, int *pte, int bic, int bis, int ctx, u_int cpuset)
* Can only be one of these happening in the system
* at any one time.
*/
simple_lock(&demap_lock);
mutex_spin_enter(&demap_lock);
/*
* The idea is to loop swapping zero into the pte, flushing
@ -854,7 +802,7 @@ updatepte4m(vaddr_t va, int *pte, int bic, int bis, int ctx, u_int cpuset)
swapval = (oldval & ~bic) | bis;
swap(vpte, swapval);
simple_unlock(&demap_lock);
mutex_spin_exit(&demap_lock);
return (oldval);
}
@ -1732,7 +1680,6 @@ me_alloc(struct mmuq *mh, struct pmap *newpm, int newvreg, int newvseg)
} while (--i > 0);
/* update segment tables */
simple_lock(&pm->pm_lock);
if (CTX_USABLE(pm,rp)) {
va = VSTOVA(me->me_vreg,me->me_vseg);
if (pm != pmap_kernel() || HASSUN4_MMU3L)
@ -1749,7 +1696,6 @@ me_alloc(struct mmuq *mh, struct pmap *newpm, int newvreg, int newvseg)
/* off old pmap chain */
TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
simple_unlock(&pm->pm_lock);
setcontext4(ctx);
/* onto new pmap chain; new pmap is already locked, if needed */
@ -1897,14 +1843,12 @@ region_alloc(struct mmuq *mh, struct pmap *newpm, int newvr)
}
/* update region tables */
simple_lock(&pm->pm_lock); /* what if other CPU takes mmuentry ?? */
if (pm->pm_ctx)
setregmap(VRTOVA(me->me_vreg), reginval);
rp->rg_smeg = reginval;
/* off old pmap chain */
TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
simple_unlock(&pm->pm_lock);
setcontext4(ctx); /* done with old context */
/* onto new pmap chain; new pmap is already locked, if needed */
@ -2055,6 +1999,8 @@ mmu_pagein(struct pmap *pm, vaddr_t va, int prot)
struct regmap *rp;
struct segmap *sp;
PMAP_LOCK();
if (prot != VM_PROT_NONE)
bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
else
@ -2065,8 +2011,10 @@ mmu_pagein(struct pmap *pm, vaddr_t va, int prot)
rp = &pm->pm_regmap[vr];
/* return 0 if we have no PMEGs to load */
if (rp->rg_nsegmap == 0)
if (rp->rg_nsegmap == 0) {
PMAP_UNLOCK();
return (0);
}
#ifdef DIAGNOSTIC
if (rp->rg_segmap == NULL)
@ -2080,14 +2028,19 @@ mmu_pagein(struct pmap *pm, vaddr_t va, int prot)
sp = &rp->rg_segmap[vs];
/* return 0 if we have no PTEs to load */
if (sp->sg_npte == 0)
if (sp->sg_npte == 0) {
PMAP_UNLOCK();
return (0);
}
/* return -1 if the fault is `hard', 0 if not */
if (sp->sg_pmeg != seginval)
if (sp->sg_pmeg != seginval) {
PMAP_UNLOCK();
return (bits && (getpte4(va) & bits) == bits ? -1 : 0);
}
mmu_pagein_seg(pm, sp, va, vr, vs, &segm_lru);
PMAP_UNLOCK();
return (1);
}
#endif /* SUN4 or SUN4C */
@ -2103,7 +2056,7 @@ void
ctx_alloc(struct pmap *pm)
{
union ctxinfo *c;
int s, cnum, i, doflush;
int cnum, i, doflush;
struct regmap *rp;
int gap_start, gap_end;
vaddr_t va;
@ -2120,8 +2073,7 @@ ctx_alloc(struct pmap *pm)
gap_end = pm->pm_gap_end;
}
s = splvm();
simple_lock(&ctx_lock);
mutex_spin_enter(&ctx_lock);
if ((c = ctx_freelist) != NULL) {
ctx_freelist = c->c_nextfree;
cnum = c - ctxinfo;
@ -2152,7 +2104,6 @@ ctx_alloc(struct pmap *pm)
gap_end = c->c_pmap->pm_gap_end;
}
}
simple_unlock(&ctx_lock);
c->c_pmap = pm;
pm->pm_ctx = c;
@ -2181,7 +2132,6 @@ ctx_alloc(struct pmap *pm)
*/
setcontext4(cnum);
splx(s);
if (doflush)
cache_flush_context(cnum);
@ -2255,7 +2205,6 @@ ctx_alloc(struct pmap *pm)
* Note on multi-threaded processes: a context must remain
* valid as long as any thread is still running on a CPU.
*/
simple_lock(&pm->pm_lock);
#if defined(MULTIPROCESSOR)
for (i = 0; i < sparc_ncpus; i++)
#else
@ -2271,18 +2220,17 @@ ctx_alloc(struct pmap *pm)
(pm->pm_reg_ptps_pa[i] >> SRMMU_PPNPASHIFT) |
SRMMU_TEPTD);
}
simple_unlock(&pm->pm_lock);
/* And finally switch to the new context */
(*cpuinfo.pure_vcache_flush)();
setcontext4m(cnum);
#endif /* SUN4M || SUN4D */
splx(s);
}
mutex_spin_exit(&ctx_lock);
}
/*
* Give away a context. Always called in the context of proc0 (reaper)
* Give away a context.
*/
void
ctx_free(struct pmap *pm)
@ -2303,6 +2251,8 @@ ctx_free(struct pmap *pm)
}
#endif /* SUN4 || SUN4C */
mutex_spin_enter(&ctx_lock);
#if defined(SUN4M) || defined(SUN4D)
if (CPU_HAS_SRMMU) {
int i;
@ -2325,10 +2275,9 @@ ctx_free(struct pmap *pm)
}
#endif
simple_lock(&ctx_lock);
c->c_nextfree = ctx_freelist;
ctx_freelist = c;
simple_unlock(&ctx_lock);
mutex_spin_exit(&ctx_lock);
}
@ -2704,7 +2653,7 @@ pv_syncflags4m(struct vm_page *pg)
int tpte;
s = splvm();
PMAP_HEAD_TO_MAP_LOCK();
PMAP_LOCK();
pv = VM_MDPAGE_PVHEAD(pg);
if (pv->pv_pmap == NULL) {
/* Page not mapped; pv_flags is already up to date */
@ -2712,11 +2661,9 @@ pv_syncflags4m(struct vm_page *pg)
goto out;
}
simple_lock(&pg->mdpage.pv_slock);
flags = pv->pv_flags;
for (; pv != NULL; pv = pv->pv_next) {
pm = pv->pv_pmap;
simple_lock(&pm->pm_lock);
va = pv->pv_va;
rp = &pm->pm_regmap[VA_VREG(va)];
sp = &rp->rg_segmap[VA_VSEG(va)];
@ -2736,14 +2683,11 @@ pv_syncflags4m(struct vm_page *pg)
SRMMU_PG_M | SRMMU_PG_R,
0, pm->pm_ctxnum, PMAP_CPUSET(pm)));
}
simple_unlock(&pm->pm_lock);
}
VM_MDPAGE_PVHEAD(pg)->pv_flags = flags;
simple_unlock(&pg->mdpage.pv_slock);
out:
PMAP_HEAD_TO_MAP_UNLOCK();
PMAP_UNLOCK();
splx(s);
return (flags);
}
@ -2758,7 +2702,6 @@ pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va)
pv0 = VM_MDPAGE_PVHEAD(pg);
simple_lock(&pg->mdpage.pv_slock);
npv = pv0->pv_next;
/*
* First entry is special (sigh).
@ -2785,7 +2728,7 @@ pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va)
*/
pv0->pv_pmap = NULL;
pv0->pv_flags &= ~(PV_NC|PV_ANC);
goto out;
return;
}
} else {
struct pvlist *prev;
@ -2795,7 +2738,7 @@ pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va)
if (npv == NULL) {
panic("pv_unlink: pm %p is missing on pg %p",
pm, pg);
goto out;
return;
}
if (npv->pv_pmap == pm && npv->pv_va == va)
break;
@ -2813,7 +2756,7 @@ pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va)
for (npv = pv0->pv_next; npv != NULL; npv = npv->pv_next)
if (BADALIAS(va, npv->pv_va) ||
(npv->pv_flags & PV_NC) != 0)
goto out;
return;
#ifdef DEBUG
if (pmapdebug & PDB_CACHESTUFF)
printf(
@ -2824,9 +2767,6 @@ pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va)
pv0->pv_flags &= ~PV_ANC;
pv_changepte4m(pg, SRMMU_PG_C, 0);
}
out:
simple_unlock(&pg->mdpage.pv_slock);
}
/*
@ -2843,7 +2783,6 @@ pv_link4m(struct vm_page *pg, struct pmap *pm, vaddr_t va,
int error = 0;
pv0 = VM_MDPAGE_PVHEAD(pg);
simple_lock(&pg->mdpage.pv_slock);
if (pv0->pv_pmap == NULL) {
/* no pvlist entries yet */
@ -2914,7 +2853,6 @@ link_npv:
pv0->pv_next = npv;
out:
simple_unlock(&pg->mdpage.pv_slock);
return (error);
}
#endif
@ -2932,8 +2870,7 @@ pv_uncache(struct vm_page *pg)
int s;
s = splvm();
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pv_slock);
PMAP_LOCK();
for (pv = VM_MDPAGE_PVHEAD(pg); pv != NULL; pv = pv->pv_next)
pv->pv_flags |= PV_NC;
@ -2946,8 +2883,7 @@ pv_uncache(struct vm_page *pg)
if (CPU_HAS_SUNMMU)
pv_changepte4_4c(pg, PG_NC, 0);
#endif
simple_unlock(&pg->mdpage.pv_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
PMAP_UNLOCK();
splx(s);
}
@ -3050,12 +2986,6 @@ pmap_bootstrap(int nctx, int nregion, int nsegment)
nptesg = (NBPSG >> pgshift);
#endif
/*
* Initialize the locks.
*/
/* spinlockinit(&pmap_main_lock, "pmaplk", 0); */
simple_lock_init(&kernel_pmap_store.pm_lock);
/*
* Grab physical memory list.
*/
@ -3279,12 +3209,14 @@ pmap_bootstrap4_4c(void *top, int nctx, int nregion, int nsegment)
p = i; /* retract to first free phys */
mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM);
/*
* All contexts are free except the kernel's.
*
* XXX sun4c could use context 0 for users?
*/
simple_lock_init(&ctx_lock);
mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED);
ci->c_pmap = pmap_kernel();
ctx_freelist = ci + 1;
for (i = 1; i < ncontext; i++) {
@ -3767,10 +3699,12 @@ pmap_bootstrap4m(void *top)
p = q; /* retract to first free phys */
mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM);
/*
* Set up the ctxinfo structures (freelist of contexts)
*/
simple_lock_init(&ctx_lock);
mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED);
ci->c_pmap = pmap_kernel();
ctx_freelist = ci + 1;
for (i = 1; i < ncontext; i++) {
@ -4238,7 +4172,6 @@ pmap_pmap_pool_ctor(void *arg, void *object, int flags)
qzero((void *)pm->pm_regmap, NUREG * sizeof(struct regmap));
/* pm->pm_ctx = NULL; // already done */
simple_lock_init(&pm->pm_lock);
if (CPU_HAS_SUNMMU) {
TAILQ_INIT(&pm->pm_seglist);
@ -4376,16 +4309,12 @@ pmap_create(void)
void
pmap_destroy(struct pmap *pm)
{
int count;
#ifdef DEBUG
if (pmapdebug & PDB_DESTROY)
printf("pmap_destroy[%d](%p)\n", cpu_number(), pm);
#endif
simple_lock(&pm->pm_lock);
count = --pm->pm_refcount;
simple_unlock(&pm->pm_lock);
if (count == 0) {
if (atomic_dec_uint_nv(&pm->pm_refcount) == 0) {
#ifdef DEBUG
pmap_quiet_check(pm);
#endif
@ -4399,11 +4328,8 @@ pmap_destroy(struct pmap *pm)
void
pmap_reference(struct pmap *pm)
{
int s = splvm();
simple_lock(&pm->pm_lock);
pm->pm_refcount++;
simple_unlock(&pm->pm_lock);
splx(s);
atomic_inc_uint(&pm->pm_refcount);
}
#if defined(SUN4) || defined(SUN4C)
@ -4584,8 +4510,7 @@ pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
ctx = getcontext();
s = splvm(); /* XXX conservative */
PMAP_MAP_TO_HEAD_LOCK();
simple_lock(&pm->pm_lock);
PMAP_LOCK();
for (; va < endva; va = nva) {
/* do one virtual segment at a time */
vr = VA_VREG(va);
@ -4596,8 +4521,7 @@ pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
if (pm->pm_regmap[vr].rg_nsegmap != 0)
(*rm)(pm, va, nva, vr, vs);
}
simple_unlock(&pm->pm_lock);
PMAP_MAP_TO_HEAD_UNLOCK();
PMAP_UNLOCK();
splx(s);
setcontext(ctx);
}
@ -5104,7 +5028,7 @@ pmap_protect4_4c(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
write_user_windows();
ctx = getcontext4();
s = splvm();
simple_lock(&pm->pm_lock);
PMAP_LOCK();
for (va = sva; va < eva;) {
vr = VA_VREG(va);
vs = VA_VSEG(va);
@ -5175,7 +5099,7 @@ pmap_protect4_4c(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
}
}
}
simple_unlock(&pm->pm_lock);
PMAP_UNLOCK();
splx(s);
setcontext4(ctx);
}
@ -5292,8 +5216,7 @@ pmap_page_protect4m(struct vm_page *pg, vm_prot_t prot)
cpu_number(), VM_PAGE_TO_PHYS(pg), prot);
#endif
s = splvm();
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pv_slock);
PMAP_LOCK();
if (prot & VM_PROT_READ) {
pv_changepte4m(pg, 0, PPROT_WRITE);
@ -5314,7 +5237,6 @@ pmap_page_protect4m(struct vm_page *pg, vm_prot_t prot)
flags = pv->pv_flags & ~(PV_NC|PV_ANC);
while (pv != NULL) {
pm = pv->pv_pmap;
simple_lock(&pm->pm_lock);
va = pv->pv_va;
vr = VA_VREG(va);
vs = VA_VSEG(va);
@ -5361,7 +5283,6 @@ pmap_page_protect4m(struct vm_page *pg, vm_prot_t prot)
npv = pv->pv_next;
if (pv != VM_MDPAGE_PVHEAD(pg))
pool_put(&pv_pool, pv);
simple_unlock(&pm->pm_lock);
pv = npv;
}
@ -5371,8 +5292,7 @@ pmap_page_protect4m(struct vm_page *pg, vm_prot_t prot)
VM_MDPAGE_PVHEAD(pg)->pv_flags = flags;
out:
simple_unlock(&pg->mdpage.pv_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
PMAP_UNLOCK();
splx(s);
}
@ -5406,8 +5326,7 @@ pmap_protect4m(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
write_user_windows();
s = splvm();
PMAP_MAP_TO_HEAD_LOCK();
simple_lock(&pm->pm_lock);
PMAP_LOCK();
for (va = sva; va < eva;) {
vr = VA_VREG(va);
@ -5456,8 +5375,7 @@ pmap_protect4m(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
PMAP_CPUSET(pm));
}
}
simple_unlock(&pm->pm_lock);
PMAP_MAP_TO_HEAD_UNLOCK();
PMAP_UNLOCK();
splx(s);
}
@ -6006,7 +5924,7 @@ pmap_kremove4_4c(vaddr_t va, vsize_t len)
s = splvm();
ctx = getcontext();
simple_lock(&pm->pm_lock);
PMAP_LOCK();
setcontext4(0);
for (; va < endva; va = nva) {
/* do one virtual segment at a time */
@ -6086,7 +6004,7 @@ pmap_kremove4_4c(vaddr_t va, vsize_t len)
mmu_pmeg_unlock(sp->sg_pmeg);
}
}
simple_unlock(&pm->pm_lock);
PMAP_UNLOCK();
setcontext4(ctx);
splx(s);
}
@ -6215,8 +6133,7 @@ pmap_enk4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
sp = &rp->rg_segmap[vs];
s = splvm(); /* XXX way too conservative */
PMAP_MAP_TO_HEAD_LOCK();
simple_lock(&pm->pm_lock);
PMAP_LOCK();
if (rp->rg_seg_ptps == NULL) /* enter new region */
panic("pmap_enk4m: missing kernel region table for va 0x%lx",va);
@ -6229,8 +6146,7 @@ pmap_enk4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
/* just changing protection and/or wiring */
pmap_changeprot4m(pm, va, prot, flags);
simple_unlock(&pm->pm_lock);
PMAP_MAP_TO_HEAD_UNLOCK();
PMAP_UNLOCK();
splx(s);
return (0);
}
@ -6276,8 +6192,7 @@ printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, "
setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
pm->pm_stats.resident_count++;
out:
simple_unlock(&pm->pm_lock);
PMAP_MAP_TO_HEAD_UNLOCK();
PMAP_UNLOCK();
splx(s);
return (error);
}
@ -6304,8 +6219,7 @@ pmap_enu4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
vs = VA_VSEG(va);
rp = &pm->pm_regmap[vr];
s = splvm(); /* XXX conservative */
PMAP_MAP_TO_HEAD_LOCK();
simple_lock(&pm->pm_lock);
PMAP_LOCK();
if (rp->rg_segmap == NULL) {
/* definitely a new mapping */
@ -6409,8 +6323,7 @@ pmap_enu4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
/* just changing prot and/or wiring */
/* caller should call this directly: */
pmap_changeprot4m(pm, va, prot, flags);
simple_unlock(&pm->pm_lock);
PMAP_MAP_TO_HEAD_UNLOCK();
PMAP_UNLOCK();
splx(s);
return (0);
}
@ -6479,8 +6392,7 @@ pmap_enu4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
}
out:
simple_unlock(&pm->pm_lock);
PMAP_MAP_TO_HEAD_UNLOCK();
PMAP_UNLOCK();
splx(s);
return (error);
}
@ -6733,8 +6645,9 @@ pmap_extract4m(struct pmap *pm, vaddr_t va, paddr_t *pap)
* requires interrupt protection.
*/
s = splvm();
if (pm != pmap_kernel())
simple_lock(&pm->pm_lock);
if (pm != pmap_kernel()) {
PMAP_LOCK();
}
rp = &pm->pm_regmap[vr];
if (rp->rg_segmap == NULL) {
@ -6766,9 +6679,9 @@ pmap_extract4m(struct pmap *pm, vaddr_t va, paddr_t *pap)
* the middle of the PTE update protocol. So, acquire the
* demap lock and retry.
*/
simple_lock(&demap_lock);
mutex_spin_enter(&demap_lock);
pte = sp->sg_pte[VA_SUN4M_VPG(va)];
simple_unlock(&demap_lock);
mutex_spin_exit(&demap_lock);
if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
goto out;
}
@ -6783,8 +6696,9 @@ pmap_extract4m(struct pmap *pm, vaddr_t va, paddr_t *pap)
v = true;
out:
if (pm != pmap_kernel())
simple_unlock(&pm->pm_lock);
if (pm != pmap_kernel()) {
PMAP_UNLOCK();
}
splx(s);
return (v);
}
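
The sparc pmap above is representative of the commit's lock conversions: struct simplelock fields and the simple_lock()/simple_unlock() pairs around them are replaced either by the big kernel lock (the PMAP_LOCK()/PMAP_UNLOCK() macros) or by kmutex_t objects initialized at the interrupt level from which they can be taken. A minimal sketch of the spin-mutex half, assuming a lock that, like demap_lock, may be taken from interrupts at or below IPL_VM:

#include <sys/mutex.h>
#include <sys/intr.h>

static kmutex_t demap_lock_sketch;      /* hypothetical stand-in */

/* One-time setup, e.g. from the pmap bootstrap path. */
static void
demap_lock_sketch_init(void)
{
        /* MUTEX_DEFAULT with IPL_VM yields a spin mutex blocking IPL_VM interrupts. */
        mutex_init(&demap_lock_sketch, MUTEX_DEFAULT, IPL_VM);
}

/* Critical section replacing a simple_lock()/simple_unlock() pair. */
static void
demap_lock_sketch_critical(void)
{
        mutex_spin_enter(&demap_lock_sketch);
        /* ... touch the shared PTE/TLB state ... */
        mutex_spin_exit(&demap_lock_sketch);
}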

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.172 2007/03/04 06:00:47 christos Exp $ */
/* $NetBSD: trap.c,v 1.173 2008/01/02 11:48:29 ad Exp $ */
/*
* Copyright (c) 1996
@ -49,7 +49,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.172 2007/03/04 06:00:47 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.173 2008/01/02 11:48:29 ad Exp $");
#include "opt_ddb.h"
#include "opt_compat_svr4.h"
@ -417,9 +417,7 @@ badtrap:
#endif
if (fs == NULL) {
KERNEL_LOCK(1, l);
fs = malloc(sizeof *fs, M_SUBPROC, M_WAITOK);
KERNEL_UNLOCK_LAST(l);
*fs = initfpstate;
l->l_md.md_fpstate = fs;
}
@ -495,12 +493,10 @@ badtrap:
}
case T_WINOF:
KERNEL_LOCK(1, l);
if (rwindow_save(l)) {
mutex_enter(&p->p_smutex);
sigexit(l, SIGILL);
}
KERNEL_UNLOCK_LAST(l);
break;
#define read_rw(src, dst) \
@ -515,7 +511,6 @@ badtrap:
* nsaved to -1. If we decide to deliver a signal on
* our way out, we will clear nsaved.
*/
KERNEL_LOCK(1, l);
if (pcb->pcb_uw || pcb->pcb_nsaved)
panic("trap T_RWRET 1");
#ifdef DEBUG
@ -531,7 +526,6 @@ badtrap:
if (pcb->pcb_nsaved)
panic("trap T_RWRET 2");
pcb->pcb_nsaved = -1; /* mark success */
KERNEL_UNLOCK_LAST(l);
break;
case T_WINUF:
@ -544,7 +538,6 @@ badtrap:
* in the pcb. The restore's window may still be in
* the CPU; we need to force it out to the stack.
*/
KERNEL_LOCK(1, l);
#ifdef DEBUG
if (rwindow_debug)
printf("cpu%d:%s[%d]: rwindow: T_WINUF 0: pcb<-stack: 0x%x\n",
@ -569,14 +562,11 @@ badtrap:
if (pcb->pcb_nsaved)
panic("trap T_WINUF");
pcb->pcb_nsaved = -1; /* mark success */
KERNEL_UNLOCK_LAST(l);
break;
case T_ALIGN:
if ((p->p_md.md_flags & MDP_FIXALIGN) != 0) {
KERNEL_LOCK(1, l);
n = fixalign(l, tf);
KERNEL_UNLOCK_LAST(l);
if (n == 0) {
ADVANCE;
break;
@ -598,7 +588,6 @@ badtrap:
* will not match once fpu_cleanup does its job, so
* we must not save again later.)
*/
KERNEL_LOCK(1, l);
if (l != cpuinfo.fplwp)
panic("fpe without being the FP user");
FPU_LOCK(s);
@ -606,7 +595,6 @@ badtrap:
cpuinfo.fplwp = NULL;
l->l_md.md_fpu = NULL;
FPU_UNLOCK(s);
KERNEL_UNLOCK_LAST(l);
/* tf->tf_psr &= ~PSR_EF; */ /* share_fpu will do this */
if ((code = fpu_cleanup(l, l->l_md.md_fpstate)) != 0) {
sig = SIGFPE;
@ -658,12 +646,10 @@ badtrap:
case T_FLUSHWIN:
write_user_windows();
#ifdef probably_slower_since_this_is_usually_false
KERNEL_LOCK(1, l);
if (pcb->pcb_nsaved && rwindow_save(p)) {
mutex_enter(&p->p_smutex);
sigexit(l, SIGILL);
}
KERNEL_UNLOCK_LAST(l);
#endif
ADVANCE;
break;
@ -703,10 +689,8 @@ badtrap:
break;
}
if (sig != 0) {
KERNEL_LOCK(1, l);
ksi.ksi_signo = sig;
trapsignal(l, &ksi);
KERNEL_UNLOCK_LAST(l);
}
userret(l, pc, sticks);
share_fpu(l, tf);
@ -806,9 +790,6 @@ mem_access_fault(unsigned type, int ser, u_int v, int pc, int psr,
LWP_CACHE_CREDS(l, p);
sticks = p->p_sticks;
if ((psr & PSR_PS) == 0)
KERNEL_LOCK(1, l);
#ifdef FPU_DEBUG
if ((tf->tf_psr & PSR_EF) != 0) {
if (cpuinfo.fplwp != l)
@ -974,7 +955,6 @@ kfault:
}
out:
if ((psr & PSR_PS) == 0) {
KERNEL_UNLOCK_LAST(l);
userret(l, pc, sticks);
share_fpu(l, tf);
}
@ -1054,11 +1034,6 @@ mem_access_fault4m(unsigned type, u_int sfsr, u_int sfva, struct trapframe *tf)
goto out_nounlock;
}
if ((psr & PSR_PS) == 0)
KERNEL_LOCK(1, l);
else
KERNEL_LOCK(1, NULL);
/*
* Figure out what to pass the VM code. We cannot ignore the sfva
* register on text faults, since this might be a trap on an
@ -1188,7 +1163,6 @@ mem_access_fault4m(unsigned type, u_int sfsr, u_int sfva, struct trapframe *tf)
if (va >= KERNBASE) {
rv = uvm_fault(kernel_map, va, atype);
if (rv == 0) {
KERNEL_UNLOCK_ONE(NULL);
return;
}
goto kfault;
@ -1232,7 +1206,6 @@ kfault:
tf->tf_pc = onfault;
tf->tf_npc = onfault + 4;
tf->tf_out[0] = (rv == EACCES) ? EFAULT : rv;
KERNEL_UNLOCK_ONE(NULL);
return;
}
KSI_INIT_TRAP(&ksi);
@ -1255,13 +1228,10 @@ kfault:
}
out:
if ((psr & PSR_PS) == 0) {
KERNEL_UNLOCK_LAST(l);
out_nounlock:
userret(l, pc, sticks);
share_fpu(l, tf);
}
else
KERNEL_UNLOCK_ONE(NULL);
}
#endif /* SUN4M */
@ -1283,7 +1253,6 @@ startlwp(void *arg)
#endif
pool_put(&lwp_uc_pool, uc);
KERNEL_UNLOCK_LAST(l);
userret(l, l->l_md.md_tf->tf_pc, 0);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: fdc.c,v 1.19 2007/11/28 20:41:35 jnemeth Exp $ */
/* $NetBSD: fdc.c,v 1.20 2008/01/02 11:48:29 ad Exp $ */
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
@ -108,7 +108,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fdc.c,v 1.19 2007/11/28 20:41:35 jnemeth Exp $");
__KERNEL_RCSID(0, "$NetBSD: fdc.c,v 1.20 2008/01/02 11:48:29 ad Exp $");
#include "opt_ddb.h"
#include "opt_md.h"
@ -2327,12 +2327,13 @@ fdformat(dev_t dev, struct ne7_fd_formb *finfo, struct proc *p)
struct buf *bp;
/* set up a buffer header for fdstrategy() */
bp = getiobuf_nowait();
bp = getiobuf(NULL, false);
if (bp == NULL)
return ENOBUFS;
bp->b_vp = NULL;
bp->b_flags = B_BUSY | B_PHYS | B_FORMAT;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_PHYS | B_FORMAT;
bp->b_proc = p;
bp->b_dev = dev;
@ -2515,14 +2516,13 @@ fd_read_md_image(size_t *sizep, void **addrp)
bp->b_error = 0;
bp->b_resid = 0;
bp->b_proc = NULL;
bp->b_flags = B_BUSY | B_PHYS | B_RAW | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_PHYS | B_RAW | B_READ;
bp->b_blkno = btodb(offset);
bp->b_bcount = DEV_BSIZE;
bp->b_data = addr;
fdstrategy(bp);
while ((bp->b_flags & B_DONE) == 0) {
tsleep((void *)bp, PRIBIO + 1, "physio", 0);
}
biowait(bp);
if (bp->b_error)
panic("fd: mountroot: fdread error %d", bp->b_error);

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.202 2007/12/09 20:12:55 martin Exp $ */
/* $NetBSD: pmap.c,v 1.203 2008/01/02 11:48:30 ad Exp $ */
/*
*
* Copyright (C) 1996-1999 Eduardo Horvath.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.202 2007/12/09 20:12:55 martin Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.203 2008/01/02 11:48:30 ad Exp $");
#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
#define HWREF
@ -45,6 +45,8 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.202 2007/12/09 20:12:55 martin Exp $");
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <uvm/uvm.h>
@ -53,7 +55,6 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.202 2007/12/09 20:12:55 martin Exp $");
#include <machine/ctlreg.h>
#include <machine/promlib.h>
#include <machine/kcore.h>
#include <machine/cpu.h>
#include <machine/bootinfo.h>
#include "cache.h"
@ -122,8 +123,8 @@ extern int pseg_set(struct pmap *, vaddr_t, int64_t, paddr_t);
#define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \
(((pv)->pv_va) & PV_MASK)))
struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;
struct pool_cache pmap_cache;
struct pool_cache pmap_pv_cache;
pv_entry_t pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *);
void pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *,
@ -292,6 +293,10 @@ int numctx;
static int pmap_get_page(paddr_t *p);
static void pmap_free_page(paddr_t pa);
/*
* Global pmap lock.
*/
static kmutex_t pmap_lock;
/*
* Support for big page sizes. This maps the page size to the
@ -929,7 +934,7 @@ pmap_bootstrap(u_long kernelstart, u_long kernelend)
/*
* Allocate and clear out pmap_kernel()->pm_segs[]
*/
simple_lock_init(&pmap_kernel()->pm_lock);
mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE);
pmap_kernel()->pm_refs = 1;
pmap_kernel()->pm_ctx = 0;
@ -1170,10 +1175,10 @@ pmap_init()
/*
* initialize the pmap pools.
*/
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
&pool_allocator_nointr, IPL_NONE);
pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry",
&pool_allocator_nointr, IPL_NONE);
pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
"pmappl", NULL, IPL_NONE, NULL, NULL, NULL);
pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0, 0,
"pv_entry", NULL, IPL_NONE, NULL, NULL, NULL);
vm_first_phys = avail_start;
vm_num_phys = avail_end - avail_start;
@ -1219,7 +1224,7 @@ pmap_growkernel(maxkvaddr)
(void *)KERNEND, (void *)maxkvaddr);
return (kbreak);
}
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock);
DPRINTF(PDB_GROW, ("pmap_growkernel(%lx...%lx)\n", kbreak, maxkvaddr));
/* Align with the start of a page table */
for (kbreak &= (-1 << PDSHIFT); kbreak < maxkvaddr;
@ -1237,7 +1242,7 @@ pmap_growkernel(maxkvaddr)
ENTER_STAT(ptpneeded);
}
}
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
return (kbreak);
}
@ -1251,11 +1256,10 @@ pmap_create()
DPRINTF(PDB_CREATE, ("pmap_create()\n"));
pm = pool_get(&pmap_pmap_pool, PR_WAITOK);
pm = pool_cache_get(&pmap_cache, PR_WAITOK);
memset(pm, 0, sizeof *pm);
DPRINTF(PDB_CREATE, ("pmap_create(): created %p\n", pm));
simple_lock_init(&pm->pm_lock);
pm->pm_refs = 1;
TAILQ_INIT(&pm->pm_obj.memq);
if (pm != pmap_kernel()) {
@ -1276,9 +1280,7 @@ pmap_reference(pm)
struct pmap *pm;
{
simple_lock(&pm->pm_lock);
pm->pm_refs++;
simple_unlock(&pm->pm_lock);
atomic_inc_uint(&pm->pm_refs);
}
/*
@ -1290,12 +1292,8 @@ pmap_destroy(pm)
struct pmap *pm;
{
struct vm_page *pg, *nextpg;
int refs;
simple_lock(&pm->pm_lock);
refs = --pm->pm_refs;
simple_unlock(&pm->pm_lock);
if (refs > 0) {
if (atomic_dec_uint_nv(&pm->pm_refs) > 0) {
return;
}
DPRINTF(PDB_DESTROY, ("pmap_destroy: freeing pmap %p\n", pm));
@ -1309,7 +1307,7 @@ pmap_destroy(pm)
uvm_pagefree(pg);
}
pmap_free_page((paddr_t)(u_long)pm->pm_segs);
pool_put(&pmap_pmap_pool, pm);
pool_cache_put(&pmap_cache, pm);
}
/*
@ -1357,7 +1355,7 @@ pmap_collect(pm)
if (pm == pmap_kernel())
return;
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock);
for (i = 0; i < STSZ; i++) {
pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
ASI_PHYS_CACHED);
@ -1396,7 +1394,7 @@ pmap_collect(pm)
pmap_free_page(pa);
}
}
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
}
/*
@ -1627,7 +1625,7 @@ pmap_enter(pm, va, pa, prot, flags)
* entering the same PA again. if it's different remove it.
*/
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock);
data = pseg_get(pm, va);
if (data & TLB_V) {
wasmapped = TRUE;
@ -1665,17 +1663,17 @@ pmap_enter(pm, va, pa, prot, flags)
*/
if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) {
if (npv != NULL) {
pool_put(&pmap_pv_pool, npv);
pool_cache_put(&pmap_pv_cache, npv); /* XXXAD defer */
npv = NULL;
}
if (wasmapped && opa == pa) {
dopv = FALSE;
}
} else if (npv == NULL) {
npv = pool_get(&pmap_pv_pool, PR_NOWAIT);
npv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT); /* XXXAD defer */
if (npv == NULL) {
if (flags & PMAP_CANFAIL) {
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
return (ENOMEM);
}
panic("pmap_enter: no pv entries available");
@ -1686,7 +1684,7 @@ pmap_enter(pm, va, pa, prot, flags)
ENTER_STAT(unmanaged);
dopv = FALSE;
if (npv != NULL) {
pool_put(&pmap_pv_pool, npv);
pool_cache_put(&pmap_pv_cache, npv); /* XXXAD defer */
npv = NULL;
}
}
@ -1709,7 +1707,7 @@ pmap_enter(pm, va, pa, prot, flags)
#else
/* If it needs ref accounting do nothing. */
if (!(flags & VM_PROT_READ)) {
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
return 0;
}
#endif
@ -1754,9 +1752,9 @@ pmap_enter(pm, va, pa, prot, flags)
ptp = 0;
if (!pmap_get_page(&ptp)) {
if (flags & PMAP_CANFAIL) {
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
if (npv != NULL) {
pool_put(&pmap_pv_pool, npv);
pool_cache_put(&pmap_pv_cache, npv); /* XXXAD defer */
}
return (ENOMEM);
} else {
@ -1776,7 +1774,7 @@ pmap_enter(pm, va, pa, prot, flags)
pmap_enter_pv(pm, va, pa, pg, npv);
}
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
#ifdef DEBUG
i = ptelookup_va(va);
if (pmapdebug & PDB_ENTER)
@ -1876,7 +1874,7 @@ pmap_remove(pm, va, endva)
KASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK);
KASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata);
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock);
DPRINTF(PDB_REMOVE, ("pmap_remove(pm=%p, va=%p, endva=%p):", pm,
(void *)(u_long)va, (void *)(u_long)endva));
REMOVE_STAT(calls);
@ -1905,7 +1903,7 @@ pmap_remove(pm, va, endva)
if (pg) {
pv = pmap_remove_pv(pm, va, pg);
if (pv != NULL) {
pool_put(&pmap_pv_pool, pv);
pool_cache_put(&pmap_pv_cache, pv); /* XXXAD defer */
}
}
@ -1942,13 +1940,13 @@ pmap_remove(pm, va, endva)
REMOVE_STAT(tflushes);
tlb_flush_pte(va, pm->pm_ctx);
}
simple_unlock(&pm->pm_lock);
if (flush && pm->pm_refs) {
REMOVE_STAT(flushes);
blast_dcache();
}
DPRINTF(PDB_REMOVE, ("\n"));
pv_check();
mutex_exit(&pmap_lock);
}
/*
@ -1974,7 +1972,7 @@ pmap_protect(pm, sva, eva, prot)
return;
}
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock);
sva = sva & ~PGOFSET;
for (; sva < eva; sva += PAGE_SIZE) {
#ifdef DEBUG
@ -2030,8 +2028,8 @@ pmap_protect(pm, sva, eva, prot)
tsb_invalidate(pm->pm_ctx, sva);
tlb_flush_pte(sva, pm->pm_ctx);
}
simple_unlock(&pm->pm_lock);
pv_check();
mutex_exit(&pmap_lock);
}
/*
@ -2066,7 +2064,7 @@ pmap_extract(pm, va, pap)
return TRUE;
} else {
if (pm != pmap_kernel()) {
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock);
}
data = pseg_get(pm, va);
pa = data & TLB_PA_MASK;
@ -2100,7 +2098,7 @@ pmap_extract(pm, va, pap)
}
#endif
if (pm != pmap_kernel()) {
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
}
}
if ((data & TLB_V) == 0)
@ -2123,7 +2121,7 @@ pmap_kprotect(va, prot)
int64_t data;
int rv;
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock);
data = pseg_get(pm, va);
KASSERT(data & TLB_V);
if (prot & VM_PROT_WRITE) {
@ -2136,7 +2134,7 @@ pmap_kprotect(va, prot)
panic("pmap_kprotect: pseg_set needs spare! rv=%d", rv);
tsb_invalidate(pm->pm_ctx, va);
tlb_flush_pte(va, pm->pm_ctx);
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
}
/*
@ -2355,6 +2353,7 @@ pmap_clear_modify(pg)
#if defined(DEBUG)
modified = pmap_is_modified(pg);
#endif
mutex_enter(&pmap_lock);
/* Clear all mappings */
pv = &pg->mdpage.mdpg_pvh;
#ifdef DEBUG
@ -2376,7 +2375,6 @@ pmap_clear_modify(pg)
struct pmap *pmap = pv->pv_pmap;
vaddr_t va = pv->pv_va & PV_VAMASK;
simple_lock(&pmap->pm_lock);
/* First clear the mod bit in the PTE and make it R/O */
data = pseg_get(pmap, va);
KASSERT(data & TLB_V);
@ -2400,10 +2398,10 @@ pmap_clear_modify(pg)
if (pv->pv_va & PV_MOD)
changed |= 1;
pv->pv_va &= ~(PV_MOD);
simple_unlock(&pmap->pm_lock);
}
}
pv_check();
mutex_exit(&pmap_lock);
#ifdef DEBUG
if (pmap_is_modified(pg)) {
printf("pmap_clear_modify(): %p still modified!\n", pg);
@ -2432,6 +2430,7 @@ pmap_clear_reference(pg)
int referenced = 0;
#endif
mutex_enter(&pmap_lock);
#ifdef DEBUG
DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_reference(%p)\n", pg));
referenced = pmap_is_referenced(pg);
@ -2453,7 +2452,6 @@ pmap_clear_reference(pg)
struct pmap *pmap = pv->pv_pmap;
vaddr_t va = pv->pv_va & PV_VAMASK;
simple_lock(&pmap->pm_lock);
data = pseg_get(pmap, va);
KASSERT(data & TLB_V);
DPRINTF(PDB_CHANGEPROT,
@ -2480,7 +2478,6 @@ pmap_clear_reference(pg)
if (pv->pv_va & PV_REF)
changed |= 1;
pv->pv_va &= ~(PV_REF);
simple_unlock(&pmap->pm_lock);
}
}
dcache_flush_page(pa);
@ -2497,8 +2494,12 @@ pmap_clear_reference(pg)
printf("pmap_clear_reference: referenced %d changed %d\n",
referenced, changed);
Debugger();
} else return (referenced);
} else {
mutex_exit(&pmap_lock);
return (referenced);
}
#endif
mutex_exit(&pmap_lock);
return (changed);
}
@ -2622,15 +2623,15 @@ pmap_unwire(pmap, va)
return;
}
#endif
simple_lock(&pmap->pm_lock);
mutex_enter(&pmap_lock);
data = pseg_get(pmap, va & PV_VAMASK);
KASSERT(data & TLB_V);
data &= ~TLB_TSB_LOCK;
rv = pseg_set(pmap, va & PV_VAMASK, data, 0);
if (rv & 1)
panic("pmap_unwire: pseg_set needs spare! rv=%d\n", rv);
simple_unlock(&pmap->pm_lock);
pv_check();
mutex_exit(&pmap_lock);
}
/*
@ -2648,7 +2649,7 @@ pmap_page_protect(pg, prot)
int64_t data = 0;
int rv;
paddr_t pa = VM_PAGE_TO_PHYS(pg);
pv_entry_t pv, npv, firstpv;
pv_entry_t pv, npv, firstpv, freepv = NULL;
struct pmap *pmap;
vaddr_t va;
bool needflush = FALSE;
@ -2656,6 +2657,7 @@ pmap_page_protect(pg, prot)
DPRINTF(PDB_CHANGEPROT,
("pmap_page_protect: pg %p prot %x\n", pg, prot));
mutex_enter(&pmap_lock);
pv = &pg->mdpage.mdpg_pvh;
if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
/* copy_on_write */
@ -2680,7 +2682,6 @@ pmap_page_protect(pg, prot)
pmap = pv->pv_pmap;
va = pv->pv_va & PV_VAMASK;
simple_lock(&pmap->pm_lock);
DPRINTF(PDB_CHANGEPROT | PDB_REF,
("pmap_page_protect: "
"RO va %p of pg %p...\n",
@ -2705,7 +2706,6 @@ pmap_page_protect(pg, prot)
tsb_invalidate(pmap->pm_ctx, va);
tlb_flush_pte(va, pmap->pm_ctx);
}
simple_unlock(&pmap->pm_lock);
}
}
} else {
@ -2721,7 +2721,6 @@ pmap_page_protect(pg, prot)
va = npv->pv_va & PV_VAMASK;
/* We're removing npv from pv->pv_next */
simple_lock(&pmap->pm_lock);
DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE,
("pmap_page_protect: "
"demap va %p of pg %p in pmap %p...\n",
@ -2748,11 +2747,11 @@ pmap_page_protect(pg, prot)
if (pmap->pm_refs > 0) {
needflush = TRUE;
}
simple_unlock(&pmap->pm_lock);
/* free the pv */
pv->pv_next = npv->pv_next;
pool_put(&pmap_pv_pool, npv);
npv->pv_next = freepv;
freepv = npv;
}
pv = firstpv;
@ -2768,7 +2767,6 @@ pmap_page_protect(pg, prot)
pmap = pv->pv_pmap;
va = pv->pv_va & PV_VAMASK;
simple_lock(&pmap->pm_lock);
DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE,
("pmap_page_protect: "
"demap va %p of pg %p from pm %p...\n",
@ -2793,7 +2791,6 @@ pmap_page_protect(pg, prot)
if (pmap->pm_refs > 0) {
needflush = TRUE;
}
simple_unlock(&pmap->pm_lock);
npv = pv->pv_next;
/* dump the first pv */
if (npv) {
@ -2801,7 +2798,8 @@ pmap_page_protect(pg, prot)
pv->pv_pmap = npv->pv_pmap;
pv->pv_va |= npv->pv_va & PV_MASK;
pv->pv_next = npv->pv_next;
pool_put(&pmap_pv_pool, npv);
npv->pv_next = freepv;
freepv = npv;
} else {
pv->pv_pmap = NULL;
pv->pv_next = NULL;
@ -2813,6 +2811,13 @@ pmap_page_protect(pg, prot)
}
/* We should really only flush the pages we demapped. */
pv_check();
mutex_exit(&pmap_lock);
/* Catch up on deferred frees. */
for (; freepv != NULL; freepv = npv) {
npv = freepv->pv_next;
pool_cache_put(&pmap_pv_cache, freepv);
}
}
#ifdef PMAP_COUNT_DEBUG
@ -2828,7 +2833,7 @@ pmap_count_res(struct pmap *pm)
/* Almost the same as pmap_collect() */
/* Don't want one of these pages reused while we're reading it. */
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock);
n = 0;
for (i = 0; i < STSZ; i++) {
pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
@ -2850,7 +2855,7 @@ pmap_count_res(struct pmap *pm)
}
}
}
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
if (pm->pm_stats.resident_count != n)
printf("pmap_count_resident: pm_stats = %ld, counted: %d\n",
@ -2871,7 +2876,7 @@ pmap_count_wired(struct pmap *pm)
/* Almost the same as pmap_collect() */
/* Don't want one of these pages reused while we're reading it. */
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock);
n = 0;
for (i = 0; i < STSZ; i++) {
pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
@ -2893,7 +2898,7 @@ pmap_count_wired(struct pmap *pm)
}
}
}
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
if (pm->pm_stats.wired_count != n)
printf("pmap_count_wired: pm_stats = %ld, counted: %d\n",
@ -2921,7 +2926,7 @@ ctx_alloc(struct pmap *pm)
KASSERT(pm != pmap_kernel());
KASSERT(pm == curproc->p_vmspace->vm_map.pmap);
simple_lock(&pm->pm_lock);
mutex_enter(&pmap_lock); /* XXXAD ctxswitch */
ctx = pmap_next_ctx++;
/*
@ -2949,7 +2954,7 @@ ctx_alloc(struct pmap *pm)
ctxbusy[ctx] = pm->pm_physaddr;
LIST_INSERT_HEAD(&pmap_ctxlist, pm, pm_list);
pm->pm_ctx = ctx;
simple_unlock(&pm->pm_lock);
mutex_exit(&pmap_lock);
DPRINTF(PDB_CTX_ALLOC, ("ctx_alloc: allocated ctx %d\n", ctx));
return ctx;
}
@ -3000,6 +3005,8 @@ pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa, struct vm_page *pg,
{
pv_entry_t pvh;
KASSERT(mutex_owned(&pmap_lock));
pvh = &pg->mdpage.mdpg_pvh;
DPRINTF(PDB_ENTER, ("pmap_enter: pvh %p: was %lx/%p/%p\n",
pvh, pvh->pv_va, pvh->pv_pmap, pvh->pv_next));
@ -3060,6 +3067,8 @@ pmap_remove_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg)
pv_entry_t pvh, npv, pv;
int64_t data = 0;
KASSERT(mutex_owned(&pmap_lock));
pvh = &pg->mdpage.mdpg_pvh;
DPRINTF(PDB_REMOVE, ("pmap_remove_pv(pm=%p, va=%p, pg=%p)\n", pmap,
@ -3135,14 +3144,14 @@ pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
vaddr_t va;
int rv;
KASSERT(mutex_owned(&pmap_lock));
DPRINTF(PDB_ENTER, ("pmap_page_uncache(%llx)\n",
(unsigned long long)pa));
pg = PHYS_TO_VM_PAGE(pa);
pv = &pg->mdpage.mdpg_pvh;
while (pv) {
va = pv->pv_va & PV_VAMASK;
if (pv->pv_pmap != pm)
simple_lock(&pv->pv_pmap->pm_lock);
if (pv->pv_va & PV_NC) {
int64_t data;
@ -3174,8 +3183,6 @@ pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
panic("pmap_page_cache: pseg_set needs"
" spare! rv=%d\n", rv);
}
if (pv->pv_pmap != pm)
simple_unlock(&pv->pv_pmap->pm_lock);
if (pv->pv_pmap->pm_ctx || pv->pv_pmap == pmap_kernel()) {
/* Force reload -- cache bits have changed */
tsb_invalidate(pv->pv_pmap->pm_ctx, va);
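
The sparc64 pmap changes above replace the per-pmap simple_lock with a single global pmap_lock, switch the pmap and pv-entry pools to pool caches, and, in pmap_page_protect(), unlink pv entries onto a private freepv list so that pool_cache_put() runs only after the lock is dropped (the XXXAD defer comments mark the call sites still inside the lock). A sketch of that deferred-free idiom, assuming the pv_entry_t, pmap_pv_cache and pmap_lock names from the diff; unlink_next_pv() is a hypothetical helper.

#include <sys/mutex.h>
#include <sys/pool.h>

/*
 * Sketch: collect entries on a private list while the global lock is
 * held, then return them to the pool cache once the lock is released,
 * keeping allocator activity out of the critical section.
 */
static void
free_pv_list_deferred(struct vm_page *pg)
{
        pv_entry_t pv, freepv = NULL;

        mutex_enter(&pmap_lock);
        while ((pv = unlink_next_pv(pg)) != NULL) {	/* hypothetical */
                pv->pv_next = freepv;			/* chain onto private list */
                freepv = pv;
        }
        mutex_exit(&pmap_lock);

        /* Catch up on deferred frees, outside the pmap lock. */
        while (freepv != NULL) {
                pv = freepv->pv_next;
                pool_cache_put(&pmap_pv_cache, freepv);
                freepv = pv;
        }
}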

View File

@ -1,4 +1,4 @@
/* $NetBSD: fd.c,v 1.60 2007/12/04 15:12:07 tsutsui Exp $ */
/* $NetBSD: fd.c,v 1.61 2008/01/02 11:48:30 ad Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -72,7 +72,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.60 2007/12/04 15:12:07 tsutsui Exp $");
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.61 2008/01/02 11:48:30 ad Exp $");
#include "opt_ddb.h"
@ -1765,18 +1765,19 @@ fdioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
int
fdformat(dev_t dev, struct ne7_fd_formb *finfo, struct proc *p)
{
int rv = 0, s;
int rv = 0;
struct fd_softc *fd = fd_cd.cd_devs[FDUNIT(dev)];
struct fd_type *type = fd->sc_type;
struct buf *bp;
/* set up a buffer header for fdstrategy() */
bp = (struct buf *)malloc(sizeof(struct buf), M_TEMP, M_NOWAIT);
bp = getiobuf(NULL, false);
if (bp == 0)
return (ENOBUFS);
memset((void *)bp, 0, sizeof(struct buf));
bp->b_flags = B_BUSY | B_PHYS | B_FORMAT;
bp->b_flags = B_PHYS | B_FORMAT;
bp->b_cflags = BC_BUSY;
bp->b_proc = p;
bp->b_dev = dev;
@ -1800,13 +1801,14 @@ fdformat(dev_t dev, struct ne7_fd_formb *finfo, struct proc *p)
fdstrategy(bp);
/* ...and wait for it to complete */
s = splbio();
while (!(bp->b_flags & B_DONE)) {
rv = tsleep((void *)bp, PRIBIO, "fdform", 20 * hz);
/* XXX dodgy */
mutex_enter(bp->b_objlock);
while (!(bp->b_oflags & BO_DONE)) {
rv = cv_timedwait(&bp->b_done, bp->b_objlock, 20 * hz);
if (rv == EWOULDBLOCK)
break;
}
splx(s);
mutex_exit(bp->b_objlock);
if (rv == EWOULDBLOCK) {
/* timed out */
@ -1814,7 +1816,7 @@ fdformat(dev_t dev, struct ne7_fd_formb *finfo, struct proc *p)
biodone(bp);
} else if (bp->b_error != 0)
rv = bp->b_error;
free(bp, M_TEMP);
putiobuf(bp);
return (rv);
}
@ -1943,14 +1945,13 @@ fd_read_md_image(size_t *sizep, void **addrp)
bp->b_error = 0;
bp->b_resid = 0;
bp->b_proc = NULL;
bp->b_flags = B_BUSY | B_PHYS | B_RAW | B_READ;
bp->b_flags = B_PHYS | B_RAW | B_READ;
bp->b_cflags = BC_BUSY;
bp->b_blkno = btodb(offset);
bp->b_bcount = DEV_BSIZE;
bp->b_data = addr;
fdstrategy(bp);
while ((bp->b_flags & B_DONE) == 0) {
tsleep((void *)bp, PRIBIO + 1, "physio", 0);
}
biowait(bp);
if (bp->b_error)
panic("fd: mountroot: fdread error %d", bp->b_error);

View File

@ -1,4 +1,4 @@
/* $NetBSD: xd.c,v 1.60 2007/10/17 19:57:45 garbled Exp $ */
/* $NetBSD: xd.c,v 1.61 2008/01/02 11:48:30 ad Exp $ */
/*
*
@ -52,7 +52,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.60 2007/10/17 19:57:45 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.61 2008/01/02 11:48:30 ad Exp $");
#undef XDC_DEBUG /* full debug */
#define XDC_DIAG /* extra sanity checks */
@ -309,8 +309,8 @@ xddummystrat(struct buf *bp)
if (bp->b_bcount != XDFM_BPS)
panic("xddummystrat");
memcpy(bp->b_data, xd_labeldata, XDFM_BPS);
bp->b_flags |= B_DONE;
bp->b_flags &= ~B_BUSY;
bp->b_oflags |= BO_DONE;
bp->b_cflags &= ~BC_BUSY;
}
int

View File

@ -1,4 +1,4 @@
/* $NetBSD: xy.c,v 1.63 2007/10/17 19:57:45 garbled Exp $ */
/* $NetBSD: xy.c,v 1.64 2008/01/02 11:48:30 ad Exp $ */
/*
*
@ -52,7 +52,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.63 2007/10/17 19:57:45 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.64 2008/01/02 11:48:30 ad Exp $");
#undef XYC_DEBUG /* full debug */
#undef XYC_DIAG /* extra sanity checks */
@ -247,8 +247,8 @@ xydummystrat(struct buf *bp)
if (bp->b_bcount != XYFM_BPS)
panic("xydummystrat");
memcpy(bp->b_data, xy_labeldata, XYFM_BPS);
bp->b_flags |= B_DONE;
bp->b_flags &= ~B_BUSY;
bp->b_oflags |= BO_DONE;
bp->b_cflags &= ~BC_BUSY;
}
int

View File

@ -1,4 +1,4 @@
/* $NetBSD: ts.c,v 1.34 2007/10/17 19:57:58 garbled Exp $ */
/* $NetBSD: ts.c,v 1.35 2008/01/02 11:48:31 ad Exp $ */
/*-
* Copyright (c) 1991 The Regents of the University of California.
@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ts.c,v 1.34 2007/10/17 19:57:58 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: ts.c,v 1.35 2008/01/02 11:48:31 ad Exp $");
#define TS11_COMPAT /* don't use extended features provided by TS05 */
@ -371,27 +371,15 @@ tscommand (dev, cmd, count)
int count;
{
register struct buf *bp;
register int s;
trace (("tscommand (%d, %x, %d)\n", TS_UNIT(dev), cmd, count));
s = splbio();
bp = &ts_cbuf[TS_UNIT(dev)];
while (bp->b_flags & B_BUSY) {
/*
* This special check is because B_BUSY never
* gets cleared in the non-waiting rewind case. ???
*/
if (bp->b_bcount == 0 && (bp->b_flags & B_DONE))
break;
bp->b_flags |= B_WANTED;
(void) tsleep(bp, PRIBIO, "tscmd", 0);
/* check MOT-flag !!! */
}
bp->b_flags = B_BUSY | B_READ;
splx(s);
mutex_enter(&bufcache_lock);
while (bbusy(bp, false, 0) != 0)
;
mutex_exit(&bufcache_lock);
bp->b_flags |= B_READ;
/*
* Load the buffer. The b_count field gets used to hold the command
@ -414,8 +402,10 @@ tscommand (dev, cmd, count)
}
debug (("tscommand: calling biowait ...\n"));
biowait (bp);
if (bp->b_flags & B_WANTED)
wakeup ((void *)bp);
mutex_enter(&bufcache_lock);
bp->b_flags &= ~B_WANTED;
cv_broadcast(&bp->b_busy);
mutex_exit(&bufcache_lock);
bp->b_error = 0;
}
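
tscommand() above stops open-coding the B_BUSY/B_WANTED handshake on its static command buffer and instead claims it with bbusy() under bufcache_lock, waking the next waiter through the b_busy condition variable when the command finishes. A sketch of that claim/release pair, using the three-argument bbusy() the diff itself calls; the helper names are hypothetical.

#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/mutex.h>

/* Sketch: take exclusive ownership of a statically allocated buffer. */
static void
claim_cmd_buf(struct buf *bp)
{
        mutex_enter(&bufcache_lock);
        while (bbusy(bp, false, 0) != 0)
                ;			/* retry until BC_BUSY is ours */
        mutex_exit(&bufcache_lock);
}

/* Sketch: drop ownership and wake anyone parked inside bbusy(). */
static void
release_cmd_buf(struct buf *bp)
{
        mutex_enter(&bufcache_lock);
        bp->b_cflags &= ~BC_BUSY;
        cv_broadcast(&bp->b_busy);
        mutex_exit(&bufcache_lock);
}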

View File

@ -1,4 +1,4 @@
/* $NetBSD: cfl.c,v 1.16 2007/10/17 19:57:59 garbled Exp $ */
/* $NetBSD: cfl.c,v 1.17 2008/01/02 11:48:31 ad Exp $ */
/*-
* Copyright (c) 1982, 1986 The Regents of the University of California.
* All rights reserved.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cfl.c,v 1.16 2007/10/17 19:57:59 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: cfl.c,v 1.17 2008/01/02 11:48:31 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -195,16 +195,17 @@ cflrw(dev, uio, flag)
break;
}
if (uio->uio_rw == UIO_WRITE) {
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
} else {
bp->b_flags &= ~(B_WRITE|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_WRITE);
bp->b_flags |= B_READ;
}
s = splconsmedia();
cflstart();
while ((bp->b_flags & B_DONE) == 0)
(void) tsleep(bp, PRIBIO, "cflrw", 0);
biowait(bp);
splx(s);
if (bp->b_error != 0) {
error = bp->b_error;
@ -284,7 +285,6 @@ void
cflrint(int ch)
{
struct buf *bp = cfltab.cfl_buf;
int s;
switch (cfltab.cfl_active) {
case CFL_NEXT:
@ -292,10 +292,10 @@ cflrint(int ch)
cfltab.cfl_active = CFL_GETIN;
else {
cfltab.cfl_active = CFL_IDLE;
s = splbio();
bp->b_flags |= B_DONE;
splx(s);
wakeup(bp);
mutex_enter(bp->b_objlock);
bp->b_oflags |= BO_DONE;
cv_broadcast(&bp->b_done);
mutex_exit(bp->b_objlock);
}
break;
@ -303,10 +303,10 @@ cflrint(int ch)
*cfltab.cfl_xaddr++ = ch & 0377;
if (--bp->b_bcount==0) {
cfltab.cfl_active = CFL_IDLE;
s = splbio();
bp->b_flags |= B_DONE;
splx(s);
wakeup(bp);
mutex_enter(bp->b_objlock);
bp->b_oflags |= BO_DONE;
cv_broadcast(&bp->b_done);
mutex_exit(bp->b_objlock);
}
break;
}
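
cflrint() above now completes the buffer from interrupt context by setting BO_DONE and broadcasting b_done under b_objlock, and the sleeping side in cflrw() collapses into a plain biowait(). A sketch of that completion half follows; a driver could equally just call biodone(bp), so treat this as an illustration of what the open-coded form in the diff is doing.

#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/mutex.h>

/*
 * Sketch: mark a buffer done from a completion/interrupt path so that
 * a thread blocked in biowait() (or in a BO_DONE wait loop) wakes up.
 * Assumes the submitter pointed b_objlock at a valid mutex.
 */
static void
complete_buf(struct buf *bp, int error)
{
        mutex_enter(bp->b_objlock);
        bp->b_error = error;
        bp->b_oflags |= BO_DONE;
        cv_broadcast(&bp->b_done);
        mutex_exit(bp->b_objlock);
}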

View File

@ -1,4 +1,4 @@
/* $NetBSD: crl.c,v 1.23 2007/10/17 19:57:59 garbled Exp $ */
/* $NetBSD: crl.c,v 1.24 2008/01/02 11:48:31 ad Exp $ */
/*-
* Copyright (c) 1982, 1986 The Regents of the University of California.
* All rights reserved.
@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crl.c,v 1.23 2007/10/17 19:57:59 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: crl.c,v 1.24 2008/01/02 11:48:31 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -150,16 +150,17 @@ crlrw(dev, uio, flag)
break;
}
if (uio->uio_rw == UIO_WRITE) {
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
} else {
bp->b_flags &= ~(B_WRITE|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_WRITE);
bp->b_flags |= B_READ;
}
s = splconsmedia();
crlstart();
while ((bp->b_flags & B_DONE) == 0)
(void) tsleep(bp, PRIBIO, "crlrw", 0);
biowait(bp);
splx(s);
if (bp->b_error != 0) {
error = bp->b_error;
@ -231,7 +232,7 @@ crlintr(arg)
case CRL_F_READ:
case CRL_F_WRITE:
bp->b_flags |= B_DONE;
bp->b_oflags |= BO_DONE;
}
crltab.crl_active = 0;
wakeup((void *)bp);
@ -254,7 +255,7 @@ crlintr(arg)
case CRL_S_ABORT:
crltab.crl_active = CRL_F_RETSTS;
mtpr(STXCS_IE | CRL_F_RETSTS, PR_STXCS);
bp->b_flags |= B_DONE;
bp->b_oflags |= BO_DONE;
bp->b_error = EIO;
break;
@ -266,9 +267,9 @@ crlintr(arg)
case CRL_S_HNDSHK:
printf("crl: hndshk error\n"); /* dump out some status too? */
crltab.crl_active = 0;
bp->b_flags |= B_DONE;
bp->b_oflags |= BO_DONE;
bp->b_error = EIO;
wakeup((void *)bp);
cv_broadcast(&bp->b_done);
break;
case CRL_S_HWERR:

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.45 2007/10/17 19:57:59 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.46 2008/01/02 11:48:31 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.45 2007/10/17 19:57:59 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.46 2008/01/02 11:48:31 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -263,7 +263,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *),
goto done;
dlp = (struct disklabel *)((char *)bp->b_data + LABELOFFSET);
bcopy(lp, dlp, sizeof(struct disklabel));
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.32 2007/10/17 19:58:04 garbled Exp $ */
/* $NetBSD: disksubr.c,v 1.33 2008/01/02 11:48:32 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.32 2007/10/17 19:58:04 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: disksubr.c,v 1.33 2008/01/02 11:48:32 ad Exp $");
#include "opt_compat_netbsd.h"
@ -138,7 +138,7 @@ dodospart:
labelsz = howmany(sizeof(struct cpu_disklabel),
lp->d_secsize) * lp->d_secsize;
bp->b_bcount = labelsz; /* to support < 512B/sector disks */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
(*strat)(bp);
/* if successful, wander through Human68k partition table */
@ -213,7 +213,7 @@ dobadsect:
i = 0;
do {
/* read a bad sector table */
bp->b_flags &= ~(B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i;
if (lp->d_secsize > DEF_BSIZE)
@ -349,7 +349,8 @@ writedisklabel(dev_t dev, void (*strat)(struct buf *),
if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
dkcksum(dlp) == 0) {
*dlp = *lp;
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);
@ -368,7 +369,7 @@ dodospart:
/* read the x68k disk magic */
bp->b_blkno = DOSBBSECTOR;
bp->b_bcount = lp->d_secsize;
bp->b_flags &= ~(B_WRITE|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_cylinder = DOSBBSECTOR / lp->d_secpercyl;
(*strat)(bp);
@ -381,7 +382,7 @@ dodospart:
labelsz = howmany(sizeof(struct cpu_disklabel),
lp->d_secsize) * lp->d_secsize;
bp->b_bcount = labelsz;
bp->b_flags &= ~(B_WRITE|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags |= B_READ;
bp->b_cylinder = DOSPARTOFF / lp->d_secpercyl;
(*strat)(bp);
@ -441,7 +442,8 @@ dodospart:
dp->dp_start = start;
dp->dp_size = size;
}
bp->b_flags &= ~(B_READ|B_DONE);
bp->b_oflags &= ~(BO_DONE);
bp->b_flags &= ~(B_READ);
bp->b_flags |= B_WRITE;
(*strat)(bp);
error = biowait(bp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.15 2007/12/20 23:46:11 ad Exp $ */
/* $NetBSD: pmap.c,v 1.16 2008/01/02 11:48:33 ad Exp $ */
/*
* Copyright (c) 2007 Manuel Bouyer.
@ -154,7 +154,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.15 2007/12/20 23:46:11 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.16 2008/01/02 11:48:33 ad Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@ -364,20 +364,6 @@ long nbpd[] = NBPD_INITIALIZER;
pd_entry_t *normal_pdes[] = PDES_INITIALIZER;
pd_entry_t *alternate_pdes[] = APDES_INITIALIZER;
/*
* locking data structures. to enable the locks, changes from the
* 'vmlocking' cvs branch are required. for now, just stub them out.
*/
#define rw_enter(a, b) /* nothing */
#define rw_exit(a) /* nothing */
#define mutex_enter(a) simple_lock(a)
#define mutex_exit(a) simple_unlock(a)
#define mutex_init(a, b, c) simple_lock_init(a)
#define mutex_owned(a) (1)
#define mutex_destroy(a) /* nothing */
#define kmutex_t struct simplelock
static kmutex_t pmaps_lock;
static krwlock_t pmap_main_lock;
@ -1986,8 +1972,6 @@ pmap_destroy(struct pmap *pmap)
* remove it from global list of pmaps
*/
KERNEL_LOCK(1, NULL);
mutex_enter(&pmaps_lock);
LIST_REMOVE(pmap, pm_list);
mutex_exit(&pmaps_lock);
@ -2025,8 +2009,6 @@ pmap_destroy(struct pmap *pmap)
for (i = 0; i < PTP_LEVELS - 1; i++)
mutex_destroy(&pmap->pm_obj[i].vmobjlock);
pool_cache_put(&pmap_cache, pmap);
KERNEL_UNLOCK_ONE(NULL);
}
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: xbdback.c,v 1.28 2007/11/26 19:01:27 pooka Exp $ */
/* $NetBSD: xbdback.c,v 1.29 2008/01/02 11:48:33 ad Exp $ */
/*
* Copyright (c) 2005 Manuel Bouyer.
@ -853,6 +853,8 @@ xbdback_co_io_gotreq(struct xbdback_instance *xbdi, void *obj)
static void *
xbdback_co_io_loop(struct xbdback_instance *xbdi, void *obj)
{
struct xbdback_io *xio;
(void)obj;
if (xbdi->segno < xbdi->xen_req->nr_segments) {
unsigned long this_fas, last_fas;
@ -898,7 +900,9 @@ xbdback_co_io_loop(struct xbdback_instance *xbdi, void *obj)
if (xbdi->io == NULL) {
xbdi->cont = xbdback_co_io_gotio;
return xbdback_pool_get(&xbdback_io_pool, xbdi);
xio = xbdback_pool_get(&xbdback_io_pool, xbdi);
buf_init(&xio->xio_buf);
return xio;
} else {
xbdi->cont = xbdback_co_io_gotio2;
}
@ -929,16 +933,18 @@ xbdback_co_io_gotio(struct xbdback_instance *xbdi, void *obj)
start_offset = blkif_first_sect(xbdi->this_fas) * VBD_BSIZE;
if (xbdi->xen_req->operation == BLKIF_OP_WRITE) {
buf_flags = B_WRITE | B_CALL;
buf_flags = B_WRITE;
} else {
buf_flags = B_READ | B_CALL;
buf_flags = B_READ;
}
BUF_INIT(&xbd_io->xio_buf);
xbd_io->xio_buf.b_flags = buf_flags;
xbd_io->xio_buf.b_cflags = 0;
xbd_io->xio_buf.b_oflags = 0;
xbd_io->xio_buf.b_iodone = xbdback_iodone;
xbd_io->xio_buf.b_proc = NULL;
xbd_io->xio_buf.b_vp = xbdi->req_vbd->vp;
xbd_io->xio_buf.b_objlock = &xbdi->req_vbd->vp->v_interlock;
xbd_io->xio_buf.b_dev = xbdi->req_vbd->dev;
xbd_io->xio_buf.b_blkno = xbdi->next_sector;
xbd_io->xio_buf.b_bcount = 0;
@ -1133,12 +1139,14 @@ xbdback_iodone(struct buf *bp)
xbdback_pool_put(&xbdback_request_pool, xbd_req);
}
xbdi_put(xbdi);
buf_destroy(&xbd_io->xio_buf);
xbdback_pool_put(&xbdback_io_pool, xbd_io);
}
static void *
xbdback_co_probe(struct xbdback_instance *xbdi, void *obj)
{
struct xbdback_io *xio;
(void)obj;
/*
* There should be only one page in the request. Map it and store
@ -1153,7 +1161,9 @@ xbdback_co_probe(struct xbdback_instance *xbdi, void *obj)
return xbdi;
}
xbdi->cont = xbdback_co_probe_gotio;
return xbdback_pool_get(&xbdback_io_pool, xbdi);
xio = xbdback_pool_get(&xbdback_io_pool, xbdi);
buf_init(&xio->xio_buf);
return xio;
}
static void *
@ -1202,6 +1212,7 @@ xbdback_co_probe_gotvm(struct xbdback_instance *xbdi, void *obj)
xbdback_unmap_shm(xbdi->io);
XENPRINTF(("xbdback_probe: nreplies=%d\n", i));
xbdback_send_reply(xbdi, req->id, req->operation, i);
buf_destroy(&xbdi->io->xio_buf);
xbdback_pool_put(&xbdback_io_pool, xbdi->io);
xbdi->io = NULL;
xbdi->cont = xbdback_co_main_incr;
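
The xbdback hunks above drop BUF_INIT/B_CALL in favour of running buf_init() when an xbdback_io is taken from its pool and buf_destroy() before it goes back, and they point b_objlock at the target vnode's interlock so biodone() has a lock to complete against. A sketch of that life cycle around an embedded struct buf; xio_buf and xbdback_iodone are names from the diff, the helpers themselves are hypothetical.

#include <sys/buf.h>
#include <sys/vnode.h>

/* Sketch: prepare an embedded buffer for one backend I/O request. */
static void
xio_buf_setup(struct buf *bp, struct vnode *vp, dev_t dev, int rw_flag)
{
        buf_init(bp);			/* sets up the cv's and default locks */
        bp->b_flags = rw_flag;		/* B_READ or B_WRITE only; B_CALL is gone */
        bp->b_cflags = 0;
        bp->b_oflags = 0;
        bp->b_iodone = xbdback_iodone;	/* biodone() calls this whenever it is non-NULL */
        bp->b_proc = NULL;
        bp->b_vp = vp;
        bp->b_objlock = &vp->v_interlock;
        bp->b_dev = dev;
}

/* Sketch: tear the buffer down before the containing object is recycled. */
static void
xio_buf_teardown(struct buf *bp)
{
        buf_destroy(bp);
}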

View File

@ -1,4 +1,4 @@
/* $NetBSD: xbdback_xenbus.c,v 1.11 2007/11/26 19:01:28 pooka Exp $ */
/* $NetBSD: xbdback_xenbus.c,v 1.12 2008/01/02 11:48:33 ad Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@ -953,6 +953,8 @@ xbdback_co_io_gotreq(struct xbdback_instance *xbdi, void *obj)
static void *
xbdback_co_io_loop(struct xbdback_instance *xbdi, void *obj)
{
struct xbdback_io *xio;
(void)obj;
if (xbdi->xbdi_segno < xbdi->xbdi_xen_req.nr_segments) {
uint8_t this_fs, this_ls, last_fs, last_ls;
@ -1014,7 +1016,8 @@ xbdback_co_io_loop(struct xbdback_instance *xbdi, void *obj)
if (xbdi->xbdi_io == NULL) {
xbdi->xbdi_cont = xbdback_co_io_gotio;
return xbdback_pool_get(&xbdback_io_pool, xbdi);
xio = xbdback_pool_get(&xbdback_io_pool, xbdi);
buf_init(&xio->xio_buf);
} else {
xbdi->xbdi_cont = xbdback_co_io_gotio2;
}
@ -1045,16 +1048,18 @@ xbdback_co_io_gotio(struct xbdback_instance *xbdi, void *obj)
start_offset = xbdi->xbdi_this_fs * VBD_BSIZE;
if (xbdi->xbdi_xen_req.operation == BLKIF_OP_WRITE) {
buf_flags = B_WRITE | B_CALL;
buf_flags = B_WRITE;
} else {
buf_flags = B_READ | B_CALL;
buf_flags = B_READ;
}
BUF_INIT(&xbd_io->xio_buf);
xbd_io->xio_buf.b_flags = buf_flags;
xbd_io->xio_buf.b_cflags = 0;
xbd_io->xio_buf.b_oflags = 0;
xbd_io->xio_buf.b_iodone = xbdback_iodone;
xbd_io->xio_buf.b_proc = NULL;
xbd_io->xio_buf.b_vp = xbdi->xbdi_vp;
xbd_io->xio_buf.b_objlock = &xbdi->xbdi_vp->v_interlock;
xbd_io->xio_buf.b_dev = xbdi->xbdi_dev;
xbd_io->xio_buf.b_blkno = xbdi->xbdi_next_sector;
xbd_io->xio_buf.b_bcount = 0;
@ -1249,6 +1254,7 @@ xbdback_iodone(struct buf *bp)
xbdback_pool_put(&xbdback_request_pool, xbd_req);
}
xbdi_put(xbdi);
buf_destroy(&xbd_io->xio_buf);
xbdback_pool_put(&xbdback_io_pool, xbd_io);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: coda_vnops.c,v 1.65 2007/12/25 18:33:35 perry Exp $ */
/* $NetBSD: coda_vnops.c,v 1.66 2008/01/02 11:48:34 ad Exp $ */
/*
*
@ -46,7 +46,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.65 2007/12/25 18:33:35 perry Exp $");
__KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.66 2008/01/02 11:48:34 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -872,7 +872,7 @@ coda_inactive(void *v)
printf("coda_inactive: %p ovp != NULL\n", vp);
}
VOP_UNLOCK(vp, 0);
vgone(vp);
*ap->a_recycle = true;
}
MARK_INT_SAT(CODA_INACTIVE_STATS);
@ -2002,7 +2002,7 @@ coda_getpages(void *v)
/* Check for control object. */
if (IS_CTL_VP(vp)) {
printf("coda_getpages: control object %p\n", vp);
simple_unlock(&vp->v_uobj.vmobjlock);
mutex_exit(&vp->v_uobj.vmobjlock);
return(EINVAL);
}
@ -2017,7 +2017,7 @@ coda_getpages(void *v)
waslocked = VOP_ISLOCKED(vp);
/* Drop the vmobject lock. */
simple_unlock(&vp->v_uobj.vmobjlock);
mutex_exit(&vp->v_uobj.vmobjlock);
/* Get container file if not already present. */
if (cp->c_ovp == NULL) {
@ -2065,7 +2065,7 @@ coda_getpages(void *v)
ap->a_vp = cp->c_ovp;
/* Get the lock on the container vnode, and call getpages on it. */
simple_lock(&ap->a_vp->v_uobj.vmobjlock);
mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
/* If we opened the vnode, we must close it. */
@ -2106,7 +2106,7 @@ coda_putpages(void *v)
int error;
/* Drop the vmobject lock. */
simple_unlock(&vp->v_uobj.vmobjlock);
mutex_exit(&vp->v_uobj.vmobjlock);
/* Check for control object. */
if (IS_CTL_VP(vp)) {
@ -2127,7 +2127,7 @@ coda_putpages(void *v)
ap->a_vp = cp->c_ovp;
/* Get the lock on the container vnode, and call putpages on it. */
simple_lock(&ap->a_vp->v_uobj.vmobjlock);
mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
return error;

View File

@ -1,4 +1,4 @@
/* $NetBSD: svr4_fcntl.c,v 1.64 2007/12/20 23:03:04 dsl Exp $ */
/* $NetBSD: svr4_fcntl.c,v 1.65 2008/01/02 11:48:35 ad Exp $ */
/*-
* Copyright (c) 1994, 1997 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: svr4_fcntl.c,v 1.64 2007/12/20 23:03:04 dsl Exp $");
__KERNEL_RCSID(0, "$NetBSD: svr4_fcntl.c,v 1.65 2008/01/02 11:48:35 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -253,9 +253,9 @@ fd_revoke(struct lwp *l, int fd, register_t *retval)
KAUTH_GENERIC_ISSUSER, NULL)) != 0)
goto out;
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
revoke = (vp->v_usecount > 1 || (vp->v_iflag & VI_ALIASED));
simple_unlock(&vp->v_interlock);
mutex_exit(&vp->v_interlock);
if (revoke)
VOP_REVOKE(vp, REVOKEALL);
out:

View File

@ -1,4 +1,4 @@
/* $NetBSD: ata_raid.c,v 1.23 2007/07/09 21:00:30 ad Exp $ */
/* $NetBSD: ata_raid.c,v 1.24 2008/01/02 11:48:36 ad Exp $ */
/*
* Copyright (c) 2003 Wasabi Systems, Inc.
@ -40,7 +40,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ata_raid.c,v 1.23 2007/07/09 21:00:30 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: ata_raid.c,v 1.24 2008/01/02 11:48:36 ad Exp $");
#include <sys/param.h>
#include <sys/buf.h>
@ -294,8 +294,7 @@ ata_raid_config_block_rw(struct vnode *vp, daddr_t blkno, void *tbuf,
struct buf *bp;
int error;
bp = getiobuf();
bp->b_vp = vp;
bp = getiobuf(vp, NULL);
bp->b_blkno = blkno;
bp->b_bcount = bp->b_resid = size;
bp->b_flags = bflags;

View File

@ -1,4 +1,4 @@
/* $NetBSD: ld_ataraid.c,v 1.22 2007/11/26 19:01:36 pooka Exp $ */
/* $NetBSD: ld_ataraid.c,v 1.23 2008/01/02 11:48:37 ad Exp $ */
/*
* Copyright (c) 2003 Wasabi Systems, Inc.
@ -45,7 +45,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_ataraid.c,v 1.22 2007/11/26 19:01:36 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: ld_ataraid.c,v 1.23 2008/01/02 11:48:37 ad Exp $");
#include "rnd.h"
@ -246,8 +246,10 @@ ld_ataraid_make_cbuf(struct ld_ataraid_softc *sc, struct buf *bp,
cbp = CBUF_GET();
if (cbp == NULL)
return (NULL);
BUF_INIT(&cbp->cb_buf);
cbp->cb_buf.b_flags = bp->b_flags | B_CALL;
buf_init(&cbp->cb_buf);
cbp->cb_buf.b_flags = bp->b_flags;
cbp->cb_buf.b_oflags = bp->b_oflags;
cbp->cb_buf.b_cflags = bp->b_cflags;
cbp->cb_buf.b_iodone = sc->sc_iodone;
cbp->cb_buf.b_proc = bp->b_proc;
cbp->cb_buf.b_vp = sc->sc_vnodes[comp];
@ -303,6 +305,7 @@ ld_ataraid_start_span(struct ld_softc *ld, struct buf *bp)
/* Free the already allocated component buffers. */
while ((cbp = SIMPLEQ_FIRST(&cbufq)) != NULL) {
SIMPLEQ_REMOVE_HEAD(&cbufq, cb_q);
buf_destroy(&cbp->cb_buf);
CBUF_PUT(cbp);
}
return (EAGAIN);
@ -400,6 +403,7 @@ free_and_exit:
/* Free the already allocated component buffers. */
while ((cbp = SIMPLEQ_FIRST(&cbufq)) != NULL) {
SIMPLEQ_REMOVE_HEAD(&cbufq, cb_q);
buf_destroy(&cbp->cb_buf);
CBUF_PUT(cbp);
}
return (error);

View File

@ -1,4 +1,4 @@
/* $NetBSD: wd.c,v 1.354 2007/12/18 15:30:40 joerg Exp $ */
/* $NetBSD: wd.c,v 1.355 2008/01/02 11:48:37 ad Exp $ */
/*
* Copyright (c) 1998, 2001 Manuel Bouyer. All rights reserved.
@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: wd.c,v 1.354 2007/12/18 15:30:40 joerg Exp $");
__KERNEL_RCSID(0, "$NetBSD: wd.c,v 1.355 2008/01/02 11:48:37 ad Exp $");
#include "opt_ata.h"
@ -670,7 +670,9 @@ wd_split_mod15_write(struct buf *bp)
* Advance the pointer to the second half and issue that command
* using the same opening.
*/
bp->b_flags = obp->b_flags | B_CALL;
bp->b_flags = obp->b_flags;
bp->b_oflags = obp->b_oflags;
bp->b_cflags = obp->b_cflags;
bp->b_data = (char *)bp->b_data + bp->b_bcount;
bp->b_blkno += (bp->b_bcount / 512);
bp->b_rawblkno += (bp->b_bcount / 512);
@ -705,7 +707,7 @@ __wdstart(struct wd_softc *wd, struct buf *bp)
struct buf *nbp;
/* already at splbio */
nbp = getiobuf_nowait();
nbp = getiobuf(NULL, false);
if (__predict_false(nbp == NULL)) {
/* No memory -- fail the iop. */
bp->b_error = ENOMEM;
@ -717,7 +719,6 @@ __wdstart(struct wd_softc *wd, struct buf *bp)
nbp->b_error = 0;
nbp->b_proc = bp->b_proc;
nbp->b_vp = NULLVP;
nbp->b_dev = bp->b_dev;
nbp->b_bcount = bp->b_bcount / 2;
@ -727,7 +728,9 @@ __wdstart(struct wd_softc *wd, struct buf *bp)
nbp->b_blkno = bp->b_blkno;
nbp->b_rawblkno = bp->b_rawblkno;
nbp->b_flags = bp->b_flags | B_CALL;
nbp->b_flags = bp->b_flags;
nbp->b_oflags = bp->b_oflags;
nbp->b_cflags = bp->b_cflags;
nbp->b_iodone = wd_split_mod15_write;
/* Put ptr to orig buf in b_private and use new buf */
@ -881,8 +884,7 @@ noerror: if ((wd->sc_wdc_bio.flags & ATA_CORR) || wd->retries > 0)
rnd_add_uint32(&wd->rnd_source, bp->b_blkno);
#endif
/* XXX Yuck, but we don't want to increment openings in this case */
if (__predict_false((bp->b_flags & B_CALL) != 0 &&
bp->b_iodone == wd_split_mod15_write))
if (__predict_false(bp->b_iodone == wd_split_mod15_write))
biodone(bp);
else {
biodone(bp);
@ -1957,7 +1959,7 @@ wi_get(void)
int s;
wi = malloc(sizeof(struct wd_ioctl), M_TEMP, M_WAITOK|M_ZERO);
simple_lock_init(&wi->wi_bp.b_interlock);
buf_init(&wi->wi_bp);
s = splbio();
LIST_INSERT_HEAD(&wi_head, wi, wi_list);
splx(s);
@ -1976,6 +1978,7 @@ wi_free(struct wd_ioctl *wi)
s = splbio();
LIST_REMOVE(wi, wi_list);
splx(s);
buf_destroy(&wi->wi_bp);
free(wi, M_TEMP);
}
@ -2031,7 +2034,7 @@ wdioctlstrategy(struct buf *bp)
printf("wdioctlstrategy: "
"No matching ioctl request found in queue\n");
error = EINVAL;
goto done;
goto bad;
}
memset(&ata_c, 0, sizeof(ata_c));
@ -2043,7 +2046,7 @@ wdioctlstrategy(struct buf *bp)
if (bp->b_bcount != wi->wi_atareq.datalen) {
printf("physio split wd ioctl request... cannot proceed\n");
error = EIO;
goto done;
goto bad;
}
/*
@ -2055,7 +2058,7 @@ wdioctlstrategy(struct buf *bp)
(bp->b_bcount / wi->wi_softc->sc_dk.dk_label->d_secsize) >=
(1 << NBBY)) {
error = EINVAL;
goto done;
goto bad;
}
/*
@ -2064,7 +2067,7 @@ wdioctlstrategy(struct buf *bp)
if (wi->wi_atareq.timeout == 0) {
error = EINVAL;
goto done;
goto bad;
}
if (wi->wi_atareq.flags & ATACMD_READ)
@ -2092,8 +2095,7 @@ wdioctlstrategy(struct buf *bp)
if (wi->wi_softc->atabus->ata_exec_command(wi->wi_softc->drvp, &ata_c)
!= ATACMD_COMPLETE) {
wi->wi_atareq.retsts = ATACMD_ERROR;
error = EIO;
goto done;
goto bad;
}
if (ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
@ -2116,7 +2118,10 @@ wdioctlstrategy(struct buf *bp)
}
}
done:
bp->b_error = 0;
biodone(bp);
return;
bad:
bp->b_error = error;
biodone(bp);
}
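
The wd.c changes above retire B_CALL: biodone() now invokes b_iodone whenever it is non-NULL, so the mod15 write workaround just compares b_iodone directly, and the split request copies all three flag words to the child buffer. A sketch of wiring up such a nested transfer with an iodone callback; the helper and callback names are hypothetical, and bdev_strategy() stands in for the driver's own queueing.

#include <sys/buf.h>
#include <sys/errno.h>

/* Sketch: completion callback for the child buffer. */
static void
child_iodone(struct buf *nbp)
{
        struct buf *obp = nbp->b_private;

        obp->b_error = nbp->b_error;	/* propagate the result */
        putiobuf(nbp);
        biodone(obp);			/* finish the original request */
}

/* Sketch: issue part of 'obp' as a separate child transfer. */
static int
start_child_io(struct buf *obp)
{
        struct buf *nbp;

        nbp = getiobuf(NULL, false);
        if (nbp == NULL)
                return ENOMEM;

        nbp->b_flags = obp->b_flags;	/* copy all three flag words, as wd.c does */
        nbp->b_oflags = obp->b_oflags;
        nbp->b_cflags = obp->b_cflags;
        nbp->b_iodone = child_iodone;	/* no B_CALL needed any more */
        nbp->b_private = obp;
        nbp->b_proc = obp->b_proc;
        nbp->b_dev = obp->b_dev;
        nbp->b_data = obp->b_data;
        nbp->b_blkno = obp->b_blkno;
        nbp->b_bcount = obp->b_bcount;

        bdev_strategy(nbp);
        return 0;
}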

View File

@ -1,4 +1,4 @@
/* $NetBSD: ccd.c,v 1.125 2007/12/05 07:06:50 ad Exp $ */
/* $NetBSD: ccd.c,v 1.126 2008/01/02 11:48:36 ad Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 1999, 2007 The NetBSD Foundation, Inc.
@ -125,7 +125,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ccd.c,v 1.125 2007/12/05 07:06:50 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: ccd.c,v 1.126 2008/01/02 11:48:36 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -837,8 +837,10 @@ ccdbuffer(struct ccd_softc *cs, struct buf *bp, daddr_t bn, void *addr,
cbp = CCD_GETBUF();
if (cbp == NULL)
return (NULL);
BUF_INIT(&cbp->cb_buf);
cbp->cb_buf.b_flags = bp->b_flags | B_CALL;
buf_init(&cbp->cb_buf);
cbp->cb_buf.b_flags = bp->b_flags;
cbp->cb_buf.b_oflags = bp->b_oflags;
cbp->cb_buf.b_cflags = bp->b_cflags;
cbp->cb_buf.b_iodone = ccdiodone;
cbp->cb_buf.b_proc = bp->b_proc;
cbp->cb_buf.b_dev = ci->ci_dev;
@ -924,6 +926,7 @@ ccdiodone(struct buf *vbp)
cs->sc_xname, bp->b_error, cbp->cb_comp);
}
count = cbp->cb_buf.b_bcount;
buf_destroy(&cbp->cb_buf);
CCD_PUTBUF(cbp);
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: cgd.c,v 1.48 2007/11/26 19:01:34 pooka Exp $ */
/* $NetBSD: cgd.c,v 1.49 2008/01/02 11:48:36 ad Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.48 2007/11/26 19:01:34 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.49 2008/01/02 11:48:36 ad Exp $");
#include <sys/types.h>
#include <sys/param.h>
@ -295,6 +295,7 @@ cgdstart(struct dk_softc *dksc, struct buf *bp)
void * addr;
void * newaddr;
daddr_t bn;
struct vnode *vp;
DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */
@ -306,7 +307,7 @@ cgdstart(struct dk_softc *dksc, struct buf *bp)
* we can fail quickly if they are unavailable.
*/
nbp = getiobuf_nowait();
nbp = getiobuf(cs->sc_tvn, false);
if (nbp == NULL) {
disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
return -1;
@ -330,18 +331,22 @@ cgdstart(struct dk_softc *dksc, struct buf *bp)
}
nbp->b_data = newaddr;
nbp->b_flags = bp->b_flags | B_CALL;
nbp->b_flags = bp->b_flags;
nbp->b_oflags = bp->b_oflags;
nbp->b_cflags = bp->b_cflags;
nbp->b_iodone = cgdiodone;
nbp->b_proc = bp->b_proc;
nbp->b_blkno = bn;
nbp->b_vp = cs->sc_tvn;
nbp->b_bcount = bp->b_bcount;
nbp->b_private = bp;
BIO_COPYPRIO(nbp, bp);
if ((nbp->b_flags & B_READ) == 0) {
V_INCR_NUMOUTPUT(nbp->b_vp);
vp = nbp->b_vp;
mutex_enter(&vp->v_interlock);
vp->v_numoutput++;
mutex_exit(&vp->v_interlock);
}
VOP_STRATEGY(cs->sc_tvn, nbp);
return 0;
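
cgdstart() above replaces V_INCR_NUMOUTPUT with an explicit v_numoutput increment under the vnode's interlock before handing a write to VOP_STRATEGY. A small sketch of that submission step; it assumes bp->b_vp already points at vp, as in the diff.

#include <sys/buf.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

/*
 * Sketch: account an outgoing write on the target vnode, then call its
 * strategy routine.  v_numoutput is protected by v_interlock, so the
 * old V_INCR_NUMOUTPUT macro becomes an explicit locked increment.
 */
static void
submit_to_vnode(struct vnode *vp, struct buf *bp)
{
        if ((bp->b_flags & B_READ) == 0) {
                mutex_enter(&vp->v_interlock);
                vp->v_numoutput++;
                mutex_exit(&vp->v_interlock);
        }
        VOP_STRATEGY(vp, bp);
}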

View File

@ -1,4 +1,4 @@
/* $NetBSD: dk.c,v 1.31 2007/12/09 20:27:56 jmcneill Exp $ */
/* $NetBSD: dk.c,v 1.32 2008/01/02 11:48:37 ad Exp $ */
/*-
* Copyright (c) 2004, 2005, 2006, 2007 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dk.c,v 1.31 2007/12/09 20:27:56 jmcneill Exp $");
__KERNEL_RCSID(0, "$NetBSD: dk.c,v 1.32 2008/01/02 11:48:37 ad Exp $");
#include "opt_dkwedge.h"
@ -856,7 +856,7 @@ dkwedge_read(struct disk *pdk, struct vnode *vp, daddr_t blkno,
{
struct buf b;
BUF_INIT(&b);
buf_init(&b);
b.b_vp = vp;
b.b_dev = vp->v_rdev;
@ -1037,6 +1037,7 @@ dkstrategy(struct buf *bp)
static void
dkstart(struct dkwedge_softc *sc)
{
struct vnode *vp;
struct buf *bp, *nbp;
/* Do as much work as has been enqueued. */
@ -1056,7 +1057,7 @@ dkstart(struct dkwedge_softc *sc)
/* Instrumentation. */
disk_busy(&sc->sc_dk);
nbp = getiobuf_nowait();
nbp = getiobuf(sc->sc_parent->dk_rawvp, false);
if (nbp == NULL) {
/*
* No resources to run this request; leave the
@ -1070,21 +1071,25 @@ dkstart(struct dkwedge_softc *sc)
(void) BUFQ_GET(sc->sc_bufq);
BUF_INIT(nbp);
nbp->b_data = bp->b_data;
nbp->b_flags = bp->b_flags | B_CALL;
nbp->b_flags = bp->b_flags;
nbp->b_oflags = bp->b_oflags;
nbp->b_cflags = bp->b_cflags;
nbp->b_iodone = dkiodone;
nbp->b_proc = bp->b_proc;
nbp->b_blkno = bp->b_rawblkno;
nbp->b_dev = sc->sc_parent->dk_rawvp->v_rdev;
nbp->b_vp = sc->sc_parent->dk_rawvp;
nbp->b_bcount = bp->b_bcount;
nbp->b_private = bp;
BIO_COPYPRIO(nbp, bp);
if ((nbp->b_flags & B_READ) == 0)
V_INCR_NUMOUTPUT(nbp->b_vp);
VOP_STRATEGY(nbp->b_vp, nbp);
vp = nbp->b_vp;
if ((nbp->b_flags & B_READ) == 0) {
mutex_enter(&vp->v_interlock);
vp->v_numoutput++;
mutex_exit(&vp->v_interlock);
}
VOP_STRATEGY(vp, nbp);
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: fss.c,v 1.41 2007/12/08 19:29:41 pooka Exp $ */
/* $NetBSD: fss.c,v 1.42 2008/01/02 11:48:36 ad Exp $ */
/*-
* Copyright (c) 2003 The NetBSD Foundation, Inc.
@ -43,7 +43,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fss.c,v 1.41 2007/12/08 19:29:41 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: fss.c,v 1.42 2008/01/02 11:48:36 ad Exp $");
#include "fss.h"
@ -895,8 +895,8 @@ restart:
if (len > MAXPHYS)
len = MAXPHYS;
bp = getiobuf();
bp->b_flags = B_READ|B_CALL;
bp = getiobuf(NULL, true);
bp->b_flags = B_READ;
bp->b_bcount = len;
bp->b_bufsize = bp->b_bcount;
bp->b_error = 0;
@ -904,7 +904,6 @@ restart:
bp->b_blkno = dblk;
bp->b_proc = NULL;
bp->b_dev = sc->sc_bdev;
bp->b_vp = NULLVP;
bp->b_private = scp;
bp->b_iodone = fss_cluster_iodone;
@ -952,7 +951,7 @@ fss_bs_io(struct fss_softc *sc, fss_io_type rw,
data, len, off, UIO_SYSSPACE, IO_UNIT|IO_NODELOCKED,
sc->sc_bs_lwp->l_cred, NULL, NULL);
if (error == 0) {
simple_lock(&sc->sc_bs_vp->v_interlock);
mutex_enter(&sc->sc_bs_vp->v_interlock);
error = VOP_PUTPAGES(sc->sc_bs_vp, trunc_page(off),
round_page(off+len), PGO_CLEANIT|PGO_SYNCIO|PGO_FREE);
}
@ -1019,7 +1018,7 @@ fss_bs_thread(void *arg)
scl = sc->sc_cache+sc->sc_cache_size;
nbp = getiobuf();
nbp = getiobuf(NULL, true);
nfreed = nio = 1; /* Dont sleep the first time */
@ -1148,7 +1147,7 @@ fss_bs_thread(void *arg)
FSS_UNLOCK(sc, s);
BUF_INIT(nbp);
buf_init(nbp);
nbp->b_flags = B_READ;
nbp->b_bcount = bp->b_bcount;
nbp->b_bufsize = bp->b_bcount;
@ -1157,7 +1156,6 @@ fss_bs_thread(void *arg)
nbp->b_blkno = bp->b_blkno;
nbp->b_proc = bp->b_proc;
nbp->b_dev = sc->sc_bdev;
nbp->b_vp = NULLVP;
bdev_strategy(nbp);

View File

@ -1,4 +1,4 @@
/* $NetBSD: ct.c,v 1.12 2007/10/08 20:12:06 ad Exp $ */
/* $NetBSD: ct.c,v 1.13 2008/01/02 11:48:37 ad Exp $ */
/*-
* Copyright (c) 1996-2003 The NetBSD Foundation, Inc.
@ -128,7 +128,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ct.c,v 1.12 2007/10/08 20:12:06 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: ct.c,v 1.13 2008/01/02 11:48:37 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -475,6 +475,7 @@ ctcommand(dev, cmd, cnt)
sc->sc_bp = bp;
sc->sc_cmd = cmd;
bp->b_dev = dev;
bp->b_objlock = &buffer_lock;
if (cmd == MTFSF) {
nbp = (struct buf *)geteblk(MAXBSIZE);
bp->b_data = nbp->b_data;
@ -482,7 +483,9 @@ ctcommand(dev, cmd, cnt)
}
while (cnt-- > 0) {
bp->b_flags = B_BUSY;
bp->b_flags = 0;
bp->b_cflags = BC_BUSY;
bp->b_oflags = 0;
if (cmd == MTBSF) {
sc->sc_blkno = sc->sc_eofs[sc->sc_eofp];
sc->sc_eofp--;

View File

@ -1,4 +1,4 @@
/* $NetBSD: mt.c,v 1.11 2007/07/29 12:15:43 ad Exp $ */
/* $NetBSD: mt.c,v 1.12 2008/01/02 11:48:37 ad Exp $ */
/*-
* Copyright (c) 1996-2003 The NetBSD Foundation, Inc.
@ -121,7 +121,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mt.c,v 1.11 2007/07/29 12:15:43 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: mt.c,v 1.12 2008/01/02 11:48:37 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -515,13 +515,16 @@ mtcommand(dev, cmd, cnt)
sc = device_lookup(&mt_cd, MTUNIT(dev));
bp = &sc->sc_bufstore;
if (bp->b_flags & B_BUSY)
if (bp->b_cflags & BC_BUSY)
return (EBUSY);
bp->b_cmd = cmd;
bp->b_dev = dev;
bp->b_objlock = &buffer_lock;
do {
bp->b_flags = B_BUSY | B_CMD;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_CMD;
bp->b_oflags = 0;
mtstrategy(bp);
biowait(bp);
if (bp->b_error != 0) {
@ -530,9 +533,9 @@ mtcommand(dev, cmd, cnt)
}
} while (--cnt > 0);
#if 0
bp->b_flags = 0 /*&= ~B_BUSY*/;
bp->b_cflags = 0 /*&= ~BC_BUSY*/;
#else
bp->b_flags &= ~B_BUSY;
bp->b_cflags &= ~BC_BUSY;
#endif
return (error);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: fd.c,v 1.77 2007/10/19 12:00:16 ad Exp $ */
/* $NetBSD: fd.c,v 1.78 2008/01/02 11:48:37 ad Exp $ */
/*-
* Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
@ -88,7 +88,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.77 2007/10/19 12:00:16 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: fd.c,v 1.78 2008/01/02 11:48:37 ad Exp $");
#include "rnd.h"
#include "opt_ddb.h"
@ -1518,12 +1518,12 @@ fdformat(dev, finfo, l)
struct buf *bp;
/* set up a buffer header for fdstrategy() */
bp = getiobuf_nowait();
bp = getiobuf(NULL, false);
if (bp == NULL)
return ENOBUFS;
bp->b_vp = NULL;
bp->b_flags = B_BUSY | B_PHYS | B_FORMAT;
bp->b_cflags = BC_BUSY;
bp->b_flags = B_PHYS | B_FORMAT;
bp->b_proc = l->l_proc;
bp->b_dev = dev;

View File

@ -1,4 +1,4 @@
/* $NetBSD: ts.c,v 1.21 2007/10/19 12:01:09 ad Exp $ */
/* $NetBSD: ts.c,v 1.22 2008/01/02 11:48:38 ad Exp $ */
/*-
* Copyright (c) 1991 The Regents of the University of California.
@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ts.c,v 1.21 2007/10/19 12:01:09 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: ts.c,v 1.22 2008/01/02 11:48:38 ad Exp $");
#undef TSDEBUG
@ -321,7 +321,6 @@ void
tscommand(struct ts_softc *sc, dev_t dev, int cmd, int count)
{
struct buf *bp;
int s;
#ifdef TSDEBUG
printf("tscommand (%x, %d)\n", cmd, count);
@ -329,20 +328,19 @@ tscommand(struct ts_softc *sc, dev_t dev, int cmd, int count)
bp = &sc->ts_cbuf;
s = splbio();
while (bp->b_flags & B_BUSY) {
mutex_enter(&bufcache_lock);
while (bp->b_cflags & BC_BUSY) {
/*
* This special check is because B_BUSY never
* This special check is because BC_BUSY never
* gets cleared in the non-waiting rewind case. ???
*/
if (bp->b_bcount == 0 && (bp->b_flags & B_DONE))
if (bp->b_bcount == 0 && (bp->b_oflags & BO_DONE))
break;
bp->b_flags |= B_WANTED;
(void) tsleep(bp, PRIBIO, "tscmd", 0);
(void )bbusy(bp, false, 0);
/* check MOT-flag !!! */
}
bp->b_flags = B_BUSY | B_READ;
splx(s);
bp->b_flags = B_READ;
mutex_exit(&bufcache_lock);
/*
* Load the buffer. The b_count field gets used to hold the command
@ -354,6 +352,8 @@ tscommand(struct ts_softc *sc, dev_t dev, int cmd, int count)
bp->b_bcount = count;
bp->b_resid = cmd;
bp->b_blkno = 0;
bp->b_oflags = 0;
bp->b_objlock = &buffer_lock;
tsstrategy(bp);
/*
* In case of rewind from close, don't wait.
@ -362,9 +362,10 @@ tscommand(struct ts_softc *sc, dev_t dev, int cmd, int count)
if (count == 0)
return;
biowait(bp);
if (bp->b_flags & B_WANTED)
wakeup((void *)bp);
bp->b_flags = 0;
mutex_enter(&bufcache_lock);
cv_broadcast(&bp->b_busy);
bp->b_cflags = 0;
mutex_exit(&bufcache_lock);
}
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: rf_diskqueue.c,v 1.49 2007/03/04 06:02:37 christos Exp $ */
/* $NetBSD: rf_diskqueue.c,v 1.50 2008/01/02 11:48:38 ad Exp $ */
/*
* Copyright (c) 1995 Carnegie-Mellon University.
* All rights reserved.
@ -66,7 +66,7 @@
****************************************************************************/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_diskqueue.c,v 1.49 2007/03/04 06:02:37 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: rf_diskqueue.c,v 1.50 2008/01/02 11:48:38 ad Exp $");
#include <dev/raidframe/raidframevar.h>
@ -449,25 +449,19 @@ rf_CreateDiskQueueData(RF_IoType_t typ, RF_SectorNum_t ssect,
int waitflag)
{
RF_DiskQueueData_t *p;
int s;
s = splbio();
p = pool_get(&rf_pools.dqd, waitflag);
splx(s);
if (p == NULL)
return (NULL);
memset(p, 0, sizeof(RF_DiskQueueData_t));
if (waitflag == PR_WAITOK) {
p->bp = getiobuf();
p->bp = getiobuf(NULL, true);
} else {
p->bp = getiobuf_nowait();
p->bp = getiobuf(NULL, false);
}
if (p->bp == NULL) {
/* no memory for the buffer!?!? */
s = splbio();
pool_put(&rf_pools.dqd, p);
splx(s);
return (NULL);
}
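
rf_CreateDiskQueueData() above no longer brackets the pool calls with splbio() and picks the blocking or non-blocking iobuf variant from the same waitflag it passes to pool_get(). A compressed sketch of that allocation and failure path; RF_DiskQueueData_t and rf_pools.dqd are taken from the diff.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/pool.h>

/*
 * Sketch: allocate a queue element plus its buffer header, honouring
 * the caller's PR_WAITOK/PR_NOWAIT choice for both, and unwind cleanly
 * if the iobuf pool is exhausted.
 */
static RF_DiskQueueData_t *
dqd_alloc(int waitflag)
{
        RF_DiskQueueData_t *p;

        p = pool_get(&rf_pools.dqd, waitflag);
        if (p == NULL)
                return NULL;
        memset(p, 0, sizeof(*p));

        p->bp = getiobuf(NULL, waitflag == PR_WAITOK);
        if (p->bp == NULL) {
                pool_put(&rf_pools.dqd, p);	/* no splbio bracketing needed */
                return NULL;
        }
        return p;
}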

View File

@ -1,4 +1,4 @@
/* $NetBSD: rf_netbsdkintf.c,v 1.241 2007/12/18 01:09:46 oster Exp $ */
/* $NetBSD: rf_netbsdkintf.c,v 1.242 2008/01/02 11:48:38 ad Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
* All rights reserved.
@ -146,7 +146,7 @@
***********************************************************/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.241 2007/12/18 01:09:46 oster Exp $");
__KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.242 2008/01/02 11:48:38 ad Exp $");
#include <sys/param.h>
#include <sys/errno.h>
@ -2208,7 +2208,9 @@ InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
struct proc *b_proc)
{
/* bp->b_flags = B_PHYS | rw_flag; */
bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
bp->b_flags = rw_flag; /* XXX need B_PHYS here too??? */
bp->b_oflags = 0;
bp->b_cflags = 0;
bp->b_bcount = numSect << logBytesPerSector;
bp->b_bufsize = bp->b_bcount;
bp->b_error = 0;
@ -2223,8 +2225,11 @@ InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
bp->b_iodone = cbFunc;
bp->b_private = cbArg;
bp->b_vp = b_vp;
bp->b_objlock = &b_vp->v_interlock;
if ((bp->b_flags & B_READ) == 0) {
bp->b_vp->v_numoutput++;
mutex_enter(&b_vp->v_interlock);
b_vp->v_numoutput++;
mutex_exit(&b_vp->v_interlock);
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: cd.c,v 1.271 2007/12/09 20:28:22 jmcneill Exp $ */
/* $NetBSD: cd.c,v 1.272 2008/01/02 11:48:38 ad Exp $ */
/*-
* Copyright (c) 1998, 2001, 2003, 2004, 2005 The NetBSD Foundation, Inc.
@ -57,7 +57,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cd.c,v 1.271 2007/12/09 20:28:22 jmcneill Exp $");
__KERNEL_RCSID(0, "$NetBSD: cd.c,v 1.272 2008/01/02 11:48:38 ad Exp $");
#include "rnd.h"
@ -679,7 +679,7 @@ cdstrategy(struct buf *bp)
}
blkno = ((blkno * lp->d_secsize) / cd->params.blksize);
nbp = getiobuf_nowait();
nbp = getiobuf(NULL, false);
if (!nbp) {
/* No memory -- fail the iop. */
free(bounce, M_DEVBUF);
@ -698,14 +698,12 @@ cdstrategy(struct buf *bp)
/* Set up the IOP to the bounce buffer. */
nbp->b_error = 0;
nbp->b_proc = bp->b_proc;
nbp->b_vp = NULLVP;
nbp->b_bcount = count;
nbp->b_bufsize = count;
nbp->b_rawblkno = blkno;
nbp->b_flags = bp->b_flags | B_READ | B_CALL;
nbp->b_flags = bp->b_flags | B_READ;
nbp->b_oflags = bp->b_oflags;
nbp->b_cflags = bp->b_cflags;
nbp->b_iodone = cdbounce;
/* store bounce state in b_private and use new buf */
@ -970,7 +968,7 @@ cdbounce(struct buf *bp)
count = MAXPHYS;
}
nbp = getiobuf_nowait();
nbp = getiobuf(NULL, false);
if (!nbp) {
/* No memory -- fail the iop. */
bp->b_error = ENOMEM;
@ -980,15 +978,13 @@ cdbounce(struct buf *bp)
/* Set up the IOP to the bounce buffer. */
nbp->b_error = 0;
nbp->b_proc = obp->b_proc;
nbp->b_vp = NULLVP;
nbp->b_bcount = count;
nbp->b_bufsize = count;
nbp->b_data = bp->b_data;
nbp->b_rawblkno = blkno;
nbp->b_flags = obp->b_flags | B_READ | B_CALL;
nbp->b_flags = obp->b_flags | B_READ;
nbp->b_oflags = obp->b_oflags;
nbp->b_cflags = obp->b_cflags;
nbp->b_iodone = cdbounce;
/* store bounce state in b_private and use new buf */


@ -1,4 +1,4 @@
/* $NetBSD: scsipi_ioctl.c,v 1.63 2007/07/29 12:50:23 ad Exp $ */
/* $NetBSD: scsipi_ioctl.c,v 1.64 2008/01/02 11:48:39 ad Exp $ */
/*-
* Copyright (c) 1998, 2004 The NetBSD Foundation, Inc.
@ -44,7 +44,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_ioctl.c,v 1.63 2007/07/29 12:50:23 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: scsipi_ioctl.c,v 1.64 2008/01/02 11:48:39 ad Exp $");
#include "opt_compat_freebsd.h"
#include "opt_compat_netbsd.h"
@ -85,7 +85,7 @@ si_get(void)
int s;
si = malloc(sizeof(struct scsi_ioctl), M_TEMP, M_WAITOK|M_ZERO);
simple_lock_init(&si->si_bp.b_interlock);
buf_init(&si->si_bp);
s = splbio();
LIST_INSERT_HEAD(&si_head, si, si_list);
splx(s);
@ -100,6 +100,7 @@ si_free(struct scsi_ioctl *si)
s = splbio();
LIST_REMOVE(si, si_list);
splx(s);
buf_destroy(&si->si_bp);
free(si, M_TEMP);
}
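
si_get()/si_free() above show the new lifecycle for a struct buf embedded in other storage: buf_init() replaces the old simple_lock_init() on b_interlock, and buf_destroy() must run before the containing memory is freed. A sketch with a hypothetical container:

	#include <sys/param.h>
	#include <sys/buf.h>

	struct example_softc {                  /* hypothetical container */
	        struct buf sc_bp;               /* embedded, not from getiobuf() */
	};

	static void
	example_ctor(struct example_softc *sc)
	{
	        buf_init(&sc->sc_bp);           /* set up the buffer's locking state */
	}

	static void
	example_dtor(struct example_softc *sc)
	{
	        buf_destroy(&sc->sc_bp);        /* tear down before freeing sc */
	}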


@ -1,4 +1,4 @@
/* $NetBSD: xd.c,v 1.71 2007/10/19 12:01:23 ad Exp $ */
/* $NetBSD: xd.c,v 1.72 2008/01/02 11:48:39 ad Exp $ */
/*
*
@ -51,7 +51,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.71 2007/10/19 12:01:23 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.72 2008/01/02 11:48:39 ad Exp $");
#undef XDC_DEBUG /* full debug */
#define XDC_DIAG /* extra sanity checks */
@ -331,8 +331,8 @@ xddummystrat(bp)
if (bp->b_bcount != XDFM_BPS)
panic("xddummystrat");
bcopy(xd_labeldata, bp->b_data, XDFM_BPS);
bp->b_flags |= B_DONE;
bp->b_flags &= ~B_BUSY;
bp->b_oflags |= BO_DONE;
bp->b_cflags &= ~BC_BUSY;
}
int


@ -1,4 +1,4 @@
/* $NetBSD: xy.c,v 1.74 2007/10/19 12:01:23 ad Exp $ */
/* $NetBSD: xy.c,v 1.75 2008/01/02 11:48:39 ad Exp $ */
/*
*
@ -51,7 +51,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.74 2007/10/19 12:01:23 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.75 2008/01/02 11:48:39 ad Exp $");
#undef XYC_DEBUG /* full debug */
#undef XYC_DIAG /* extra sanity checks */
@ -247,8 +247,8 @@ xydummystrat(bp)
if (bp->b_bcount != XYFM_BPS)
panic("xydummystrat");
bcopy(xy_labeldata, bp->b_data, XYFM_BPS);
bp->b_flags |= B_DONE;
bp->b_flags &= ~B_BUSY;
bp->b_oflags |= BO_DONE;
bp->b_cflags &= ~BC_BUSY;
}
int


@ -1,4 +1,4 @@
/* $NetBSD: vnd.c,v 1.174 2007/12/18 23:22:18 riz Exp $ */
/* $NetBSD: vnd.c,v 1.175 2008/01/02 11:48:36 ad Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -137,7 +137,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.174 2007/12/18 23:22:18 riz Exp $");
__KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.175 2008/01/02 11:48:36 ad Exp $");
#if defined(_KERNEL_OPT)
#include "fs_nfs.h"
@ -620,11 +620,14 @@ vndthread(void *arg)
disk_busy(&vnd->sc_dkdev);
bp = &vnx->vx_buf;
BUF_INIT(bp);
bp->b_flags = (obp->b_flags & B_READ) | B_CALL;
buf_init(bp);
bp->b_flags = (obp->b_flags & B_READ);
bp->b_oflags = obp->b_oflags;
bp->b_cflags = obp->b_cflags;
bp->b_iodone = vndiodone;
bp->b_private = obp;
bp->b_vp = vnd->sc_vp;
bp->b_objlock = &bp->b_vp->v_interlock;
bp->b_data = obp->b_data;
bp->b_bcount = obp->b_bcount;
BIO_COPYPRIO(bp, obp);
@ -708,8 +711,11 @@ handle_with_rdwr(struct vnd_softc *vnd, const struct buf *obp, struct buf *bp)
/* We need to increase the number of outputs on the vnode if
* there was any write to it. */
if (!doread)
V_INCR_NUMOUTPUT(vp);
if (!doread) {
mutex_enter(&vp->v_interlock);
vp->v_numoutput++;
mutex_exit(&vp->v_interlock);
}
biodone(bp);
}
@ -727,15 +733,15 @@ handle_with_strategy(struct vnd_softc *vnd, const struct buf *obp,
int bsize, error, flags, skipped;
size_t resid, sz;
off_t bn, offset;
struct vnode *vp;
flags = obp->b_flags;
if (!(flags & B_READ)) {
int s;
s = splbio();
V_INCR_NUMOUTPUT(bp->b_vp);
splx(s);
vp = bp->b_vp;
mutex_enter(&vp->v_interlock);
vp->v_numoutput++;
mutex_exit(&vp->v_interlock);
}
/* convert to a byte offset within the file. */
@ -756,7 +762,6 @@ handle_with_strategy(struct vnd_softc *vnd, const struct buf *obp,
for (offset = 0, resid = bp->b_resid; resid;
resid -= sz, offset += sz) {
struct buf *nbp;
struct vnode *vp;
daddr_t nbn;
int off, nra;
@ -792,11 +797,11 @@ handle_with_strategy(struct vnd_softc *vnd, const struct buf *obp,
#ifdef DEBUG
if (vnddebug & VDB_IO)
printf("vndstrategy: vp %p/%p bn 0x%qx/0x%" PRIx64
" sz 0x%zx\n",
vnd->sc_vp, vp, (long long)bn, nbn, sz);
" sz 0x%zx\n", vnd->sc_vp, vp, (long long)bn,
nbn, sz);
#endif
nbp = getiobuf();
nbp = getiobuf(vp, true);
nestiobuf_setup(bp, nbp, offset, sz);
nbp->b_blkno = nbn + btodb(off);
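
handle_with_strategy() above builds child buffers with getiobuf(vp, true) and ties them to the master with nestiobuf_setup(), so that completion of the children completes the master transfer. A rough sketch of issuing one child; everything except those two calls is illustrative:

	#include <sys/param.h>
	#include <sys/buf.h>
	#include <sys/vnode.h>

	static void
	example_issue_child(struct buf *mbp, struct vnode *vp,
	    int offset, size_t sz, daddr_t blkno)
	{
	        struct buf *nbp;

	        nbp = getiobuf(vp, true);               /* may sleep for memory */
	        nestiobuf_setup(mbp, nbp, offset, sz);  /* account sz against the master */
	        nbp->b_blkno = blkno;
	        VOP_STRATEGY(vp, nbp);                  /* child completion updates mbp */
	}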


@ -1,4 +1,4 @@
/* $NetBSD: adutil.c,v 1.5 2007/10/10 20:42:22 ad Exp $ */
/* $NetBSD: adutil.c,v 1.6 2008/01/02 11:48:39 ad Exp $ */
/*
* Copyright (c) 1994 Christian E. Hopps
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: adutil.c,v 1.5 2007/10/10 20:42:22 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: adutil.c,v 1.6 2008/01/02 11:48:39 ad Exp $");
#include <sys/param.h>
#include <sys/vnode.h>
@ -69,7 +69,7 @@ start_over:
for (ap = hp->lh_first; ap != NULL; ap = ap->link.le_next) {
if (ap->block == an) {
vp = ATOV(ap);
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
simple_unlock(&adosfs_hashlock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
goto start_over;


@ -1,4 +1,4 @@
/* $NetBSD: advnops.c,v 1.26 2007/11/26 19:01:41 pooka Exp $ */
/* $NetBSD: advnops.c,v 1.27 2008/01/02 11:48:40 ad Exp $ */
/*
* Copyright (c) 1994 Christian E. Hopps
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: advnops.c,v 1.26 2007/11/26 19:01:41 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: advnops.c,v 1.27 2008/01/02 11:48:40 ad Exp $");
#if defined(_KERNEL_OPT)
#include "opt_quota.h"
@ -847,15 +847,15 @@ adosfs_inactive(v)
{
struct vop_inactive_args /* {
struct vnode *a_vp;
bool *a_recycle;
} */ *sp = v;
struct vnode *vp = sp->a_vp;
struct lwp *l = curlwp;
#ifdef ADOSFS_DIAGNOSTIC
advopprint(sp);
#endif
VOP_UNLOCK(vp, 0);
/* XXX this needs to check if file was deleted */
vrecycle(vp, NULL, l);
*sp->a_recycle = true;
#ifdef ADOSFS_DIAGNOSTIC
printf(" 0)");


@ -1,4 +1,4 @@
/* $NetBSD: cd9660_node.c,v 1.19 2007/12/08 14:41:11 ad Exp $ */
/* $NetBSD: cd9660_node.c,v 1.20 2008/01/02 11:48:40 ad Exp $ */
/*-
* Copyright (c) 1982, 1986, 1989, 1994
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cd9660_node.c,v 1.19 2007/12/08 14:41:11 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: cd9660_node.c,v 1.20 2008/01/02 11:48:40 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -154,7 +154,7 @@ loop:
if (flags == 0) {
mutex_exit(&cd9660_ihash_lock);
} else {
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
mutex_exit(&cd9660_ihash_lock);
if (vget(vp, flags | LK_INTERLOCK))
goto loop;
@ -209,6 +209,7 @@ cd9660_inactive(v)
{
struct vop_inactive_args /* {
struct vnode *a_vp;
bool *a_recycle;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
struct iso_node *ip = VTOI(vp);
@ -217,14 +218,13 @@ cd9660_inactive(v)
if (prtactive && vp->v_usecount != 0)
vprint("cd9660_inactive: pushing active", vp);
ip->i_flag = 0;
VOP_UNLOCK(vp, 0);
/*
* If we are done with the inode, reclaim it
* so that it can be reused immediately.
*/
if (ip->inode.iso_mode == 0)
vrecycle(vp, (struct simplelock *)0, curlwp);
ip->i_flag = 0;
*ap->a_recycle = (ip->inode.iso_mode == 0);
VOP_UNLOCK(vp, 0);
return error;
}
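
The cd9660_inactive() change above, like the matching adosfs, efs, filecore and msdosfs hunks, shows the new VOP_INACTIVE contract: instead of calling vrecycle() itself, the file system reports through *ap->a_recycle whether the vnode should be recycled, then unlocks. A minimal sketch; the function name and the staleness test are hypothetical:

	#include <sys/param.h>
	#include <sys/vnode.h>

	static bool     examplefs_node_is_stale(struct vnode *);       /* hypothetical */

	int
	examplefs_inactive(void *v)
	{
	        struct vop_inactive_args /* {
	                struct vnode *a_vp;
	                bool *a_recycle;
	        } */ *ap = v;
	        struct vnode *vp = ap->a_vp;

	        /* per-fs bookkeeping (timestamps, flags, ...) goes here */

	        *ap->a_recycle = examplefs_node_is_stale(vp);
	        VOP_UNLOCK(vp, 0);
	        return 0;
	}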


@ -1,4 +1,4 @@
/* $NetBSD: cd9660_vfsops.c,v 1.52 2007/12/08 19:29:42 pooka Exp $ */
/* $NetBSD: cd9660_vfsops.c,v 1.53 2008/01/02 11:48:40 ad Exp $ */
/*-
* Copyright (c) 1994
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cd9660_vfsops.c,v 1.52 2007/12/08 19:29:42 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: cd9660_vfsops.c,v 1.53 2008/01/02 11:48:40 ad Exp $");
#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
@ -420,6 +420,7 @@ iso_mountfs(devvp, mp, l, argp)
mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
mp->mnt_stat.f_namemax = MAXNAMLEN;
mp->mnt_flag |= MNT_LOCAL;
mp->mnt_iflag |= IMNT_MPSAFE;
mp->mnt_dev_bshift = iso_bsize;
mp->mnt_fs_bshift = isomp->im_bshift;
isomp->im_mountp = mp;
@ -871,7 +872,6 @@ cd9660_vget_internal(mp, ino, vpp, relocated, isodir)
vp->v_data = NULL;
VOP_UNLOCK(vp, 0);
vp->v_op = spec_vnodeop_p;
vrele(vp);
vgone(vp);
lockmgr(&nvp->v_lock, LK_EXCLUSIVE, &nvp->v_interlock);
/*


@ -1,4 +1,4 @@
/* $NetBSD: efs_ihash.c,v 1.1 2007/06/29 23:30:28 rumble Exp $ */
/* $NetBSD: efs_ihash.c,v 1.2 2008/01/02 11:48:40 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1991, 1993
@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: efs_ihash.c,v 1.1 2007/06/29 23:30:28 rumble Exp $");
__KERNEL_RCSID(0, "$NetBSD: efs_ihash.c,v 1.2 2008/01/02 11:48:40 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -146,7 +146,7 @@ efs_ihashget(dev_t dev, ino_t inum, int flags)
if (flags == 0) {
mutex_exit(&efs_ihash_lock);
} else {
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
mutex_exit(&efs_ihash_lock);
if (vget(vp, flags | LK_INTERLOCK))
goto loop;


@ -1,4 +1,4 @@
/* $NetBSD: efs_vnops.c,v 1.12 2007/11/26 19:01:43 pooka Exp $ */
/* $NetBSD: efs_vnops.c,v 1.13 2008/01/02 11:48:40 ad Exp $ */
/*
* Copyright (c) 2006 Stephen M. Rumble <rumble@ephemeral.org>
@ -17,7 +17,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: efs_vnops.c,v 1.12 2007/11/26 19:01:43 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: efs_vnops.c,v 1.13 2008/01/02 11:48:40 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -559,14 +559,13 @@ efs_inactive(void *v)
struct vop_inactive_args /* {
const struct vnodeop_desc *a_desc;
struct vnode *a_vp;
bool *a_recycle
} */ *ap = v;
struct efs_inode *eip = EFS_VTOI(ap->a_vp);
*ap->a_recycle = (eip->ei_mode == 0);
VOP_UNLOCK(ap->a_vp, 0);
if (eip->ei_mode == 0)
vrecycle(ap->a_vp, NULL, curlwp);
return (0);
}


@ -1,4 +1,4 @@
/* $NetBSD: filecore_node.c,v 1.12 2007/11/26 19:01:44 pooka Exp $ */
/* $NetBSD: filecore_node.c,v 1.13 2008/01/02 11:48:41 ad Exp $ */
/*-
* Copyright (c) 1982, 1986, 1989, 1994
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: filecore_node.c,v 1.12 2007/11/26 19:01:44 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: filecore_node.c,v 1.13 2008/01/02 11:48:41 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -174,7 +174,7 @@ loop:
LIST_FOREACH(ip, &filecorehashtbl[INOHASH(dev, inum)], i_hash) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
simple_unlock(&filecore_ihash_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
goto loop;
@ -226,7 +226,7 @@ filecore_inactive(v)
{
struct vop_inactive_args /* {
struct vnode *a_vp;
struct lwp *a_l;
bool *a_recycle;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
struct filecore_node *ip = VTOI(vp);
@ -235,14 +235,13 @@ filecore_inactive(v)
if (prtactive && vp->v_usecount != 0)
vprint("filecore_inactive: pushing active", vp);
ip->i_flag = 0;
VOP_UNLOCK(vp, 0);
/*
* If we are done with the inode, reclaim it
* so that it can be reused immediately.
*/
if (filecore_staleinode(ip))
vrecycle(vp, (struct simplelock *)0, curlwp);
ip->i_flag = 0;
*ap->a_recycle = (filecore_staleinode(ip) != 0);
VOP_UNLOCK(vp, 0);
return error;
}


@ -1,4 +1,4 @@
/* $NetBSD: hfs_nhash.c,v 1.3 2007/12/11 12:04:23 lukem Exp $ */
/* $NetBSD: hfs_nhash.c,v 1.4 2008/01/02 11:48:41 ad Exp $ */
/*-
* Copyright (c) 2005, 2007 The NetBSD Foundation, Inc.
@ -59,7 +59,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hfs_nhash.c,v 1.3 2007/12/11 12:04:23 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: hfs_nhash.c,v 1.4 2008/01/02 11:48:41 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -127,7 +127,7 @@ loop:
LIST_FOREACH(hp, hpp, h_hash) {
if (cnid == hp->h_rec.cnid && dev == hp->h_dev) {
vp = HTOV(hp);
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
simple_unlock(&hfs_nhash_slock);
if (vget(vp, flags | LK_INTERLOCK))
goto loop;


@ -1,4 +1,4 @@
/* $NetBSD: hfs_subr.c,v 1.6 2007/11/26 19:01:45 pooka Exp $ */
/* $NetBSD: hfs_subr.c,v 1.7 2008/01/02 11:48:41 ad Exp $ */
/*-
* Copyright (c) 2005, 2007 The NetBSD Foundation, Inc.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hfs_subr.c,v 1.6 2007/11/26 19:01:45 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: hfs_subr.c,v 1.7 2008/01/02 11:48:41 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -83,7 +83,6 @@ hfs_vinit(struct mount *mp, int (**specops)(void *), int (**fifoops)(void *),
vp->v_vflag &= ~VV_LOCKSWORK;
VOP_UNLOCK(vp, 0);
vp->v_op = specops;
vrele(vp);
vgone(vp);
lockmgr(&nvp->v_lock, LK_EXCLUSIVE,
&nvp->v_interlock);


@ -1,4 +1,4 @@
/* $NetBSD: msdosfs_denode.c,v 1.29 2007/12/28 17:46:48 reinoud Exp $ */
/* $NetBSD: msdosfs_denode.c,v 1.30 2008/01/02 11:48:41 ad Exp $ */
/*-
* Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank.
@ -48,7 +48,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: msdosfs_denode.c,v 1.29 2007/12/28 17:46:48 reinoud Exp $");
__KERNEL_RCSID(0, "$NetBSD: msdosfs_denode.c,v 1.30 2008/01/02 11:48:41 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -176,7 +176,7 @@ loop:
if (flags == 0) {
mutex_exit(&msdosfs_ihash_lock);
} else {
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
mutex_exit(&msdosfs_ihash_lock);
if (vget(vp, flags | LK_INTERLOCK))
goto loop;
@ -668,6 +668,7 @@ msdosfs_inactive(v)
{
struct vop_inactive_args /* {
struct vnode *a_vp;
bool *a_recycle;
} */ *ap = v;
struct vnode *vp = ap->a_vp;
struct denode *dep = VTODE(vp);
@ -704,7 +705,6 @@ msdosfs_inactive(v)
}
deupdat(dep, 0);
out:
VOP_UNLOCK(vp, 0);
/*
* If we are done with the denode, reclaim it
* so that it can be reused immediately.
@ -713,8 +713,8 @@ out:
printf("msdosfs_inactive(): v_usecount %d, de_Name[0] %x\n",
vp->v_usecount, dep->de_Name[0]);
#endif
if (dep->de_Name[0] == SLOT_DELETED)
vrecycle(vp, (struct simplelock *)0, curlwp);
*ap->a_recycle = (dep->de_Name[0] == SLOT_DELETED);
VOP_UNLOCK(vp, 0);
return (error);
}


@ -1,4 +1,4 @@
/* $NetBSD: msdosfs_vfsops.c,v 1.55 2007/12/08 19:29:43 pooka Exp $ */
/* $NetBSD: msdosfs_vfsops.c,v 1.56 2008/01/02 11:48:41 ad Exp $ */
/*-
* Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank.
@ -48,7 +48,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: msdosfs_vfsops.c,v 1.55 2007/12/08 19:29:43 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: msdosfs_vfsops.c,v 1.56 2008/01/02 11:48:41 ad Exp $");
#if defined(_KERNEL_OPT)
#include "opt_quota.h"
@ -937,7 +937,7 @@ msdosfs_sync(mp, waitfor, cred)
int waitfor;
kauth_cred_t cred;
{
struct vnode *vp, *nvp;
struct vnode *vp, *mvp;
struct denode *dep;
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
int error, allerror = 0;
@ -953,44 +953,47 @@ msdosfs_sync(mp, waitfor, cred)
/* update fats here */
}
}
/* Allocate a marker vnode. */
if ((mvp = valloc(mp)) == NULL)
return ENOMEM;
/*
* Write back each (modified) denode.
*/
simple_lock(&mntvnode_slock);
mutex_enter(&mntvnode_lock);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
goto loop;
simple_lock(&vp->v_interlock);
nvp = TAILQ_NEXT(vp, v_mntvnodes);
for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
vmark(mvp, vp);
if (vp->v_mount != mp || vismarker(vp))
continue;
mutex_enter(&vp->v_interlock);
dep = VTODE(vp);
if (waitfor == MNT_LAZY || vp->v_type == VNON ||
(((dep->de_flag &
(DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0) &&
(LIST_EMPTY(&vp->v_dirtyblkhd) &&
UVM_OBJ_IS_CLEAN(&vp->v_uobj)))) {
simple_unlock(&vp->v_interlock);
mutex_exit(&vp->v_interlock);
continue;
}
simple_unlock(&mntvnode_slock);
mutex_exit(&mntvnode_lock);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
if (error) {
simple_lock(&mntvnode_slock);
if (error == ENOENT)
mutex_enter(&mntvnode_lock);
if (error == ENOENT) {
(void)vunmark(mvp);
goto loop;
}
continue;
}
if ((error = VOP_FSYNC(vp, cred,
waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
allerror = error;
vput(vp);
simple_lock(&mntvnode_slock);
mutex_enter(&mntvnode_lock);
}
simple_unlock(&mntvnode_slock);
mutex_exit(&mntvnode_lock);
vfree(mvp);
/*
* Force stale file system control information to be flushed.
*/
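
msdosfs_sync() above replaces the restart-from-the-head vnode walk with a marker vnode: the marker, taken from valloc() and released with vfree(), keeps the position in mnt_vnodelist while mntvnode_lock is dropped around vget() and the per-vnode work. A sketch of that loop skeleton; the function name and the per-vnode work are illustrative, while the list and locking calls are the ones visible in the diff:

	#include <sys/param.h>
	#include <sys/errno.h>
	#include <sys/mount.h>
	#include <sys/vnode.h>

	static int
	example_walk_vnodes(struct mount *mp)
	{
	        struct vnode *vp, *mvp;
	        int error;

	        if ((mvp = valloc(mp)) == NULL)         /* marker vnode */
	                return ENOMEM;

	        mutex_enter(&mntvnode_lock);
	loop:
	        for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp != NULL;
	            vp = vunmark(mvp)) {
	                vmark(mvp, vp);
	                if (vp->v_mount != mp || vismarker(vp))
	                        continue;
	                mutex_enter(&vp->v_interlock);
	                /*
	                 * Cheap "nothing to do" checks could release
	                 * v_interlock and continue here.
	                 */
	                mutex_exit(&mntvnode_lock);
	                error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
	                if (error) {
	                        mutex_enter(&mntvnode_lock);
	                        if (error == ENOENT) {
	                                (void)vunmark(mvp);
	                                goto loop;      /* lost our place, restart */
	                        }
	                        continue;
	                }
	                /* ... per-vnode work, e.g. VOP_FSYNC() ... */
	                vput(vp);
	                mutex_enter(&mntvnode_lock);
	        }
	        mutex_exit(&mntvnode_lock);
	        vfree(mvp);
	        return 0;
	}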


@ -1,4 +1,4 @@
/* $NetBSD: msdosfs_vnops.c,v 1.45 2007/12/28 17:46:48 reinoud Exp $ */
/* $NetBSD: msdosfs_vnops.c,v 1.46 2008/01/02 11:48:42 ad Exp $ */
/*-
* Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank.
@ -48,7 +48,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: msdosfs_vnops.c,v 1.45 2007/12/28 17:46:48 reinoud Exp $");
__KERNEL_RCSID(0, "$NetBSD: msdosfs_vnops.c,v 1.46 2008/01/02 11:48:42 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -213,10 +213,10 @@ msdosfs_close(v)
struct vnode *vp = ap->a_vp;
struct denode *dep = VTODE(vp);
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
if (vp->v_usecount > 1)
DETIMES(dep, NULL, NULL, NULL, dep->de_pmp->pm_gmtoff);
simple_unlock(&vp->v_interlock);
mutex_exit(&vp->v_interlock);
return (0);
}
@ -659,7 +659,7 @@ msdosfs_write(v)
*/
if (!async && oldoff >> 16 != uio->uio_offset >> 16) {
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
error = VOP_PUTPAGES(vp, (oldoff >> 16) << 16,
(uio->uio_offset >> 16) << 16, PGO_CLEANIT);
}
@ -668,7 +668,7 @@ msdosfs_write(v)
/* set final size */
uvm_vnp_setsize(vp, dep->de_FileSize);
if (error == 0 && ioflag & IO_SYNC) {
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
error = VOP_PUTPAGES(vp, trunc_page(oldoff),
round_page(oldoff + bytelen), PGO_CLEANIT | PGO_SYNCIO);
}
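
The msdosfs_write() hunks above also show the VOP_PUTPAGES calling convention under the new locking: the caller enters the vnode's interlock (which guards the vnode's page object) and the operation releases that lock before returning, which is why no matching mutex_exit() appears. A small sketch; the helper name and offset handling are illustrative:

	#include <sys/param.h>
	#include <sys/mutex.h>
	#include <sys/vnode.h>
	#include <uvm/uvm_extern.h>

	static int
	example_flush_range(struct vnode *vp, off_t lo, off_t hi)
	{
	        mutex_enter(&vp->v_interlock);
	        /* VOP_PUTPAGES() is expected to drop v_interlock itself */
	        return VOP_PUTPAGES(vp, trunc_page(lo), round_page(hi),
	            PGO_CLEANIT | PGO_SYNCIO);
	}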


@ -1,4 +1,4 @@
/* $NetBSD: ntfs_inode.h,v 1.4 2007/03/04 06:03:00 christos Exp $ */
/* $NetBSD: ntfs_inode.h,v 1.5 2008/01/02 11:48:42 ad Exp $ */
/*-
* Copyright (c) 1998, 1999 Semen Ustimenko
@ -73,7 +73,7 @@ struct ntnode {
/* locking */
struct lock i_lock;
struct simplelock i_interlock;
kmutex_t i_interlock;
int i_usecount;
LIST_HEAD(,fnode) i_fnlist;


@ -1,4 +1,4 @@
/* $NetBSD: ntfs_subr.c,v 1.33 2007/10/10 20:42:24 ad Exp $ */
/* $NetBSD: ntfs_subr.c,v 1.34 2008/01/02 11:48:42 ad Exp $ */
/*-
* Copyright (c) 1998, 1999 Semen Ustimenko (semenu@FreeBSD.org)
@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ntfs_subr.c,v 1.33 2007/10/10 20:42:24 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: ntfs_subr.c,v 1.34 2008/01/02 11:48:42 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -385,7 +385,7 @@ ntfs_ntget(ip)
dprintf(("ntfs_ntget: get ntnode %llu: %p, usecount: %d\n",
(unsigned long long)ip->i_number, ip, ip->i_usecount));
simple_lock(&ip->i_interlock);
mutex_enter(&ip->i_interlock);
ip->i_usecount++;
lockmgr(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock);
@ -445,7 +445,7 @@ ntfs_ntlookup(
/* init lock and lock the newborn ntnode */
lockinit(&ip->i_lock, PINOD, "ntnode", 0, LK_EXCLUSIVE);
simple_lock_init(&ip->i_interlock);
mutex_init(&ip->i_interlock, MUTEX_DEFAULT, IPL_NONE);
ntfs_ntget(ip);
ntfs_nthashins(ip);
@ -475,7 +475,7 @@ ntfs_ntput(ip)
dprintf(("ntfs_ntput: rele ntnode %llu: %p, usecount: %d\n",
(unsigned long long)ip->i_number, ip, ip->i_usecount));
simple_lock(&ip->i_interlock);
mutex_enter(&ip->i_interlock);
ip->i_usecount--;
#ifdef DIAGNOSTIC
@ -501,6 +501,8 @@ ntfs_ntput(ip)
LIST_REMOVE(vap,va_list);
ntfs_freentvattr(vap);
}
mutex_destroy(&ip->i_interlock);
lockdestroy(&ip->i_lock);
FREE(ip, M_NTFSNTNODE);
}
}
@ -512,9 +514,9 @@ void
ntfs_ntref(ip)
struct ntnode *ip;
{
simple_lock(&ip->i_interlock);
mutex_enter(&ip->i_interlock);
ip->i_usecount++;
simple_unlock(&ip->i_interlock);
mutex_exit(&ip->i_interlock);
dprintf(("ntfs_ntref: ino %llu, usecount: %d\n",
(unsigned long long)ip->i_number, ip->i_usecount));
@ -531,13 +533,13 @@ ntfs_ntrele(ip)
dprintf(("ntfs_ntrele: rele ntnode %llu: %p, usecount: %d\n",
(unsigned long long)ip->i_number, ip, ip->i_usecount));
simple_lock(&ip->i_interlock);
mutex_enter(&ip->i_interlock);
ip->i_usecount--;
if (ip->i_usecount < 0)
panic("ntfs_ntrele: ino: %llu usecount: %d ",
(unsigned long long)ip->i_number, ip->i_usecount);
simple_unlock(&ip->i_interlock);
mutex_exit(&ip->i_interlock);
}
/*


@ -1,4 +1,4 @@
/* $NetBSD: ptyfs_subr.c,v 1.11 2007/12/08 19:29:44 pooka Exp $ */
/* $NetBSD: ptyfs_subr.c,v 1.12 2008/01/02 11:48:43 ad Exp $ */
/*
* Copyright (c) 1993
@ -73,7 +73,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ptyfs_subr.c,v 1.11 2007/12/08 19:29:44 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: ptyfs_subr.c,v 1.12 2008/01/02 11:48:43 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -239,7 +239,6 @@ ptyfs_allocvp(struct mount *mp, struct vnode **vpp, ptyfstype type, int pty,
vp->v_vflag &= ~VV_LOCKSWORK;
VOP_UNLOCK(vp, 0);
vp->v_op = spec_vnodeop_p;
vrele(vp);
vgone(vp);
lockmgr(&nvp->v_lock, LK_EXCLUSIVE, &nvp->v_interlock);
/*
@ -374,7 +373,7 @@ loop:
vp = PTYFSTOV(pp);
if (pty == pp->ptyfs_pty && pp->ptyfs_type == type &&
vp->v_mount == mp) {
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
mutex_exit(&ptyfs_used_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
goto loop;


@ -1,4 +1,4 @@
/* $NetBSD: ptyfs_vnops.c,v 1.26 2007/11/26 19:01:49 pooka Exp $ */
/* $NetBSD: ptyfs_vnops.c,v 1.27 2008/01/02 11:48:43 ad Exp $ */
/*
* Copyright (c) 1993, 1995
@ -76,7 +76,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ptyfs_vnops.c,v 1.26 2007/11/26 19:01:49 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: ptyfs_vnops.c,v 1.27 2008/01/02 11:48:43 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -745,10 +745,10 @@ ptyfs_close(void *v)
struct vnode *vp = ap->a_vp;
struct ptyfsnode *ptyfs = VTOPTYFS(vp);
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
if (vp->v_usecount > 1)
PTYFS_ITIMES(ptyfs, NULL, NULL, NULL);
simple_unlock(&vp->v_interlock);
mutex_exit(&vp->v_interlock);
switch (ptyfs->ptyfs_type) {
case PTYFSpts:


@ -1,4 +1,4 @@
/* $NetBSD: puffs_msgif.c,v 1.61 2007/12/05 12:11:56 pooka Exp $ */
/* $NetBSD: puffs_msgif.c,v 1.62 2008/01/02 11:48:43 ad Exp $ */
/*
* Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.61 2007/12/05 12:11:56 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.62 2008/01/02 11:48:43 ad Exp $");
#include <sys/param.h>
#include <sys/fstrans.h>
@ -927,7 +927,7 @@ puffsop_flush(struct puffs_mount *pmp, struct puffs_flush *pf)
break;
}
simple_lock(&vp->v_uobj.vmobjlock);
mutex_enter(&vp->v_uobj.vmobjlock);
rv = VOP_PUTPAGES(vp, offlo, offhi, flags);
break;
@ -1032,18 +1032,18 @@ puffs_msgif_close(void *this)
* wait for syncer_mutex. Otherwise the mountpoint can be
* wiped out while we wait.
*/
simple_lock(&mp->mnt_slock);
mutex_enter(&mp->mnt_mutex);
mp->mnt_wcnt++;
simple_unlock(&mp->mnt_slock);
mutex_exit(&mp->mnt_mutex);
mutex_enter(&syncer_mutex);
simple_lock(&mp->mnt_slock);
mutex_enter(&mp->mnt_mutex);
mp->mnt_wcnt--;
if (mp->mnt_wcnt == 0)
wakeup(&mp->mnt_wcnt);
gone = mp->mnt_iflag & IMNT_GONE;
simple_unlock(&mp->mnt_slock);
mutex_exit(&mp->mnt_mutex);
if (gone) {
mutex_exit(&syncer_mutex);
return 0;


@ -1,4 +1,4 @@
/* $NetBSD: puffs_node.c,v 1.8 2007/11/17 21:55:29 pooka Exp $ */
/* $NetBSD: puffs_node.c,v 1.9 2008/01/02 11:48:43 ad Exp $ */
/*
* Copyright (c) 2005, 2006, 2007 Antti Kantee. All Rights Reserved.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_node.c,v 1.8 2007/11/17 21:55:29 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: puffs_node.c,v 1.9 2008/01/02 11:48:43 ad Exp $");
#include <sys/param.h>
#include <sys/hash.h>
@ -136,10 +136,10 @@ puffs_getvnode(struct mount *mp, void *cookie, enum vtype type,
*/
/* So mp is not dead yet.. good.. inform new vnode of its master */
simple_lock(&mntvnode_slock);
mutex_enter(&mntvnode_lock);
TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
simple_unlock(&mntvnode_slock);
vp->v_mount = mp;
mutex_exit(&mntvnode_lock);
/*
* clerical tasks & footwork
@ -167,7 +167,6 @@ puffs_getvnode(struct mount *mp, void *cookie, enum vtype type,
*/
vp->v_op = spec_vnodeop_p;
vp->v_vflag &= ~VV_LOCKSWORK;
vrele(vp);
vgone(vp); /* cya */
/* init "new" vnode */
@ -374,7 +373,7 @@ puffs_makeroot(struct puffs_mount *pmp)
mutex_enter(&pmp->pmp_lock);
vp = pmp->pmp_root;
if (vp) {
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
mutex_exit(&pmp->pmp_lock);
if (vget(vp, LK_INTERLOCK) == 0)
return 0;
@ -452,7 +451,7 @@ puffs_cookie2vnode(struct puffs_mount *pmp, void *cookie, int lock,
return PUFFS_NOSUCHCOOKIE;
}
vp = pnode->pn_vp;
simple_lock(&vp->v_interlock);
mutex_enter(&vp->v_interlock);
mutex_exit(&pmp->pmp_lock);
vgetflags = LK_INTERLOCK;
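
The same lookup idiom recurs throughout this part of the diff (adosfs, cd9660, efs, filecore, hfs, msdosfs, ptyfs and puffs): take the vnode's interlock while the hash or list lock is still held, drop the list lock, and hand the interlock to vget() via LK_INTERLOCK, retrying the lookup if the vnode is being reclaimed. A sketch of that shape; the hash lock and lookup function are hypothetical:

	#include <sys/param.h>
	#include <sys/mutex.h>
	#include <sys/vnode.h>

	extern kmutex_t example_hash_lock;              /* hypothetical */
	struct vnode *example_hash_lookup(u_long);      /* hypothetical, called locked */

	static struct vnode *
	example_get_vnode(u_long key)
	{
	        struct vnode *vp;

	again:
	        mutex_enter(&example_hash_lock);
	        vp = example_hash_lookup(key);
	        if (vp == NULL) {
	                mutex_exit(&example_hash_lock);
	                return NULL;
	        }
	        mutex_enter(&vp->v_interlock);          /* pin before dropping the hash lock */
	        mutex_exit(&example_hash_lock);
	        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
	                goto again;                     /* went away; redo the lookup */
	        return vp;                              /* referenced and locked */
	}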
