Remove buffer reference counting, now that it's safe to destroy b_busy after
waking any waiters.
commit 83f44bb607
parent e0bb7e8edd
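In brief, the diff below swaps one condvar lifetime scheme for another: previously every thread sleeping in bbusy() took a reference with bref() so that the buffer, and the b_busy condvar embedded in it, stayed allocated until the last sleeper dropped it with brele(); now the freeing path in brelsel() broadcasts on b_busy and destroys the buffer immediately, and a woken sleeper must look the buffer up again rather than touch it. Here is a minimal user-space analogue of the new wake-then-destroy scheme, written with pthreads; all names are invented for illustration and this is a sketch, not the kernel code:

/* cc wake_then_destroy.c -o wake_then_destroy -lpthread */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	pthread_cond_t	busy;		/* stands in for buf::b_busy */
	int		busy_flag;	/* stands in for BC_BUSY */
};

static struct obj *the_obj;		/* one-entry "cache"; NULL once freed */

static void *
waiter(void *arg)
{

	pthread_mutex_lock(&cache_lock);
	/*
	 * Re-check the cache after every wakeup: the object (and the
	 * condvar inside it) may have been destroyed while we slept,
	 * so it must be looked up again, never touched blindly.
	 */
	while (the_obj != NULL && the_obj->busy_flag)
		pthread_cond_wait(&the_obj->busy, &cache_lock);
	printf("waiter %ld: %s\n", (long)(intptr_t)arg,
	    the_obj == NULL ? "object gone, would retry lookup" : "acquired");
	pthread_mutex_unlock(&cache_lock);
	return NULL;
}

int
main(void)
{
	struct obj *o = malloc(sizeof(*o));
	pthread_t t[3];

	pthread_cond_init(&o->busy, NULL);
	o->busy_flag = 1;
	the_obj = o;
	for (long i = 0; i < 3; i++)	/* error checks omitted for brevity */
		pthread_create(&t[i], NULL, waiter, (void *)(intptr_t)i);
	sleep(1);			/* let the waiters go to sleep */

	pthread_mutex_lock(&cache_lock);
	the_obj = NULL;			  /* unpublish before the wakeup */
	pthread_cond_broadcast(&o->busy); /* wake ALL waiters... */
	pthread_cond_destroy(&o->busy);	  /* ...then destroy at once: POSIX
					     permits destroying a condvar as
					     soon as all blocked threads have
					     been awakened */
	free(o);
	pthread_mutex_unlock(&cache_lock);

	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}

The point of the design is that no waiter ever dereferences the object after waking without first re-checking the cache under the lock, which is the same contract the new comment in bbusy() spells out; once that holds, the reference count protects nothing and can go.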
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,7 +1,7 @@
-/*	$NetBSD: vfs_bio.c,v 1.290 2020/03/14 18:08:39 ad Exp $	*/
+/*	$NetBSD: vfs_bio.c,v 1.291 2020/04/10 17:18:04 ad Exp $	*/
 
 /*-
- * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2007, 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -123,7 +123,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.290 2020/03/14 18:08:39 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.291 2020/04/10 17:18:04 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_bufcache.h"
@@ -228,8 +228,6 @@ static int checkfreelist(buf_t *, struct bqueue *, int);
 #endif
 static void biointr(void *);
 static void biodone2(buf_t *);
-static void bref(buf_t *);
-static void brele(buf_t *);
 static void sysctl_kern_buf_setup(void);
 static void sysctl_vm_buf_setup(void);
 
@@ -410,38 +408,6 @@ bremfree(buf_t *bp)
 #endif /* defined(DIAGNOSTIC) */
 }
 
-/*
- * Add a reference to an buffer structure that came from buf_cache.
- */
-static inline void
-bref(buf_t *bp)
-{
-
-	KASSERT(mutex_owned(&bufcache_lock));
-	KASSERT(bp->b_refcnt > 0);
-
-	bp->b_refcnt++;
-}
-
-/*
- * Free an unused buffer structure that came from buf_cache.
- */
-static inline void
-brele(buf_t *bp)
-{
-
-	KASSERT(mutex_owned(&bufcache_lock));
-	KASSERT(bp->b_refcnt > 0);
-
-	if (bp->b_refcnt-- == 1) {
-		buf_destroy(bp);
-#ifdef DEBUG
-		memset((char *)bp, 0, sizeof(*bp));
-#endif
-		pool_cache_put(buf_cache, bp);
-	}
-}
-
 /*
  * note that for some ports this is used by pmap bootstrap code to
  * determine kva size.
@@ -1054,7 +1020,6 @@ brelsel(buf_t *bp, int set)
 	KASSERT(bp != NULL);
 	KASSERT(mutex_owned(&bufcache_lock));
 	KASSERT(!cv_has_waiters(&bp->b_done));
-	KASSERT(bp->b_refcnt > 0);
 
 	SET(bp->b_cflags, set);
 
@@ -1175,14 +1140,19 @@ already_queued:
 	 * prevent a thundering herd: many LWPs simultaneously awakening and
 	 * competing for the buffer's lock.  Testing in 2019 revealed this
 	 * to reduce contention on bufcache_lock tenfold during a kernel
-	 * compile.  Elsewhere, when the buffer is changing identity, being
-	 * disposed of, or moving from one list to another, we wake all lock
-	 * requestors.
+	 * compile.  Here and elsewhere, when the buffer is changing
+	 * identity, being disposed of, or moving from one list to another,
+	 * we wake all lock requestors.
 	 */
-	cv_signal(&bp->b_busy);
-
-	if (bp->b_bufsize <= 0)
-		brele(bp);
+	if (bp->b_bufsize <= 0) {
+		cv_broadcast(&bp->b_busy);
+		buf_destroy(bp);
+#ifdef DEBUG
+		memset((char *)bp, 0, sizeof(*bp));
+#endif
+		pool_cache_put(buf_cache, bp);
+	} else
+		cv_signal(&bp->b_busy);
 }
 
 void
@@ -1494,7 +1464,6 @@ getnewbuf(int slpflag, int slptimeo, int from_bufq)
 	}
 
 	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
-	KASSERT(bp->b_refcnt > 0);
 	KASSERT(!cv_has_waiters(&bp->b_done));
 
 	/*
@@ -1603,7 +1572,6 @@ biowait(buf_t *bp)
 	BIOHIST_FUNC(__func__);
 
 	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
-	KASSERT(bp->b_refcnt > 0);
 
 	SDT_PROBE1(io, kernel, , wait__start, bp);
 
@@ -2172,7 +2140,6 @@ buf_init(buf_t *bp)
 	bp->b_oflags = 0;
 	bp->b_objlock = &buffer_lock;
 	bp->b_iodone = NULL;
-	bp->b_refcnt = 1;
 	bp->b_dev = NODEV;
 	bp->b_vnbufs.le_next = NOLIST;
 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
@@ -2201,7 +2168,6 @@ bbusy(buf_t *bp, bool intr, int timo, kmutex_t *interlock)
 		goto out;
 	}
 	bp->b_cflags |= BC_WANTED;
-	bref(bp);
 	if (interlock != NULL)
 		mutex_exit(interlock);
 	if (intr) {
@@ -2211,16 +2177,18 @@ bbusy(buf_t *bp, bool intr, int timo, kmutex_t *interlock)
 			error = cv_timedwait(&bp->b_busy, &bufcache_lock,
 			    timo);
 		}
-		brele(bp);
+		/*
+		 * At this point the buffer may be gone: don't touch it
+		 * again.  The caller needs to find it again and retry.
+		 */
 		if (interlock != NULL)
 			mutex_enter(interlock);
-		if (error != 0)
-			goto out;
-		error = EPASSTHROUGH;
-		goto out;
+		if (error == 0)
+			error = EPASSTHROUGH;
+	} else {
+		bp->b_cflags |= BC_BUSY;
+		error = 0;
 	}
-	bp->b_cflags |= BC_BUSY;
-	error = 0;
 
 out:	SDT_PROBE5(io, kernel, , bbusy__done,
 	    bp, intr, timo, interlock, error);
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: buf.h,v 1.131 2019/08/26 10:24:39 msaitoh Exp $	*/
+/*	$NetBSD: buf.h,v 1.132 2020/04/10 17:18:04 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2007, 2008 The NetBSD Foundation, Inc.
@@ -149,7 +149,6 @@ struct buf {
 	off_t		b_dcookie;	/* NFS: Offset cookie if dir block */
 
 	kcondvar_t	b_busy;		/* c: threads waiting on buf */
-	u_int		b_refcnt;	/* c: refcount for b_busy */
 	void		*b_unused;	/* : unused */
 	LIST_ENTRY(buf)	b_hash;		/* c: hash chain */
 	LIST_ENTRY(buf)	b_vnbufs;	/* c: associated vnode */
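A consequence of the bbusy() change above: after any sleep on b_busy, successful or not, bbusy() now reports EPASSTHROUGH, because the buffer may have been destroyed while the caller slept. Callers handle that by redoing the lookup. A hypothetical, much-simplified version of the pattern, modelled loosely on getblk() (example_lookup is an invented name; the real code in vfs_bio.c allocates a replacement buffer and does considerably more, and this fragment only compiles in kernel context):

/* kernel context: sys/buf.h, sys/mutex.h */
buf_t *
example_lookup(struct vnode *vp, daddr_t blkno)
{
	buf_t *bp;
	int error;

 loop:
	mutex_enter(&bufcache_lock);
	bp = incore(vp, blkno);		/* hash lookup, bufcache_lock held */
	if (bp != NULL) {
		error = bbusy(bp, false, 0, NULL);
		if (error != 0) {
			mutex_exit(&bufcache_lock);
			if (error == EPASSTHROUGH) {
				/* We slept: bp may be gone, look again. */
				goto loop;
			}
			return NULL;
		}
	}
	mutex_exit(&bufcache_lock);
	return bp;			/* NULL, or busied by us */
}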