Drop & re-acquire vmobjlock less often.
parent d6e424b62c
commit 5ba8a0826b
sys/uvm/uvm_readahead.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_readahead.c,v 1.12 2020/03/08 18:40:29 ad Exp $	*/
+/*	$NetBSD: uvm_readahead.c,v 1.13 2020/05/19 21:45:35 ad Exp $	*/
 
 /*-
  * Copyright (c)2003, 2005, 2009 YAMAMOTO Takashi,
@@ -40,7 +40,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.12 2020/03/08 18:40:29 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.13 2020/05/19 21:45:35 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/pool.h>
@@ -126,6 +126,8 @@ ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
 	DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
 	    __func__, uobj, off, endoff));
 
+	KASSERT(rw_write_held(uobj->vmobjlock));
+
 	/*
 	 * Don't issue read-ahead if the last page of the range is already cached.
 	 * The assumption is that since the access is sequential, the intermediate
@@ -133,9 +135,7 @@ ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
 	 * too. This speeds up I/O using cache, since it avoids lookups and temporary
 	 * allocations done by full pgo_get.
 	 */
-	rw_enter(uobj->vmobjlock, RW_READER);
 	struct vm_page *pg = uvm_pagelookup(uobj, trunc_page(endoff - 1));
-	rw_exit(uobj->vmobjlock);
 	if (pg != NULL) {
 		DPRINTF(("%s: off=%" PRIu64 ", sz=%zu already cached\n",
 		    __func__, off, sz));
@@ -162,9 +162,9 @@ ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
 		 * use UVM_ADV_RANDOM to avoid recursion.
 		 */
 
-		rw_enter(uobj->vmobjlock, RW_WRITER);
 		error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
 		    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, PGO_NOTIMESTAMP);
+		rw_enter(uobj->vmobjlock, RW_WRITER);
 		DPRINTF(("%s: off=%" PRIu64 ", bytelen=%zu -> %d\n",
 		    __func__, off, bytelen, error));
 		if (error != 0 && error != EBUSY) {
@@ -332,9 +332,7 @@ do_readahead:
 		if (rasize >= RA_MINSIZE) {
 			off_t next;
 
-			rw_exit(uobj->vmobjlock);
 			next = ra_startio(uobj, raoff, rasize);
-			rw_enter(uobj->vmobjlock, RW_WRITER);
 			ra->ra_next = next;
 		}
 	}
@@ -362,6 +360,8 @@ uvm_readahead(struct uvm_object *uobj, off_t off, off_t size)
 	if (size > RA_WINSIZE_MAX) {
 		size = RA_WINSIZE_MAX;
 	}
+	rw_enter(uobj->vmobjlock, RW_WRITER);
 	ra_startio(uobj, off, size);
+	rw_exit(uobj->vmobjlock);
 	return 0;
 }
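As a rough illustration of what the commit title means, the sketch below (plain user-space C, not NetBSD kernel code; obj_lock, helper_old and helper_new are made-up names, and pthread_rwlock_t merely stands in for the kernel rwlock behind vmobjlock) contrasts a helper that takes and drops the lock on every call with one that runs under a lock the caller already holds, which is the shape ra_startio() moves toward in this diff.

/* not NetBSD code: a user-space sketch of "drop & re-acquire less often" */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t obj_lock = PTHREAD_RWLOCK_INITIALIZER; /* stand-in for vmobjlock */

/* old shape: every call pays for its own lock/unlock round trip */
static void helper_old(int step)
{
	pthread_rwlock_wrlock(&obj_lock);
	printf("step %d under its own lock/unlock\n", step);
	pthread_rwlock_unlock(&obj_lock);
}

/* new shape: the caller holds the lock; the helper simply relies on it */
static void helper_new(int step)
{
	printf("step %d under the caller's lock\n", step);
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		helper_old(i);			/* three lock/unlock pairs */

	pthread_rwlock_wrlock(&obj_lock);	/* one lock/unlock pair */
	for (int i = 0; i < 3; i++)
		helper_new(i);
	pthread_rwlock_unlock(&obj_lock);
	return 0;
}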