Move the handling of PG_PAGEOUT from uvm_aio_aiodone_pages() to
uvm_page_unbusy() so that all callers of uvm_page_unbusy() don't need
to handle this flag separately.  Split out the pages part of
uvm_aio_aiodone() into uvm_aio_aiodone_pages() in rump just like in
the real kernel.  In ZFS functions that can fail to copy data between
the ARC and VM pages, use uvm_aio_aiodone_pages() rather than
uvm_page_unbusy() so that we can handle these "I/O" errors.

Fixes PR 55702.
parent 428a4a93f2
commit 9d18193c79
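
The ZFS hunks themselves are not rendered on this page. As a rough sketch of the calling convention the commit message describes, using a hypothetical helper name and making no claim about the real ZFS code, a routine that may fail while copying between the ARC and busy VM pages would now finish the pages like this:

/*
 * Hypothetical sketch, not part of this diff: finish busy pages
 * after a possibly failed copy between the ARC and VM pages.
 * Reporting the failure through uvm_aio_aiodone_pages() (signature
 * as added to rump in this commit) lets it be handled like an I/O
 * error; a bare uvm_page_unbusy() would discard the error.
 */
#include <sys/param.h>
#include <uvm/uvm.h>

static int
zfs_copy_done(struct vm_page **pgs, int npages, bool write, int error)
{
	uvm_aio_aiodone_pages(pgs, npages, write, error);
	return error;
}
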
sys/rump/librump/rumpkern/vm_vfs.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: vm_vfs.c,v 1.38 2020/02/23 15:46:42 ad Exp $	*/
+/*	$NetBSD: vm_vfs.c,v 1.39 2020/10/18 18:22:29 chs Exp $	*/
 
 /*
  * Copyright (c) 2008-2011 Antti Kantee. All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.38 2020/02/23 15:46:42 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.39 2020/10/18 18:22:29 chs Exp $");
 
 #include <sys/param.h>
 
@@ -36,19 +36,37 @@ __KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.38 2020/02/23 15:46:42 ad Exp $");
 #include <uvm/uvm.h>
 #include <uvm/uvm_readahead.h>
 
+void
+uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
+{
+	struct uvm_object *uobj = pgs[0]->uobject;
+	struct vm_page *pg;
+	int i;
+
+	rw_enter(uobj->vmobjlock, RW_WRITER);
+	for (i = 0; i < npages; i++) {
+		pg = pgs[i];
+		KASSERT((pg->flags & PG_FAKE) == 0);
+	}
+	uvm_page_unbusy(pgs, npages);
+	rw_exit(uobj->vmobjlock);
+}
+
 /*
- * release resources held during async io. this is almost the
- * same as uvm_aio_aiodone() from uvm_pager.c and only lacks the
- * call to uvm_aio_aiodone_pages(): unbusies pages directly here.
+ * Release resources held during async io.
  */
 void
 uvm_aio_aiodone(struct buf *bp)
 {
-	struct uvm_object *uobj = NULL;
-	int i, npages = bp->b_bufsize >> PAGE_SHIFT;
+	int npages = bp->b_bufsize >> PAGE_SHIFT;
 	struct vm_page **pgs;
 	vaddr_t va;
-	int pageout = 0;
+	int i, error;
+	bool write;
+
+	error = bp->b_error;
+	write = BUF_ISWRITE(bp);
 
 	KASSERT(npages > 0);
 	pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);
@@ -59,27 +77,15 @@ uvm_aio_aiodone(struct buf *bp)
-		if (uobj == NULL) {
-			uobj = pgs[i]->uobject;
-			KASSERT(uobj != NULL);
-			rw_enter(uobj->vmobjlock, RW_WRITER);
-		} else {
-			KASSERT(uobj == pgs[i]->uobject);
-		}
-
-		if (pgs[i]->flags & PG_PAGEOUT) {
-			KASSERT((pgs[i]->flags & PG_FAKE) == 0);
-			pageout++;
-			pgs[i]->flags &= ~PG_PAGEOUT;
-			pgs[i]->flags |= PG_RELEASED;
-		}
 	}
-	KASSERT(rw_write_held(uobj->vmobjlock));
-
-	uvm_page_unbusy(pgs, npages);
-	rw_exit(uobj->vmobjlock);
-
 	uvm_pagermapout((vaddr_t)bp->b_data, npages);
-	uvm_pageout_done(pageout);
 
-	if (BUF_ISWRITE(bp) && (bp->b_cflags & BC_AGE) != 0) {
+	uvm_aio_aiodone_pages(pgs, npages, write, error);
+
+	if (write && (bp->b_cflags & BC_AGE) != 0) {
 		mutex_enter(bp->b_objlock);
 		vwakeup(bp);
 		mutex_exit(bp->b_objlock);
sys/uvm/uvm_page.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.247 2020/09/20 10:30:05 skrll Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.248 2020/10/18 18:22:29 chs Exp $	*/
 
 /*-
  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.247 2020/09/20 10:30:05 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.248 2020/10/18 18:22:29 chs Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -1602,9 +1602,10 @@ void
 uvm_page_unbusy(struct vm_page **pgs, int npgs)
 {
 	struct vm_page *pg;
-	int i;
+	int i, pageout_done;
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 
+	pageout_done = 0;
 	for (i = 0; i < npgs; i++) {
 		pg = pgs[i];
 		if (pg == NULL || pg == PGO_DONTCARE) {
@@ -1613,7 +1614,13 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs)
 
 		KASSERT(uvm_page_owner_locked_p(pg, true));
 		KASSERT(pg->flags & PG_BUSY);
-		KASSERT((pg->flags & PG_PAGEOUT) == 0);
+
+		if (pg->flags & PG_PAGEOUT) {
+			pg->flags &= ~PG_PAGEOUT;
+			pg->flags |= PG_RELEASED;
+			pageout_done++;
+			atomic_inc_uint(&uvmexp.pdfreed);
+		}
 		if (pg->flags & PG_RELEASED) {
 			UVMHIST_LOG(ubchist, "releasing pg %#jx",
 			    (uintptr_t)pg, 0, 0, 0);
@@ -1632,6 +1639,9 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs)
 			UVM_PAGE_OWN(pg, NULL);
 		}
 	}
+	if (pageout_done != 0) {
+		uvm_pageout_done(pageout_done);
+	}
 }
 
 /*
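
With the uvm_page.c change above, PG_PAGEOUT handling is centralized in uvm_page_unbusy(). A minimal sketch of the resulting caller-side convention, using a hypothetical caller and the locking implied by the KASSERTs above:

/*
 * Hypothetical caller sketch: pages marked PG_PAGEOUT by the
 * pagedaemon can now be passed straight to uvm_page_unbusy(),
 * which converts PG_PAGEOUT to PG_RELEASED, frees released pages,
 * and calls uvm_pageout_done() itself.
 */
static void
finish_pages(struct uvm_object *uobj, struct vm_page **pgs, int npages)
{
	rw_enter(uobj->vmobjlock, RW_WRITER);
	uvm_page_unbusy(pgs, npages);	/* no PG_PAGEOUT handling needed */
	rw_exit(uobj->vmobjlock);
}
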
sys/uvm/uvm_pager.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pager.c,v 1.129 2020/08/14 09:06:15 chs Exp $	*/
+/*	$NetBSD: uvm_pager.c,v 1.130 2020/10/18 18:22:29 chs Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.129 2020/08/14 09:06:15 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.130 2020/10/18 18:22:29 chs Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -456,18 +456,6 @@ uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
 			uvm_pageunlock(pg);
 		}
 
-		/*
-		 * do accounting for pagedaemon i/o and arrange to free
-		 * the pages instead of just unbusying them.
-		 */
-
-		if (pg->flags & PG_PAGEOUT) {
-			pg->flags &= ~PG_PAGEOUT;
-			pageout_done++;
-			atomic_inc_uint(&uvmexp.pdfreed);
-			pg->flags |= PG_RELEASED;
-		}
-
 #if defined(VMSWAP)
 		/*
 		 * for swap pages, unlock everything for this page now.