fix unreasonably frequent "killed: out of swap" on systems which have
little or no swap.
- even under a severe swap shortage, don't bother to kill processes
  as long as we still have some amount of file-backed pages.
- if all pages in the queue are likely to be reactivated anyway, give up
  page type balancing rather than spinning unnecessarily.
yamt 2005-04-12 13:11:45 +00:00
parent 1bf2f3ae6e
commit 01c07ef7bd
4 changed files with 58 additions and 12 deletions

sys/uvm/uvm_fault.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault.c,v 1.91 2005/02/28 15:33:04 chs Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.92 2005/04/12 13:11:45 yamt Exp $ */
/*
*
@@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.91 2005/02/28 15:33:04 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.92 2005/04/12 13:11:45 yamt Exp $");
#include "opt_uvmhist.h"
@@ -1190,7 +1190,7 @@ ReFault:
uvm_anfree(anon);
}
uvmfault_unlockall(&ufi, amap, uobj, oanon);
-if (anon == NULL || uvm_swapisfull()) {
+if (anon == NULL || !uvm_reclaimable()) {
UVMHIST_LOG(maphist,
"<- failed. out of VM",0,0,0,0);
uvmexp.fltnoanon++;
@@ -1254,7 +1254,7 @@ ReFault:
if (anon != oanon)
simple_unlock(&anon->an_lock);
uvmfault_unlockall(&ufi, amap, uobj, oanon);
-if (uvm_swapisfull()) {
+if (!uvm_reclaimable()) {
UVMHIST_LOG(maphist,
"<- failed. out of VM",0,0,0,0);
/* XXX instrumentation */
@@ -1558,7 +1558,7 @@ Case2:
/* unlock and fail ... */
uvmfault_unlockall(&ufi, amap, uobj, NULL);
-if (anon == NULL || uvm_swapisfull()) {
+if (anon == NULL || !uvm_reclaimable()) {
UVMHIST_LOG(maphist, " promote: out of VM",
0,0,0,0);
uvmexp.fltnoanon++;
@@ -1667,7 +1667,7 @@ Case2:
pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
UVM_PAGE_OWN(pg, NULL);
uvmfault_unlockall(&ufi, amap, uobj, anon);
-if (uvm_swapisfull()) {
+if (!uvm_reclaimable()) {
UVMHIST_LOG(maphist,
"<- failed. out of VM",0,0,0,0);
/* XXX instrumentation */
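
In all four uvm_fault.c failure paths the test changes from uvm_swapisfull() ("is every swap page in use?") to !uvm_reclaimable() ("does UVM believe nothing at all can be reclaimed?"), so a fault no longer fails merely because swap is exhausted. The surrounding logic, paraphrased and simplified rather than quoted from this diff (the wmesg string is hypothetical), looks roughly like:

	if (anon == NULL || !uvm_reclaimable()) {
		/*
		 * truly nothing left to reclaim: fail the fault, which can
		 * end with the faulting process being killed "out of swap".
		 */
		return ENOMEM;
	}

	/*
	 * swap may be full, but file-backed pages remain: wait for the
	 * pagedaemon to free some memory and retry the fault.
	 */
	uvm_wait("fltnoram");		/* hypothetical wmesg */
	goto ReFault;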

sys/uvm/uvm_km.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.79 2005/04/01 12:37:27 yamt Exp $ */
+/* $NetBSD: uvm_km.c,v 1.80 2005/04/12 13:11:45 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -130,7 +130,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.79 2005/04/01 12:37:27 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.80 2005/04/12 13:11:45 yamt Exp $");
#include "opt_uvmhist.h"
@@ -596,7 +596,7 @@ uvm_km_alloc(map, size, align, flags)
if (__predict_false(pg == NULL)) {
if ((flags & UVM_KMF_NOWAIT) ||
-((flags & UVM_KMF_CANFAIL) && uvm_swapisfull())) {
+((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
/* free everything! */
uvm_km_free(map, kva, size,
flags & UVM_KMF_TYPEMASK);
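
With this uvm_km.c change, a kernel-memory allocation made with UVM_KMF_CANFAIL fails only when uvm_reclaimable() reports that nothing can be freed, rather than whenever swap happens to be full. A minimal caller sketch under the uvm_km_alloc(map, size, align, flags) interface shown above; the UVM_KMF_WIRED flag, kernel_map, and the helper function itself are assumptions made for the illustration:

#include <sys/param.h>
#include <sys/errno.h>
#include <uvm/uvm_extern.h>

/*
 * sketch only: allocate a wired kernel buffer, but return ENOMEM instead
 * of sleeping in uvm_wait() once UVM decides that nothing is reclaimable.
 */
static int
alloc_buffer(vsize_t len, vaddr_t *kvap)
{
	vaddr_t kva;

	kva = uvm_km_alloc(kernel_map, round_page(len), 0,
	    UVM_KMF_WIRED | UVM_KMF_CANFAIL);
	if (kva == 0)
		return ENOMEM;		/* !uvm_reclaimable(): give up */
	*kvap = kva;
	return 0;
}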

sys/uvm/uvm_pdaemon.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pdaemon.c,v 1.61 2005/01/30 17:23:05 chs Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.62 2005/04/12 13:11:45 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.61 2005/01/30 17:23:05 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.62 2005/04/12 13:11:45 yamt Exp $");
#include "opt_uvmhist.h"
@@ -423,6 +423,9 @@ uvmpd_scan_inactive(pglst)
anonreact = anonunder || (!anonover && (fileover || execover));
filereact = fileunder || (!fileover && (anonover || execover));
execreact = execunder || (!execover && (anonover || fileover));
+if (filereact && execreact && (anonreact || uvm_swapisfull())) {
+	anonreact = filereact = execreact = FALSE;
+}
for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
uobj = NULL;
anon = NULL;
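
The three *react flags above mark page types whose pages should be reactivated rather than freed, to keep each type near its usage target. The new test catches the degenerate case where every type the scan could usefully free is protected: file and exec pages would only be reactivated, and anonymous pages either would be too or cannot be paged out because swap is full (trivially so on a swapless machine). In that case balancing is abandoned so the scan can still make progress. A standalone, userland-only illustration of the boolean logic; the particular page-queue state is an assumption chosen for the example:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	/* assumed state: swapless box, anon pages over target, file/exec under */
	bool anonunder = false, anonover = true;
	bool fileunder = true,  fileover = false;
	bool execunder = true,  execover = false;
	bool swapisfull = true;		/* no swap configured at all */

	/* same expressions as in uvmpd_scan_inactive() above */
	bool anonreact = anonunder || (!anonover && (fileover || execover));
	bool filereact = fileunder || (!fileover && (anonover || execover));
	bool execreact = execunder || (!execover && (anonover || fileover));

	printf("before: anonreact=%d filereact=%d execreact=%d\n",
	    anonreact, filereact, execreact);

	/* everything would just be reactivated: give up balancing */
	if (filereact && execreact && (anonreact || swapisfull))
		anonreact = filereact = execreact = false;

	printf("after:  anonreact=%d filereact=%d execreact=%d\n",
	    anonreact, filereact, execreact);
	return 0;
}

Without the new check, this state leaves filereact and execreact TRUE while the anonymous pages cannot be paged out, so the inactive scan reactivates everything it visits and frees nothing.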
@@ -911,3 +914,45 @@ uvmpd_scan(void)
		simple_unlock(slock);
	}
}
+
+/*
+ * uvm_reclaimable: decide whether to wait for pagedaemon.
+ *
+ * => return TRUE if it seems to be worth to do uvm_wait.
+ *
+ * XXX should be tunable.
+ * XXX should consider pools, etc?
+ */
+
+boolean_t
+uvm_reclaimable(void)
+{
+	int filepages;
+
+	/*
+	 * if swap is not full, no problem.
+	 */
+
+	if (!uvm_swapisfull()) {
+		return TRUE;
+	}
+
+	/*
+	 * file-backed pages can be reclaimed even when swap is full.
+	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
+	 *
+	 * XXX assume the worst case, ie. all wired pages are file-backed.
+	 */
+
+	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
+	if (filepages >= MIN((uvmexp.active + uvmexp.inactive) >> 4,
+	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
+		return TRUE;
+	}
+
+	/*
+	 * kill the process, fail allocation, etc..
+	 */
+
+	return FALSE;
+}
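
For scale: with the common 4 KB page size, 5 * 1024 * 1024 >> PAGE_SHIFT is 1280 pages, so uvm_reclaimable() keeps returning TRUE as long as roughly min(1/16 of pageable memory, 5 MB) of file-backed pages remain beyond the wired count. A small userland sketch of the arithmetic; the page size and the amount of pageable memory are assumptions for the example:

#include <stdio.h>

#define PAGE_SHIFT	12			/* 4 KB pages (assumed) */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	int active = 49152, inactive = 16384;	/* 256 MB pageable (assumed) */

	int sixteenth = (active + inactive) >> 4;	/* 4096 pages = 16 MB */
	int fivemeg = 5 * 1024 * 1024 >> PAGE_SHIFT;	/* 1280 pages = 5 MB */
	int threshold = MIN(sixteenth, fivemeg);	/* 1280 pages */

	printf("pageable: %d pages, threshold: %d pages (%d KB)\n",
	    active + inactive, threshold, threshold << (PAGE_SHIFT - 10));
	return 0;
}

On this hypothetical machine the pagedaemon is still considered able to make progress, and allocations and faults are not failed, as long as at least about 5 MB of file-backed or exec pages are present.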

sys/uvm/uvm_pdaemon.h

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pdaemon.h,v 1.10 2004/03/24 07:55:01 junyoung Exp $ */
+/* $NetBSD: uvm_pdaemon.h,v 1.11 2005/04/12 13:11:45 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -80,6 +80,7 @@
*/
void uvm_wait(const char *);
+boolean_t uvm_reclaimable(void);
#endif /* _KERNEL */