- fix a use-after-free bug in uvm_km_free.

(after uvm_km_pgremove frees pages, the following pmap_remove touches them.)
- acquire the object lock for operations on pmap_kernel, as it can actually
  race with P->V operations, e.g., from the pagedaemon.
This commit is contained in:
yamt 2011-07-05 14:03:06 +00:00
parent 7eec89b991
commit b456aa6a56
2 changed files with 11 additions and 65 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_km.c,v 1.109 2011/06/12 03:36:03 rmind Exp $ */
/* $NetBSD: uvm_km.c,v 1.110 2011/07/05 14:03:06 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -122,7 +122,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.109 2011/06/12 03:36:03 rmind Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.110 2011/07/05 14:03:06 yamt Exp $");
#include "opt_uvmhist.h"
@ -384,10 +384,7 @@ uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
}
/*
* uvm_km_pgremove: remove pages from a kernel uvm_object.
*
* => when you unmap a part of anonymous kernel memory you want to toss
* the pages right away. (this gets called from uvm_unmap_...).
* uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
*/
void
@ -406,7 +403,7 @@ uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);
mutex_enter(uobj->vmobjlock);
pmap_remove(pmap_kernel(), startva, endva);
for (curoff = start; curoff < end; curoff = nextoff) {
nextoff = curoff + PAGE_SIZE;
pg = uvm_pagelookup(uobj, curoff);
@ -474,6 +471,7 @@ uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
pg = PHYS_TO_VM_PAGE(pa);
KASSERT(pg);
KASSERT(pg->uobject == NULL && pg->uanon == NULL);
KASSERT((pg->flags & PG_BUSY) == 0);
uvm_pagefree(pg);
}
}
@ -655,15 +653,8 @@ uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
size = round_page(size);
if (flags & UVM_KMF_PAGEABLE) {
/*
* No need to lock for pmap, since the kernel is always
* self-consistent. The pages cannot be in use elsewhere.
*/
uvm_km_pgremove(addr, addr + size);
pmap_remove(pmap_kernel(), addr, addr + size);
} else if (flags & UVM_KMF_WIRED) {
/*
* Note: uvm_km_pgremove_intrsafe() extracts mapping, thus
@ -722,6 +713,8 @@ again:
return 0;
}
}
pg->flags &= ~PG_BUSY; /* new page */
UVM_PAGE_OWN(pg, NULL);
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
pmap_update(pmap_kernel());

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.c,v 1.299 2011/06/13 23:19:40 rmind Exp $ */
/* $NetBSD: uvm_map.c,v 1.300 2011/07/05 14:03:07 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.299 2011/06/13 23:19:40 rmind Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.300 2011/07/05 14:03:07 yamt Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@ -2367,55 +2367,8 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
}
} else if (UVM_ET_ISOBJ(entry) &&
UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
KASSERT(vm_map_pmap(map) == pmap_kernel());
/*
* note: kernel object mappings are currently used in
* two ways:
* [1] "normal" mappings of pages in the kernel object
* [2] uvm_km_valloc'd allocations in which we
* pmap_enter in some non-kernel-object page
* (e.g. vmapbuf).
*
* for case [1], we need to remove the mapping from
* the pmap and then remove the page from the kernel
* object (because, once pages in a kernel object are
* unmapped they are no longer needed, unlike, say,
* a vnode where you might want the data to persist
* until flushed out of a queue).
*
* for case [2], we need to remove the mapping from
* the pmap. there shouldn't be any pages at the
* specified offset in the kernel object [but it
* doesn't hurt to call uvm_km_pgremove just to be
* safe?]
*
* uvm_km_pgremove currently does the following:
* for pages in the kernel object in range:
* - drops the swap slot
* - uvm_pagefree the page
*/
/*
* remove mappings from pmap and drop the pages
* from the object. offsets are always relative
* to vm_map_min(kernel_map).
*
* don't need to lock object as the kernel is
* always self-consistent.
*/
pmap_remove(pmap_kernel(), entry->start,
entry->start + len);
uvm_km_pgremove(entry->start, entry->end);
/*
* null out kernel_object reference, we've just
* dropped it
*/
entry->etype &= ~UVM_ET_OBJ;
entry->object.uvm_obj = NULL;
panic("%s: kernel object %p %p\n",
__func__, map, entry);
} else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
/*
* remove mappings the standard way. lock object