Forward some UVM from matt-nb5-mips64. Add UVM_KMF_COLORMATCH flag.
When uvm_map gets passed UVM_FLAG_COLORMATCH, the align argument contains the color of the starting address to be allocated (0..colormask). When uvm_km_alloc is passed UVM_KMF_COLORMATCH (which can only be used with UVM_KMF_VAONLY), the align argument contains the color of the starting address to be allocated. Change uvm_pagermapin to use this. When mapping user pages in the kernel, if colormatch is used with the color of the starting user page then the kernel mapping will be congruent with the existing user mappings.
This commit is contained in:
parent
7f7e1ce392
commit
207bff18bc
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: uvm_extern.h,v 1.175 2011/08/27 09:11:53 christos Exp $ */
|
||||
/* $NetBSD: uvm_extern.h,v 1.176 2011/09/01 06:40:28 matt Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
||||
@ -168,6 +168,7 @@ typedef voff_t pgoff_t; /* XXX: number of pages within a uvm object */
|
||||
#define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK /* try locking only */
|
||||
#define UVM_KMF_NOWAIT UVM_FLAG_NOWAIT /* not allowed to sleep */
|
||||
#define UVM_KMF_WAITVA UVM_FLAG_WAITVA /* sleep for va */
|
||||
#define UVM_KMF_COLORMATCH UVM_FLAG_COLORMATCH /* start at color in align */
|
||||
|
||||
/*
|
||||
* the following defines the strategies for uvm_pagealloc_strat()
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: uvm_km.c,v 1.110 2011/07/05 14:03:06 yamt Exp $ */
|
||||
/* $NetBSD: uvm_km.c,v 1.111 2011/09/01 06:40:28 matt Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
||||
@ -122,7 +122,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.110 2011/07/05 14:03:06 yamt Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.111 2011/09/01 06:40:28 matt Exp $");
|
||||
|
||||
#include "opt_uvmhist.h"
|
||||
|
||||
@ -533,6 +533,8 @@ uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
|
||||
KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
|
||||
(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
|
||||
(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
|
||||
KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);
|
||||
KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);
|
||||
|
||||
/*
|
||||
* setup for call
|
||||
@ -551,7 +553,8 @@ uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
|
||||
if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
|
||||
align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
|
||||
UVM_ADV_RANDOM,
|
||||
(flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
|
||||
(flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
|
||||
| UVM_KMF_COLORMATCH))
|
||||
| UVM_FLAG_QUANTUM)) != 0)) {
|
||||
UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
|
||||
return(0);
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: uvm_map.c,v 1.303 2011/08/06 17:25:03 rmind Exp $ */
|
||||
/* $NetBSD: uvm_map.c,v 1.304 2011/09/01 06:40:28 matt Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
||||
@ -66,7 +66,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.303 2011/08/06 17:25:03 rmind Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.304 2011/09/01 06:40:28 matt Exp $");
|
||||
|
||||
#include "opt_ddb.h"
|
||||
#include "opt_uvmhist.h"
|
||||
@ -306,7 +306,7 @@ static vsize_t uvm_kmapent_overhead(vsize_t);
|
||||
static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
|
||||
static void uvm_map_reference_amap(struct vm_map_entry *, int);
|
||||
static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
|
||||
struct vm_map_entry *);
|
||||
int, struct vm_map_entry *);
|
||||
static void uvm_map_unreference_amap(struct vm_map_entry *, int);
|
||||
|
||||
int _uvm_map_sanity(struct vm_map *);
|
||||
@ -1875,7 +1875,7 @@ failed:
|
||||
*/
|
||||
static int
|
||||
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
|
||||
vsize_t align, int topdown, struct vm_map_entry *entry)
|
||||
vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
|
||||
{
|
||||
vaddr_t end;
|
||||
|
||||
@ -1888,7 +1888,27 @@ uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
|
||||
if (uoffset != UVM_UNKNOWN_OFFSET)
|
||||
PMAP_PREFER(uoffset, start, length, topdown);
|
||||
#endif
|
||||
if (align != 0) {
|
||||
if ((flags & UVM_FLAG_COLORMATCH) != 0) {
|
||||
KASSERT(align < uvmexp.ncolors);
|
||||
if (uvmexp.ncolors > 1) {
|
||||
const u_int colormask = uvmexp.colormask;
|
||||
const u_int colorsize = colormask + 1;
|
||||
vaddr_t hint = atop(*start);
|
||||
const u_int color = hint & colormask;
|
||||
if (color != align) {
|
||||
hint -= color; /* adjust to color boundary */
|
||||
KASSERT((hint & colormask) == 0);
|
||||
if (topdown) {
|
||||
if (align > color)
|
||||
hint -= colorsize;
|
||||
} else {
|
||||
if (align < color)
|
||||
hint += colorsize;
|
||||
}
|
||||
*start = ptoa(hint + align); /* adjust to color */
|
||||
}
|
||||
}
|
||||
} else if (align != 0) {
|
||||
if ((*start & (align - 1)) != 0) {
|
||||
if (topdown)
|
||||
*start &= ~(align - 1);
|
||||
@ -1944,7 +1964,8 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
|
||||
|
||||
UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
|
||||
map, hint, length, flags);
|
||||
KASSERT((align & (align - 1)) == 0);
|
||||
KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || (align & (align - 1)) == 0);
|
||||
KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
|
||||
KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
|
||||
|
||||
uvm_map_check(map, "map_findspace entry");
|
||||
@ -2022,7 +2043,7 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
|
||||
* See if given hint fits in this gap.
|
||||
*/
|
||||
switch (uvm_map_space_avail(&hint, length,
|
||||
uoffset, align, topdown, entry)) {
|
||||
uoffset, align, flags, topdown, entry)) {
|
||||
case 1:
|
||||
goto found;
|
||||
case -1:
|
||||
@ -2053,7 +2074,7 @@ uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
|
||||
|
||||
/* Check slot before any entry */
|
||||
hint = topdown ? entry->next->start - length : entry->end;
|
||||
switch (uvm_map_space_avail(&hint, length, uoffset, align,
|
||||
switch (uvm_map_space_avail(&hint, length, uoffset, align, flags,
|
||||
topdown, entry)) {
|
||||
case 1:
|
||||
goto found;
|
||||
@ -2122,7 +2143,7 @@ nextgap:
|
||||
hint = tmp->end;
|
||||
}
|
||||
switch (uvm_map_space_avail(&hint, length, uoffset, align,
|
||||
topdown, tmp)) {
|
||||
flags, topdown, tmp)) {
|
||||
case 1:
|
||||
entry = tmp;
|
||||
goto found;
|
||||
@ -2144,7 +2165,7 @@ nextgap:
|
||||
hint = prev->end;
|
||||
}
|
||||
switch (uvm_map_space_avail(&hint, length, uoffset, align,
|
||||
topdown, prev)) {
|
||||
flags, topdown, prev)) {
|
||||
case 1:
|
||||
entry = prev;
|
||||
goto found;
|
||||
@ -2185,7 +2206,7 @@ nextgap:
|
||||
hint = tmp->end;
|
||||
}
|
||||
switch (uvm_map_space_avail(&hint, length, uoffset, align,
|
||||
topdown, tmp)) {
|
||||
flags, topdown, tmp)) {
|
||||
case 1:
|
||||
entry = tmp;
|
||||
goto found;
|
||||
@ -2211,7 +2232,7 @@ nextgap:
|
||||
|
||||
/* See if it fits. */
|
||||
switch (uvm_map_space_avail(&hint, length, uoffset, align,
|
||||
topdown, entry)) {
|
||||
flags, topdown, entry)) {
|
||||
case 1:
|
||||
goto found;
|
||||
case -1:
|
||||
@ -2393,7 +2414,8 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
|
||||
for (va = entry->start; va < entry->end;
|
||||
va += PAGE_SIZE) {
|
||||
if (pmap_extract(vm_map_pmap(map), va, NULL)) {
|
||||
panic("uvm_unmap_remove: has mapping");
|
||||
panic("%s: %#"PRIxVADDR" has mapping",
|
||||
__func__, va);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4703,7 +4725,7 @@ again:
|
||||
KASSERT(va != 0);
|
||||
#else
|
||||
error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
|
||||
0, mapflags, &args);
|
||||
VM_PGCOLOR_BUCKET(pg), mapflags | UVM_FLAG_COLORMATCH, &args);
|
||||
if (error) {
|
||||
uvm_pagefree(pg);
|
||||
return NULL;
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $NetBSD: uvm_pager.c,v 1.103 2011/08/23 03:00:35 oki Exp $ */
|
||||
/* $NetBSD: uvm_pager.c,v 1.104 2011/09/01 06:40:28 matt Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
||||
@ -32,7 +32,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.103 2011/08/23 03:00:35 oki Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.104 2011/09/01 06:40:28 matt Exp $");
|
||||
|
||||
#include "opt_uvmhist.h"
|
||||
#include "opt_readahead.h"
|
||||
@ -101,7 +101,8 @@ uvm_pager_init(void)
|
||||
false, NULL);
|
||||
mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
|
||||
pager_map_wanted = false;
|
||||
emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
|
||||
emergva = uvm_km_alloc(kernel_map,
|
||||
round_page(MAXPHYS) + ptoa(uvmexp.ncolors), 0,
|
||||
UVM_KMF_VAONLY);
|
||||
#if defined(DEBUG)
|
||||
if (emergva == 0)
|
||||
@ -139,10 +140,12 @@ uvm_pagermapin(struct vm_page **pps, int npages, int flags)
|
||||
vaddr_t cva;
|
||||
struct vm_page *pp;
|
||||
vm_prot_t prot;
|
||||
const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
|
||||
const bool pdaemon = (curlwp == uvm.pagedaemon_lwp);
|
||||
const u_int first_color = VM_PGCOLOR_BUCKET(*pps);
|
||||
UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);
|
||||
|
||||
UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);
|
||||
UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, first_color=%u)",
|
||||
pps, npages, first_color, 0);
|
||||
|
||||
/*
|
||||
* compute protection. outgoing I/O only needs read
|
||||
@ -154,11 +157,12 @@ uvm_pagermapin(struct vm_page **pps, int npages, int flags)
|
||||
prot |= VM_PROT_WRITE;
|
||||
|
||||
ReStart:
|
||||
size = npages << PAGE_SHIFT;
|
||||
size = ptoa(npages);
|
||||
kva = 0; /* let system choose VA */
|
||||
|
||||
if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
|
||||
UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
|
||||
if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET,
|
||||
first_color, UVM_FLAG_COLORMATCH | UVM_FLAG_NOMERGE
|
||||
| (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
|
||||
if (pdaemon) {
|
||||
mutex_enter(&pager_map_wanted_lock);
|
||||
if (emerginuse) {
|
||||
@ -169,7 +173,7 @@ ReStart:
|
||||
}
|
||||
emerginuse = true;
|
||||
mutex_exit(&pager_map_wanted_lock);
|
||||
kva = emergva;
|
||||
kva = emergva + ptoa(first_color);
|
||||
/* The shift implicitly truncates to PAGE_SIZE */
|
||||
KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
|
||||
goto enter;
|
||||
@ -188,9 +192,10 @@ ReStart:
|
||||
|
||||
enter:
|
||||
/* got it */
|
||||
for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
|
||||
for (cva = kva; npages != 0; npages--, cva += PAGE_SIZE) {
|
||||
pp = *pps++;
|
||||
KASSERT(pp);
|
||||
// KASSERT(!((VM_PAGE_TO_PHYS(pp) ^ cva) & uvmexp.colormask));
|
||||
KASSERT(pp->flags & PG_BUSY);
|
||||
pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot, 0);
|
||||
}
|
||||
@ -210,7 +215,7 @@ enter:
|
||||
void
|
||||
uvm_pagermapout(vaddr_t kva, int npages)
|
||||
{
|
||||
vsize_t size = npages << PAGE_SHIFT;
|
||||
vsize_t size = ptoa(npages);
|
||||
struct vm_map_entry *entries;
|
||||
UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);
|
||||
|
||||
@ -220,10 +225,10 @@ uvm_pagermapout(vaddr_t kva, int npages)
|
||||
* duplicate uvm_unmap, but add in pager_map_wanted handling.
|
||||
*/
|
||||
|
||||
pmap_kremove(kva, npages << PAGE_SHIFT);
|
||||
pmap_kremove(kva, size);
|
||||
pmap_update(pmap_kernel());
|
||||
|
||||
if (kva == emergva) {
|
||||
if ((kva & ~ptoa(uvmexp.colormask)) == emergva) {
|
||||
mutex_enter(&pager_map_wanted_lock);
|
||||
emerginuse = false;
|
||||
wakeup(&emergva);
|
||||
|
Loading…
Reference in New Issue
Block a user