/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.11 1994/01/08 04:02:36 mycroft Exp $
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

#ifdef	MACHINE_NONCONTIG
/*
 *	These variables record the values returned by vm_page_bootstrap,
 *	for debugging purposes.  The implementation of pmap_steal_memory
 *	and pmap_startup here also uses them internally.
 */
vm_offset_t	virtual_space_start;
vm_offset_t	virtual_space_end;
#endif	/* MACHINE_NONCONTIG */

queue_head_t	*vm_page_buckets;	/* Array of buckets */
int		vm_page_bucket_count = 0;	/* How big is array? */
int		vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t	bucket_lock;	/* lock for all buckets XXX */

vm_size_t	page_size = 4096;
vm_size_t	page_mask = 4095;
int		page_shift = 12;

queue_head_t	vm_page_queue_free;
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
simple_lock_data_t	vm_page_queue_lock;
simple_lock_data_t	vm_page_queue_free_lock;

vm_page_t	vm_page_array;

#ifndef	MACHINE_NONCONTIG
long		first_page;
long		last_page;
vm_offset_t	first_phys_addr;
vm_offset_t	last_phys_addr;
#else
u_long		first_page;
int		vm_page_count;
#endif	/* MACHINE_NONCONTIG */

int	vm_page_free_count;
int	vm_page_active_count;
int	vm_page_inactive_count;
int	vm_page_wire_count;
int	vm_page_laundry_count;

int	vm_page_free_target = 0;
int	vm_page_free_min = 0;
int	vm_page_inactive_target = 0;
int	vm_page_free_reserved = 0;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size()
{
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == page_size)
			break;
}
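
/*
 *	For illustration only: with the default page_size of 4096 above,
 *	vm_set_page_size() leaves page_mask = 0xfff and page_shift = 12;
 *	a machine-dependent page_size of 8192 would yield page_mask =
 *	0x1fff and page_shift = 13.
 */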

#ifdef	MACHINE_NONCONTIG
/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */
void
vm_page_bootstrap(startp, endp)
	vm_offset_t	*startp;
	vm_offset_t	*endp;
{
	int		i;

	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) pmap_steal_memory(kentry_data_size);

	/*
	 *	Validate these zone addresses.
	 */

	bzero((caddr_t) kentry_data, kentry_data_size);

	/*
	 *	Allocate (and initialize) the virtual-to-physical
	 *	table hash buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	if (vm_page_bucket_count == 0) {
		unsigned int npages = pmap_free_pages();

		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < npages)
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
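
	/*
	 *	For example (illustrative numbers only): if pmap_free_pages()
	 *	reported 8100 physical pages, the loop above would leave
	 *	vm_page_bucket_count at 8192, the next power of 2, and
	 *	vm_page_hash_mask at 0x1fff.
	 */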

	vm_page_buckets = (queue_t)
		pmap_steal_memory(vm_page_bucket_count * sizeof(*vm_page_buckets));

	for (i = 0; i < vm_page_bucket_count; i++) {
		register queue_head_t *bucket = &vm_page_buckets[i];

		queue_init(bucket);
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	Machine-dependent code allocates the resident page table.
	 *	It uses VM_PAGE_INIT to initialize the page frames.
	 *	The code also returns to us the virtual space available
	 *	to the kernel.  We don't trust the pmap module
	 *	to get the alignment right.
	 */

	pmap_startup(&virtual_space_start, &virtual_space_end);
	virtual_space_start = round_page(virtual_space_start);
	virtual_space_end = trunc_page(virtual_space_end);

	*startp = virtual_space_start;
	*endp = virtual_space_end;

	simple_lock_init(&vm_pages_needed_lock);
}

#else	/* MACHINE_NONCONTIG */

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(start, end, vaddr)
	register vm_offset_t	start;
	vm_offset_t		end;
	register vm_offset_t	vaddr;
{
	register vm_offset_t	mapped;
	register vm_page_t	m;
	register queue_t	bucket;
	vm_size_t		npages;
	register vm_offset_t	new_start;
	int			i;
	vm_offset_t		pa;

	extern	vm_offset_t	kentry_data;
	extern	vm_size_t	kentry_data_size;


	/*
	 *	Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 *	Initialize the queue headers for the free queue,
	 *	the active queue and the inactive queue.
	 */

	queue_init(&vm_page_queue_free);
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);

	/*
	 *	Allocate (and initialize) the hash table buckets.
	 *
	 *	The number of buckets MUST BE a power of 2, and
	 *	the actual value is the next power of 2 greater
	 *	than the number of physical pages in the system.
	 *
	 *	Note:
	 *		This computation can be tweaked if desired.
	 */

	vm_page_buckets = (queue_t) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(end - start))
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Validate these addresses.
	 */

	new_start = round_page(((queue_t)start) + vm_page_bucket_count);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = vm_page_bucket_count; i--;) {
		queue_init(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 *	round (or truncate) the addresses to our page size.
	 */

	end = trunc_page(end);

	/*
	 *	Pre-allocate maps and map entries that cannot be dynamically
	 *	allocated via malloc().  The maps include the kernel_map and
	 *	kmem_map which must be initialized before malloc() will
	 *	work (obviously).  Also could include pager maps which would
	 *	be allocated before kmeminit.
	 *
	 *	Allow some kernel map entries... this should be plenty
	 *	since people shouldn't be cluttering up the kernel
	 *	map (they should use their own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
			   MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 *	Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	mapped = vaddr;
	start = new_start;

	/*
	 *	Compute the number of pages of memory that will be
	 *	available for use (taking into account the overhead
	 *	of a page structure per page).
	 */

	vm_page_free_count = npages =
		(end - start + sizeof(struct vm_page))/(PAGE_SIZE + sizeof(struct vm_page));
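
	/*
	 *	Each managed page consumes PAGE_SIZE bytes of usable memory
	 *	plus one struct vm_page of bookkeeping, so dividing the
	 *	remaining span by (PAGE_SIZE + sizeof(struct vm_page)) gives
	 *	the number of pages that fit together with their page
	 *	structures.  For illustration only: a 16 MB span with
	 *	4096-byte pages and a (hypothetical) 64-byte vm_page gives
	 *	16777280 / 4160 = 4033 pages rather than 4096.
	 */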

	/*
	 *	Initialize the mem entry structures now, and
	 *	put them in the free queue.
	 */

	m = vm_page_array = (vm_page_t) vaddr;
	first_page = start;
	first_page += npages*sizeof(struct vm_page);
	first_page = atop(round_page(first_page));
	last_page  = first_page + npages - 1;

	first_phys_addr = ptoa(first_page);
	last_phys_addr  = ptoa(last_page) + page_mask;

	/*
	 *	Validate these addresses.
	 */

	new_start = start + (round_page(m + npages) - mapped);
	mapped = pmap_map(mapped, start, new_start,
			VM_PROT_READ|VM_PROT_WRITE);
	start = new_start;

	/*
	 *	Clear all of the page structures
	 */
	bzero((caddr_t)m, npages * sizeof(*m));

	pa = first_phys_addr;
	while (npages--) {
		m->phys_addr = pa;
		queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
		m++;
		pa += PAGE_SIZE;
	}

	/*
	 *	Initialize vm_pages_needed lock here - don't wait for pageout
	 *	daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return(mapped);
}
#endif	/* MACHINE_NONCONTIG */

#if	defined(MACHINE_NONCONTIG) && !defined(MACHINE_PAGES)
/*
 *	We implement pmap_steal_memory and pmap_startup with the help
 *	of two simpler functions, pmap_virtual_space and pmap_next_page.
 */
vm_offset_t
pmap_steal_memory(size)
	vm_size_t	size;
{
	vm_offset_t	addr, vaddr, paddr;

	/*
	 *	We round the size to an integer multiple.
	 */

	size = (size + 3) &~ 3;
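
	/*
	 *	The expression above rounds the request up to a multiple of
	 *	4 bytes: e.g. a 13-byte request becomes 16, while 12 stays 12.
	 */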

	/*
	 *	If this is the first call to pmap_steal_memory,
	 *	we have to initialize ourself.
	 */

	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/*
		 *	The initial values must be aligned properly, and
		 *	we don't trust the pmap module to do it right.
		 */

		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);
	}

	/*
	 *	Allocate virtual memory for this request.
	 */

	addr = virtual_space_start;
	virtual_space_start += size;

	/*
	 *	Allocate and map physical pages to back new virtual pages.
	 */

	for (vaddr = round_page(addr);
	     vaddr < addr + size;
	     vaddr += PAGE_SIZE) {
		if (!pmap_next_page(&paddr))
			panic("pmap_steal_memory");

		/*
		 *	XXX Logically, these mappings should be wired,
		 *	but some pmap modules barf if they are.
		 */

		pmap_enter(kernel_pmap, vaddr, paddr,
			   VM_PROT_READ|VM_PROT_WRITE, FALSE);
	}

	return addr;
}

void
pmap_startup(startp, endp)
	vm_offset_t	*startp;
	vm_offset_t	*endp;
{
	unsigned int	i;
	vm_offset_t	paddr;

	/*
	 *	We calculate how many page frames we will have
	 *	and then allocate the page structures in one chunk.
	 */

	vm_page_count = ((PAGE_SIZE * pmap_free_pages() +
			  (round_page(virtual_space_start) - virtual_space_start)) /
			 (PAGE_SIZE + sizeof *vm_page_array));

	vm_page_array = (vm_page_t) pmap_steal_memory(vm_page_count
						      * sizeof *vm_page_array);

	/*
	 *	Initialize the page frames.
	 */

	for (i = 0; i < vm_page_count; i++) {
		if (!pmap_next_page(&paddr))
			break;

		VM_PAGE_INIT(&vm_page_array[i], NULL, NULL);
		vm_page_array[i].phys_addr = paddr;
		vm_page_free(&vm_page_array[i]);
	}
	/*
	 *	Remember the actual page count and the index of the first page
	 */
	vm_page_count = i;
	first_page = pmap_page_index(vm_page_array[0].phys_addr);

	*startp = virtual_space_start;
	*endp = virtual_space_end;
}
#endif	/* MACHINE_NONCONTIG && !MACHINE_PAGES */

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	This macro depends on vm_page_bucket_count being a power of 2.
 */
#define	vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
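
/*
 *	Since vm_page_bucket_count is a power of 2, ANDing with
 *	vm_page_hash_mask (the bucket count minus one) is equivalent to
 *	taking the sum modulo the bucket count: e.g. with 8192 buckets the
 *	mask is 0x1fff, so only the low 13 bits of (object + atop(offset))
 *	select the bucket.
 */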

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object and page must be locked.
 */
void
vm_page_insert(mem, object, offset)
	register vm_page_t	mem;
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_enter(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->flags |= PG_TABLED;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */
void
vm_page_remove(mem)
	register vm_page_t	mem;
{
	register queue_t	bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 *	Remove from the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	queue_remove(bucket, mem, vm_page_t, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */
vm_page_t
vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register queue_t	bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	mem = (vm_page_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) mem)) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
		mem = (vm_page_t) queue_next(&mem->hashq);
	}

	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	if (mem->object == new_object)
		return;

	VM_PAGE_LOCK_QUEUES();	/* keep page from moving out from
				   under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	VM_PAGE_UNLOCK_QUEUES();
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int			spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if ( object != kernel_object &&
	     object != kmem_object &&
	     vm_page_free_count <= vm_page_free_reserved) {

		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	vm_page_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	VM_PAGE_INIT(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */
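	/*
	 *	For instance, if the pageout daemon had computed
	 *	vm_page_free_min = 16 and vm_page_free_target = 64
	 *	(illustrative values only), we would wake it whenever fewer
	 *	than 16 pages are free, or whenever fewer than 64 are free
	 *	and the inactive queue is below its target.
	 */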

	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     (vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup(&vm_pages_needed);
	return(mem);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it with any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(mem)
	register vm_page_t	mem;
{
	vm_page_remove(mem);
	if (mem->flags & PG_ACTIVE) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->flags &= ~PG_ACTIVE;
		vm_page_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->flags &= ~PG_INACTIVE;
		vm_page_inactive_count--;
	}

	if (!(mem->flags & PG_FICTITIOUS)) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		vm_page_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		if (mem->flags & PG_ACTIVE) {
			queue_remove(&vm_page_queue_active, mem, vm_page_t,
				     pageq);
			vm_page_active_count--;
			mem->flags &= ~PG_ACTIVE;
		}
		if (mem->flags & PG_INACTIVE) {
			queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
				     pageq);
			vm_page_inactive_count--;
			mem->flags &= ~PG_INACTIVE;
		}
		vm_page_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	mem->wire_count--;
	if (mem->wire_count == 0) {
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_page_active_count++;
		mem->flags |= PG_ACTIVE;
		vm_page_wire_count--;
	}
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 *
	 *	XXX: sometimes we get pages which aren't wired down
	 *	or on any queue - we need to put them on the inactive
	 *	queue also, otherwise we lose track of them.
	 *	Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		if (m->flags & PG_ACTIVE) {
			queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
			m->flags &= ~PG_ACTIVE;
			vm_page_active_count--;
		}
		queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
		m->flags |= PG_INACTIVE;
		vm_page_inactive_count++;
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->flags &= ~PG_CLEAN;
		if (m->flags & PG_CLEAN)
			m->flags &= ~PG_LAUNDRY;
		else
			m->flags |= PG_LAUNDRY;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->flags & PG_INACTIVE) {
		queue_remove(&vm_page_queue_inactive, m, vm_page_t,
			     pageq);
		vm_page_inactive_count--;
		m->flags &= ~PG_INACTIVE;
	}
	if (m->wire_count == 0) {
		if (m->flags & PG_ACTIVE)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->flags |= PG_ACTIVE;
		vm_page_active_count++;
	}
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
boolean_t
vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}