Machine-independent changes to VM for handling non-contiguous memory.

brezak 1993-08-27 23:45:55 +00:00
parent f7c6bf575a
commit 15c3400ffe
7 changed files with 398 additions and 17 deletions

View File

@@ -1 +1 @@
revision 1.7 intentionally removed
revision 1.8 intentionally removed

View File: msgbuf.h

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)msgbuf.h 7.5 (Berkeley) 5/2/91
* $Id: msgbuf.h,v 1.3 1993/05/20 16:22:45 cgd Exp $
* $Id: msgbuf.h,v 1.4 1993/08/27 23:45:55 brezak Exp $
*/
#ifndef _SYS_MSGBUF_H_
@@ -46,7 +46,8 @@ struct msgbuf {
char msg_bufc[MSG_BSIZE]; /* buffer */
};
#ifdef KERNEL
struct msgbuf *msgbufp;
extern struct msgbuf *msgbufp;
extern int msgbufmapped;
#endif
#endif /* !_SYS_MSGBUF_H_ */

View File: pmap.h

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.h 7.4 (Berkeley) 5/7/91
* $Id: pmap.h,v 1.3 1993/05/20 03:59:13 cgd Exp $
* $Id: pmap.h,v 1.4 1993/08/27 23:48:09 brezak Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -74,8 +74,73 @@
#include <machine/pmap.h>
#ifdef KERNEL
void pmap_bootstrap();
void pmap_init();
/*
* Currently this option is used on the i386 to handle the
* memory ranges 0-640k and 1M+.
*/
#ifdef MACHINE_NONCONTIG
/*
* Routines used for initialization.
* There is traditionally also a pmap_bootstrap,
* used very early by machine-dependent code,
* but it is not part of the interface.
*/
extern vm_offset_t pmap_steal_memory(); /* During VM initialization,
* steal a chunk of memory.
*/
extern unsigned int pmap_free_pages(); /* During VM initialization,
* report remaining unused
* physical pages.
*/
extern void pmap_startup(); /* During VM initialization,
* use remaining physical pages
* to allocate page frames.
*/
extern void pmap_init(); /* Initialization,
* after kernel runs
* in virtual memory.
*/
/*
* Currently the following isn't really an option, so don't define it.
*/
#undef MACHINE_PAGES
#ifndef MACHINE_PAGES
/*
* If machine/pmap.h defines MACHINE_PAGES, it must implement
* the above functions. The pmap module has complete control.
* Otherwise, it must implement
* pmap_free_pages
* pmap_virtual_space
* pmap_next_page
* pmap_init
* and vm/vm_page.c implements pmap_steal_memory and pmap_startup
* using pmap_free_pages, pmap_next_page, pmap_virtual_space,
* and pmap_enter. pmap_free_pages may over-estimate the number
* of unused physical pages, and pmap_next_page may return FALSE
* to indicate that there are no more unused pages to return.
* However, for best performance pmap_free_pages should be accurate.
*/
extern boolean_t pmap_next_page(); /* During VM initialization,
* return the next unused
* physical page.
*/
extern void pmap_virtual_space(); /* During VM initialization,
* report virtual space
* available for the kernel.
*/
#endif /* MACHINE_PAGES */
#endif /* MACHINE_NONCONTIG */
#ifdef MACHINE_NONCONTIG
void pmap_bootstrap __P((vm_offset_t s));
void pmap_init __P((void));
#else
void pmap_bootstrap __P((vm_offset_t f, vm_offset_t l));
void pmap_init __P((vm_offset_t s, vm_offset_t e));
#endif
void pmap_pinit __P((struct pmap *pmap));
void pmap_release __P((struct pmap *pmap));
vm_offset_t pmap_map();
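For illustration only (not part of this commit), here is a minimal sketch of how a machine-dependent pmap might satisfy the non-MACHINE_PAGES contract described above. Only the function names and signatures come from the declarations in this header; the two physical ranges, their bounds, and the avail_* / virtual_* variable names are assumptions, loosely modeled on the i386 0-640k / 1M+ split mentioned earlier.

/*
 * Hypothetical machine-dependent implementation; range bounds and
 * variable names are illustrative assumptions.
 */
extern vm_offset_t virtual_avail, virtual_end;	/* kernel VA left after pmap_bootstrap */

static vm_offset_t avail_next_low = 0;		/* cursor in the 0-640k range */
static vm_offset_t avail_end_low = 0xA0000;
static vm_offset_t avail_next_high = 0x100000;	/* cursor in the 1M+ range */
static vm_offset_t avail_end_high;		/* set from the boot-time memory probe */

unsigned int
pmap_free_pages()
{
	/* May over-estimate; accuracy only matters for performance. */
	return (atop(avail_end_low - avail_next_low) +
	    atop(avail_end_high - avail_next_high));
}

void
pmap_virtual_space(startp, endp)
	vm_offset_t *startp, *endp;
{
	*startp = virtual_avail;
	*endp = virtual_end;
}

boolean_t
pmap_next_page(addrp)
	vm_offset_t *addrp;
{
	if (avail_next_low < avail_end_low) {
		*addrp = avail_next_low;
		avail_next_low += PAGE_SIZE;
		return (TRUE);
	}
	if (avail_next_high < avail_end_high) {
		*addrp = avail_next_high;
		avail_next_high += PAGE_SIZE;
		return (TRUE);
	}
	return (FALSE);			/* no unused physical pages remain */
}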

View File: vm_init.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_init.c 7.3 (Berkeley) 4/21/91
* $Id: vm_init.c,v 1.2 1993/05/20 03:59:24 cgd Exp $
* $Id: vm_init.c,v 1.3 1993/08/27 23:47:38 brezak Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -82,22 +82,35 @@
void vm_mem_init()
{
#ifndef MACHINE_NONCONTIG
extern vm_offset_t avail_start, avail_end;
extern vm_offset_t virtual_avail, virtual_end;
#else
vm_offset_t start, end;
#endif
/*
* Initializes resident memory structures.
* From here on, all physical memory is accounted for,
* and we use only virtual addresses.
*/
#ifndef MACHINE_NONCONTIG
virtual_avail = vm_page_startup(avail_start, avail_end, virtual_avail);
#else
vm_page_bootstrap(&start, &end);
#endif
/*
* Initialize other VM packages
*/
vm_object_init();
vm_map_startup();
#ifndef MACHINE_NONCONTIG
kmem_init(virtual_avail, virtual_end);
pmap_init(avail_start, avail_end);
#else
kmem_init(start, end);
pmap_init();
#endif
vm_pager_init();
}
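For orientation, a rough sketch (assumed, not part of this commit) of the boot ordering a MACHINE_NONCONTIG port would follow: machine-dependent startup calls pmap_bootstrap very early, and vm_mem_init later drives vm_page_bootstrap, kmem_init and the argument-less pmap_init shown above. Everything except those entry points is illustrative.

extern vm_offset_t first_avail;		/* first free physical address; assumed name */

void
machine_startup()
{
	pmap_bootstrap(first_avail);	/* MACHINE_NONCONTIG form: single argument */
	/* ... other early machine-dependent setup ... */
	vm_mem_init();			/* vm_page_bootstrap -> kmem_init -> pmap_init() */
}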

View File: vm_meter.c

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_meter.c 7.11 (Berkeley) 4/20/91
* $Id: vm_meter.c,v 1.3 1993/06/27 06:34:40 andrew Exp $
* $Id: vm_meter.c,v 1.4 1993/08/27 23:47:12 brezak Exp $
*/
#include "param.h"
@@ -45,8 +45,10 @@
fixpt_t averunnable[3]; /* load average, of runnable procs */
int maxslp = MAXSLP;
int saferss = SAFERSS;
#ifndef MACHINE_NONCONTIG
int saferss = SAFERSS;
#endif /* MACHINE_NONCONTIG */
void
vmmeter()

View File: vm_page.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.5 1993/06/30 03:48:25 andrew Exp $
* $Id: vm_page.c,v 1.6 1993/08/27 23:46:02 brezak Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -75,10 +75,20 @@
#include "vm_pageout.h"
/*
* Associated with page of user-allocatable memory is a
* Associated with each page of user-allocatable memory is a
* page structure.
*/
#ifdef MACHINE_NONCONTIG
/*
* These variables record the values returned by vm_page_bootstrap,
* for debugging purposes. The implementation of pmap_steal_memory
* and pmap_startup here also uses them internally.
*/
vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
#endif /* MACHINE_NONCONTIG */
queue_head_t *vm_page_buckets; /* Array of buckets */
int vm_page_bucket_count = 0; /* How big is array? */
int vm_page_hash_mask; /* Mask for hash function */
@@ -95,10 +95,16 @@ simple_lock_data_t vm_page_queue_lock;
simple_lock_data_t vm_page_queue_free_lock;
vm_page_t vm_page_array;
#ifndef MACHINE_NONCONTIG
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
#else
u_long first_page;
int vm_page_count;
#endif /* MACHINE_NONCONTIG */
int vm_page_free_count;
int vm_page_active_count;
@@ -133,6 +149,119 @@ void vm_set_page_size()
}
#ifdef MACHINE_NONCONTIG
/*
* vm_page_bootstrap:
*
* Initializes the resident memory module.
*
* Allocates memory for the page cells, and
* for the object/offset-to-page hash table headers.
* Each page cell is initialized and placed on the free list.
* Returns the range of available kernel virtual memory.
*/
void vm_page_bootstrap(startp, endp)
vm_offset_t *startp;
vm_offset_t *endp;
{
int i;
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
/*
* Initialize the locks
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
/*
* Initialize the queue headers for the free queue,
* the active queue and the inactive queue.
*/
queue_init(&vm_page_queue_free);
queue_init(&vm_page_queue_active);
queue_init(&vm_page_queue_inactive);
/*
* Pre-allocate maps and map entries that cannot be dynamically
* allocated via malloc(). The maps include the kernel_map and
* kmem_map which must be initialized before malloc() will
* work (obviously). Also could include pager maps which would
* be allocated before kmeminit.
*
* Allow some kernel map entries... this should be plenty
* since people shouldn't be cluttering up the kernel
* map (they should use their own maps).
*/
kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
MAX_KMAPENT * sizeof(struct vm_map_entry);
kentry_data_size = round_page(kentry_data_size);
kentry_data = (vm_offset_t) pmap_steal_memory(kentry_data_size);
/*
* Validate these zone addresses.
*/
bzero((caddr_t) kentry_data, kentry_data_size);
/*
* Allocate (and initialize) the virtual-to-physical
* table hash buckets.
*
* The number of buckets MUST BE a power of 2, and
* the actual value is the next power of 2 greater
* than the number of physical pages in the system.
*
* Note:
* This computation can be tweaked if desired.
*/
if (vm_page_bucket_count == 0) {
unsigned int npages = pmap_free_pages();
vm_page_bucket_count = 1;
while (vm_page_bucket_count < npages)
vm_page_bucket_count <<= 1;
}
vm_page_hash_mask = vm_page_bucket_count - 1;
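	/*
	 * Worked example (illustrative): if pmap_free_pages() reports
	 * 4000 free pages, the loop above stops at vm_page_bucket_count
	 * == 4096 and vm_page_hash_mask becomes 4095 (0xfff).
	 */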
vm_page_buckets = (queue_t)
pmap_steal_memory(vm_page_bucket_count * sizeof(*vm_page_buckets));
for (i = 0; i < vm_page_bucket_count; i++) {
register queue_head_t *bucket = &vm_page_buckets[i];
queue_init(bucket);
}
simple_lock_init(&bucket_lock);
/*
* Machine-dependent code allocates the resident page table.
* It uses vm_page_init to initialize the page frames.
* The code also returns to us the virtual space available
* to the kernel. We don't trust the pmap module
* to get the alignment right.
*/
pmap_startup(&virtual_space_start, &virtual_space_end);
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
*startp = virtual_space_start;
*endp = virtual_space_end;
simple_lock_init(&vm_pages_needed_lock);
}
#else /* MACHINE_NONCONTIG */
/*
* vm_page_startup:
*
@@ -308,6 +437,111 @@ vm_offset_t vm_page_startup(start, end, vaddr)
return(mapped);
}
#endif /* MACHINE_NONCONTIG */
#if defined(MACHINE_NONCONTIG) && !defined(MACHINE_PAGES)
/*
* We implement pmap_steal_memory and pmap_startup with the help
* of two simpler functions, pmap_virtual_space and pmap_next_page.
*/
vm_offset_t pmap_steal_memory(size)
vm_size_t size;
{
vm_offset_t addr, vaddr, paddr;
/*
* We round the size up to an integer multiple of four bytes.
*/
size = (size + 3) &~ 3;
/*
* If this is the first call to pmap_steal_memory,
* we have to initialize ourself.
*/
if (virtual_space_start == virtual_space_end) {
pmap_virtual_space(&virtual_space_start, &virtual_space_end);
/*
* The initial values must be aligned properly, and
* we don't trust the pmap module to do it right.
*/
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
}
/*
* Allocate virtual memory for this request.
*/
addr = virtual_space_start;
virtual_space_start += size;
/*
* Allocate and map physical pages to back new virtual pages.
*/
for (vaddr = round_page(addr);
vaddr < addr + size;
vaddr += PAGE_SIZE) {
if (!pmap_next_page(&paddr))
panic("pmap_steal_memory");
/*
* XXX Logically, these mappings should be wired,
* but some pmap modules barf if they are.
*/
pmap_enter(kernel_pmap, vaddr, paddr,
VM_PROT_READ|VM_PROT_WRITE, FALSE);
}
return addr;
}
void pmap_startup(startp, endp)
vm_offset_t *startp;
vm_offset_t *endp;
{
unsigned int i;
vm_offset_t paddr;
/*
* We calculate how many page frames we will have
* and then allocate the page structures in one chunk.
*/
vm_page_count = ((PAGE_SIZE * pmap_free_pages() +
(round_page(virtual_space_start) - virtual_space_start)) /
(PAGE_SIZE + sizeof *vm_page_array));
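	/*
	 * Rationale (illustrative): each managed page costs PAGE_SIZE
	 * bytes for the frame itself plus one struct vm_page for its
	 * cell, both carved from the same pool of free pages, so the
	 * free bytes (plus the slack left in the partially used page
	 * at virtual_space_start) are divided by
	 * (PAGE_SIZE + sizeof *vm_page_array).
	 */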
vm_page_array = (vm_page_t) pmap_steal_memory(vm_page_count
* sizeof *vm_page_array);
/*
* Initialize the page frames.
*/
for (i = 0; i < vm_page_count; i++) {
if (!pmap_next_page(&paddr))
break;
vm_page_init(&vm_page_array[i], NULL, NULL);
vm_page_array[i].phys_addr = paddr;
vm_page_free(&vm_page_array[i]);
}
/*
* Remember the actual page count and the index of the first page
*/
vm_page_count = i;
first_page = pmap_page_index(vm_page_array[0].phys_addr);
*startp = virtual_space_start;
*endp = virtual_space_end;
}
#endif /* MACHINE_NONCONTIG && !MACHINE_PAGES */
/*
* vm_page_hash:
@@ -490,6 +724,47 @@ void vm_page_init(mem, object, offset)
vm_object_t object;
vm_offset_t offset;
{
#ifdef MACHINE_NONCONTIG
#ifdef DEBUG
#define vm_page_init(mem, object, offset) {\
(mem)->busy = TRUE; \
(mem)->tabled = FALSE; \
if (object) vm_page_insert((mem), (object), (offset)); \
else (mem)->object = NULL; \
(mem)->absent = FALSE; \
(mem)->fictitious = FALSE; \
(mem)->page_lock = VM_PROT_NONE; \
(mem)->unlock_request = VM_PROT_NONE; \
(mem)->laundry = FALSE; \
(mem)->active = FALSE; \
(mem)->inactive = FALSE; \
(mem)->wire_count = 0; \
(mem)->clean = TRUE; \
(mem)->copy_on_write = FALSE; \
(mem)->fake = TRUE; \
(mem)->pagerowned = FALSE; \
(mem)->ptpage = FALSE; \
}
#else /* DEBUG */
#define vm_page_init(mem, object, offset) {\
(mem)->busy = TRUE; \
(mem)->tabled = FALSE; \
if (object) vm_page_insert((mem), (object), (offset)); \
else (mem)->object = NULL; \
(mem)->absent = FALSE; \
(mem)->fictitious = FALSE; \
(mem)->page_lock = VM_PROT_NONE; \
(mem)->unlock_request = VM_PROT_NONE; \
(mem)->laundry = FALSE; \
(mem)->active = FALSE; \
(mem)->inactive = FALSE; \
(mem)->wire_count = 0; \
(mem)->clean = TRUE; \
(mem)->copy_on_write = FALSE; \
(mem)->fake = TRUE; \
}
#endif /* DEBUG */
#else /* MACHINE_NONCONTIG */
#ifdef DEBUG
#define vm_page_init(mem, object, offset) {\
(mem)->busy = TRUE; \
@@ -509,7 +784,7 @@ void vm_page_init(mem, object, offset)
(mem)->pagerowned = FALSE; \
(mem)->ptpage = FALSE; \
}
#else
#else /* DEBUG */
#define vm_page_init(mem, object, offset) {\
(mem)->busy = TRUE; \
(mem)->tabled = FALSE; \
@@ -526,7 +801,8 @@ void vm_page_init(mem, object, offset)
(mem)->copy_on_write = FALSE; \
(mem)->fake = TRUE; \
}
#endif
#endif /* DEBUG */
#endif /* MACHINE_NONCONTIG */
vm_page_init(mem, object, offset);
}

View File: vm_page.h

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.h 7.3 (Berkeley) 4/21/91
* $Id: vm_page.h,v 1.4 1993/07/29 21:45:41 jtc Exp $
* $Id: vm_page.h,v 1.5 1993/08/27 23:46:43 brezak Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -138,12 +138,21 @@ struct vm_page {
typedef struct vm_page *vm_page_t;
#if VM_PAGE_DEBUG
#ifdef MACHINE_NONCONTIG
#define VM_PAGE_CHECK(mem) { \
if ( (((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
(((unsigned int) mem) > ((unsigned int) &vm_page_array[vm_page_count])) || \
(mem->active && mem->inactive) \
) panic("vm_page_check: not valid!"); \
}
#else /* MACHINE_NONCONTIG */
#define VM_PAGE_CHECK(mem) { \
if ( (((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
(((unsigned int) mem) > ((unsigned int) &vm_page_array[last_page-first_page])) || \
(mem->active && mem->inactive) \
) panic("vm_page_check: not valid!"); \
}
#endif /* MACHINE_NONCONTIG */
#else /* VM_PAGE_DEBUG */
#define VM_PAGE_CHECK(mem)
#endif /* VM_PAGE_DEBUG */
@@ -174,17 +183,25 @@ queue_head_t vm_page_queue_inactive; /* inactive memory queue */
extern
vm_page_t vm_page_array; /* First resident page in table */
#ifndef MACHINE_NONCONTIG
extern
long first_page; /* first physical page number */
/* ... represented in vm_page_array */
extern
long last_page; /* last physical page number */
/* ... represented in vm_page_array */
/* [INCLUSIVE] */
extern
vm_offset_t first_phys_addr; /* physical address for first_page */
extern
vm_offset_t last_phys_addr; /* physical address for last_page */
#else /* MACHINE_NONCONTIG */
extern
u_long first_page; /* first physical page number */
extern
int vm_page_count; /* How many pages do we manage? */
#endif /* MACHINE_NONCONTIG */
/* ... represented in vm_page_array */
extern
int vm_page_free_count; /* How many pages are free? */
@@ -207,11 +224,18 @@ int vm_page_laundry_count; /* How many pages being laundered? */
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
#ifndef MACHINE_NONCONTIG
#define IS_VM_PHYSADDR(pa) \
((pa) >= first_phys_addr && (pa) <= last_phys_addr)
#define PHYS_TO_VM_PAGE(pa) \
(&vm_page_array[atop(pa) - first_page ])
#else
#define IS_VM_PHYSADDR(pa) \
(pmap_page_index(pa) >= 0)
#define PHYS_TO_VM_PAGE(pa) \
(&vm_page_array[pmap_page_index(pa) - first_page])
#endif /* MACHINE_NONCONTIG */
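A sketch (an assumption, not part of this commit) of the machine-dependent pmap_page_index() these macros rely on, for a layout with two physical ranges such as the i386 0-640k / 1M+ case; the range bounds and the high_mem_end name are illustrative. The index must be non-negative for managed pages, dense, and consistent with the order in which pmap_next_page hands pages out, so that vm_page_array[index - first_page] lands on the right page cell.

extern vm_offset_t high_mem_end;	/* end of the 1M+ range; assumed name */

int
pmap_page_index(pa)
	vm_offset_t pa;
{
	if (pa < 0xA0000)				/* low range: 0-640k */
		return (atop(pa));
	if (pa >= 0x100000 && pa < high_mem_end)	/* high range: 1M+ */
		return (atop(0xA0000) + atop(pa - 0x100000));
	return (-1);					/* not a managed physical page */
}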
extern
simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive