Clean up cpu_startup() a bit:

- move data structure allocation into a new function, allocsys(), like
  the i386 port.
- garbage-collect the BUFFERS_UNMANAGED code.
thorpej 1996-12-11 16:49:23 +00:00
parent f056beb022
commit 9a8786f719
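The core of the change is the size-then-assign idiom taken from the i386 port: allocsys() is called once with a null base pointer to measure how much room the tables need, the region is allocated in a single kmem_alloc() call, and allocsys() is run a second time over the real region to hand out addresses. The standalone sketch below distills that idiom; the table names, their sizes, and the use of calloc() in place of kmem_alloc() are illustrative assumptions, not the kernel's code.

/*
 * Sketch of the two-pass allocsys() idiom (hypothetical tables,
 * calloc() standing in for kmem_alloc()); builds as a plain C program.
 */
#include <stdio.h>
#include <stdlib.h>

static int *table_a;		/* stand-ins for callout, swapmap, buf, ... */
static char *table_b;
static int n_a = 32, n_b = 4096;

/* Carve each table out of the region starting at v; return the end. */
static char *
allocsys(char *v)
{
#define valloc(name, type, num) \
	(name) = (type *)v; v = (char *)((name) + (num))
	valloc(table_a, int, n_a);
	valloc(table_b, char, n_b);
#undef valloc
	return (v);
}

int
main(void)
{
	size_t sz;
	char *v;

	/* Pass 1: measure, using a null base the same way the kernel does. */
	sz = (size_t)allocsys(NULL);
	/* Allocate the whole region, zeroed. */
	if ((v = calloc(1, sz)) == NULL)
		abort();		/* "no room for tables" */
	/* Pass 2: assign real addresses; the two passes must agree. */
	if ((size_t)(allocsys(v) - v) != sz)
		abort();		/* "table size inconsistency" */
	printf("tables need %zu bytes, placed at %p\n", sz, (void *)v);
	return (0);
}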


@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.76 1996/10/20 23:23:33 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.77 1996/12/11 16:49:23 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -124,6 +124,7 @@ extern struct emul emul_hpux;
#endif
/* prototypes for local functions */
caddr_t allocsys __P((caddr_t));
void parityenable __P((void));
int parityerror __P((struct frame *));
int parityerrorfind __P((void));
@@ -203,15 +204,10 @@ cpu_startup()
{
extern char *etext;
register unsigned i;
register caddr_t v, firstaddr;
int base, residual;
register caddr_t v;
int base, residual, sz;
vm_offset_t minaddr, maxaddr;
vm_size_t size;
#ifdef BUFFERS_UNMANAGED
vm_offset_t bufmemp;
caddr_t buffermem;
int ix;
#endif
#ifdef DEBUG
extern int pmapdebug;
int opmapdebug = pmapdebug;
@@ -233,94 +229,18 @@ cpu_startup()
*/
printf(version);
identifycpu();
printf("real mem = %d\n", ctob(physmem));
printf("real mem = %d\n", ctob(physmem));
/*
* Allocate space for system data structures.
* The first available real memory address is in "firstaddr".
* The first available kernel virtual address is in "v".
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
* An index into the kernel page table corresponding to the
* virtual memory address maintained in "v" is kept in "mapaddr".
* Find out how much space we need, allocate it,
* and then give everything true virtual addresses.
*/
/*
* Make two passes. The first pass calculates how much memory is
* needed and allocates it. The second pass assigns virtual
* addresses to the various data structures.
*/
firstaddr = 0;
again:
v = (caddr_t)firstaddr;
size = (vm_size_t)allocsys((caddr_t)0);
if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(size))) == 0)
panic("startup: no room for tables");
if ((allocsys(v) - v) != size)
panic("startup: talbe size inconsistency");
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
#ifdef REAL_CLISTS
valloc(cfree, struct cblock, nclist);
#endif
valloc(callout, struct callout, ncallout);
valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
valloc(sema, struct semid_ds, seminfo.semmni);
valloc(sem, struct sem, seminfo.semmns);
/* This is pretty disgusting! */
valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
valloc(msgpool, char, msginfo.msgmax);
valloc(msgmaps, struct msgmap, msginfo.msgseg);
valloc(msghdrs, struct msg, msginfo.msgtql);
valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif
/*
* Determine how many buffers to allocate.
* Since HPs tend to be long on memory and short on disk speed,
* we allocate more buffer space than the BSD standard of
* 10% of memory for the first 2 Meg, 5% of remaining.
* We just allocate a flat 10%. Insure a minimum of 16 buffers.
* We allocate 1/2 as many swap buffer headers as file i/o buffers.
*/
if (bufpages == 0)
bufpages = physmem / 10 / CLSIZE;
if (nbuf == 0) {
nbuf = bufpages;
if (nbuf < 16)
nbuf = 16;
}
if (nswbuf == 0) {
nswbuf = (nbuf / 2) &~ 1; /* force even */
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
if (firstaddr == 0)
panic("startup: no room for tables");
#ifdef BUFFERS_UNMANAGED
buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*CLBYTES);
if (buffermem == 0)
panic("startup: no room for buffers");
#endif
goto again;
}
/*
* End of second pass, addresses have been assigned
*/
if ((vm_size_t)(v - firstaddr) != size)
panic("startup: table size inconsistency");
/*
* Now allocate buffers proper. They are different than the above
* in that they usually occupy more virtual memory than physical.
@@ -334,9 +254,6 @@ again:
panic("startup: cannot allocate buffers");
base = bufpages / nbuf;
residual = bufpages % nbuf;
#ifdef BUFFERS_UNMANAGED
bufmemp = (vm_offset_t) buffermem;
#endif
for (i = 0; i < nbuf; i++) {
vm_size_t curbufsize;
vm_offset_t curbuf;
@@ -350,36 +267,9 @@ again:
*/
curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
curbufsize = CLBYTES * (i < residual ? base+1 : base);
#ifdef BUFFERS_UNMANAGED
/*
* Move the physical pages over from buffermem.
*/
for (ix = 0; ix < curbufsize/CLBYTES; ix++) {
vm_offset_t pa;
pa = pmap_extract(pmap_kernel(), bufmemp);
if (pa == 0)
panic("startup: unmapped buffer");
pmap_remove(pmap_kernel(), bufmemp, bufmemp+CLBYTES);
pmap_enter(pmap_kernel(),
(vm_offset_t)(curbuf + ix * CLBYTES),
pa, VM_PROT_READ|VM_PROT_WRITE, TRUE);
bufmemp += CLBYTES;
}
#else
vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
vm_map_simplify(buffer_map, curbuf);
#endif
}
#ifdef BUFFERS_UNMANAGED
#if 0
/*
* We would like to free the (now empty) original address range
* but too many bad things will happen if we try.
*/
kmem_free(kernel_map, (vm_offset_t)buffermem, bufpages*CLBYTES);
#endif
#endif
/*
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
@@ -453,6 +343,71 @@ again:
configure();
}
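For reference, the buffer loop in cpu_startup() hands every buffer a fixed MAXBSIZE window of virtual space but wires only its share of bufpages behind it: base clusters each, with the first residual buffers taking one extra. The sketch below reproduces just that split with plain integers; the bufpages, nbuf, cluster, and window sizes are made-up values and no VM calls are involved.

/*
 * Sketch of the base/residual split used when wiring physical clusters
 * behind the buffer cache; all numbers are hypothetical.
 */
#include <stdio.h>

#define CLBYTES		4096	/* assumed cluster size */
#define MAXBSIZE	65536	/* assumed virtual window per buffer */

int
main(void)
{
	int bufpages = 205, nbuf = 64;		/* hypothetical sizes */
	int base = bufpages / nbuf;		/* 3 clusters per buffer */
	int residual = bufpages % nbuf;		/* first 13 get one more */
	int i, wired = 0;

	for (i = 0; i < nbuf; i++) {
		long curbuf = (long)i * MAXBSIZE;	/* VA offset of buffer i */
		int curbufsize = CLBYTES * (i < residual ? base + 1 : base);

		wired += curbufsize;
		if (i < 2 || i == residual)
			printf("buf %2d: va offset 0x%06lx, %5d bytes wired\n",
			    i, curbuf, curbufsize);
	}
	printf("total wired: %d bytes = %d clusters of %d\n",
	    wired, wired / CLBYTES, CLBYTES);
	return (0);
}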
/*
* Allocate space for system data structures. We are given
* a starting virtual address and we return a final virtual
* address; along the way we set each data structure pointer.
*
* We call allocsys() with 0 to find out how much space we want,
* allocate that much and fill it with zeroes, and then call
* allocsys() again with the correct base virtual address.
*/
caddr_t
allocsys(v)
register caddr_t v;
{
#define valloc(name, type, num) \
(name) = (type *)v; v = (caddr_t)((name)+(num))
#define valloclim(name, type, num, lim) \
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
#ifdef REAL_CLISTS
valloc(cfree, struct cblock, nclist);
#endif
valloc(callout, struct callout, ncallout);
valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
valloc(sema, struct semid_ds, seminfo.semmni);
valloc(sem, struct sem, seminfo.semmns);
/* This is pretty disgusting! */
valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
valloc(msgpool, char, msginfo.msgmax);
valloc(msgmaps, struct msgmap, msginfo.msgseg);
valloc(msghdrs, struct msg, msginfo.msgtql);
valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif
/*
* Determine how many buffers to allocate. Since HPs tend
* to be long on memory and short on disk speed, we allocate
* more buffer space than the BSD standard of 10% of memory
* for the first 2 Meg, 5% of the remaining. We just allocate
* a flat 10%. Insure a minimum of 16 buffers. We allocate
* 1/2 as many swap buffer headers as file i/o buffers.
*/
if (bufpages == 0)
bufpages = physmem / 10 / CLSIZE;
if (nbuf == 0) {
nbuf = bufpages;
if (nbuf < 16)
nbuf = 16;
}
if (nswbuf == 0) {
nswbuf = (nbuf / 2) &~ 1; /* force even */
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
return (v);
}
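To make the sizing heuristic concrete, the fragment below runs the same arithmetic for a hypothetical configuration (32 MB of RAM, 4 KB pages, one page per cluster); these numbers are assumptions chosen only for illustration.

/*
 * Worked example of the bufpages/nbuf/nswbuf heuristic for a
 * hypothetical machine: 32 MB of RAM, 4 KB pages, cluster size of 1.
 */
#include <stdio.h>

int
main(void)
{
	int physmem = (32 * 1024 * 1024) / 4096;	/* 8192 pages, assumed */
	int clsize = 1;					/* stands in for CLSIZE */
	int bufpages = 0, nbuf = 0, nswbuf = 0;

	if (bufpages == 0)
		bufpages = physmem / 10 / clsize;	/* flat 10%: 819 pages */
	if (nbuf == 0) {
		nbuf = bufpages;			/* one header per page */
		if (nbuf < 16)
			nbuf = 16;			/* floor of 16 buffers */
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) & ~1;		/* half as many, even: 408 */
		if (nswbuf > 256)
			nswbuf = 256;			/* capped for sanity */
	}
	printf("bufpages=%d nbuf=%d nswbuf=%d\n", bufpages, nbuf, nswbuf);
	return (0);
}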
/*
* Set registers on exec.
* XXX Should clear registers except sp, pc,