Removing old VM2, starting to integrate new VM.

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@4135 a95241bf-73f2-0310-859d-f6bbb57e9c96
Michael Phipps 2003-07-30 11:48:09 +00:00
parent fc45522e03
commit 7241529789
36 changed files with 0 additions and 2858 deletions

View File

@ -1,3 +0,0 @@
SubDir OBOS_TOP src kernel vm2 ;
SharedLibrary vm : area.C areaManager.C areaPool.C cacheManager.C error.C page.C pageManager.C swapFileManager.C vmInterface.C vnodeManager.C vnodePool.C vpage.C ;

View File

@ -1,15 +0,0 @@
1) Implement the sharing methodology of mmap (PRIVATE vs SHARED, etc)
2) Change the architecture so that tests run as multiple apps instead of multiple threads...
3) Tests are not done.
Test:
getNextAreaInfo
cloneArea
resizeArea
setAreaProtection
mmap / munmap
paging
disk caching
4) See how unresolvable page faults and permission errors are handled - these still need proper handling *
- This may have to come at HW integration time, since they will be app signals, etc...
5) There is no arch-level integration. This is to be tested (to death) in user land first. * == cannot be done in user land. (A user-land test sketch follows below.)
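
A user-land smoke test along these lines might look like the sketch below. It drives areaManager directly and assumes the global vmBlock (page manager and pools) has already been initialized by the test harness, presumably through vmInterface; the file and names here are hypothetical, not part of the build.

#include "areaManager.h"
// Hypothetical user-land smoke test; assumes vmBlock is already set up.
int main(void)
{
	areaManager am;
	void *addr = NULL;
	int id = am.createArea("smokeTest", 4, &addr, ANY, NO_LOCK, writable);
	if (id < 0)
		return 1;                               // createArea failed
	am.setByte((unsigned long)addr, 42);            // touch the first page
	bool ok = (am.getByte((unsigned long)addr) == 42);
	am.freeArea(id);
	return ok ? 0 : 1;
}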

View File

@ -1,396 +0,0 @@
#include <new.h>
#include "area.h"
#include "areaManager.h"
#include "vpage.h"
#include "vnodePool.h"
#include "vnodeManager.h"
#include "vmHeaderBlock.h"
extern vmHeaderBlock *vmBlock;
// Simple constructor; real work is later
area::area(void) {
}
// Not much here, either
void area::setup (areaManager *myManager) {
//error ("area::setup setting up new area\n");
manager=myManager;
//error ("area::setup done setting up new area\n");
}
// Decide which algorithm to use for finding the next virtual address and try to find one.
unsigned long area::mapAddressSpecToAddress(addressSpec type,void * req,int inPageCount) {
// We will lock in the callers
unsigned long base,requested=(unsigned long)req;
switch (type) {
case EXACT:
base=manager->getNextAddress(inPageCount,requested);
if (base!=requested)
return 0;
break;
case BASE:
base=manager->getNextAddress(inPageCount,requested);
break;
case ANY:
base=manager->getNextAddress(inPageCount,USER_BASE);
break;
case ANY_KERNEL:
base=manager->getNextAddress(inPageCount,KERNEL_BASE);
break;
case CLONE: base=0;break; // Not sure what to do...
default: // should never happen
throw ("Unknown type passed to mapAddressSpecToAddress");
}
error ("area::mapAddressSpecToAddress, in type: %s, address = %x, size = %d\n", ((type==EXACT)?"Exact":(type==BASE)?"BASE":(type==ANY)?"ANY":(type==CLONE)?"CLONE":"ANY_KERNEL"), requested,inPageCount);
return base;
}
vpage *area::getNthVpage(int pageNum) {
/*
error ("Inside getNthVPage; pageNum=%d, fullPages = %d, vpagesOnIndexPage=%d, vpagesOnNextPage=%d\n",pageNum,fullPages,vpagesOnIndexPage,vpagesOnNextPage);
error ("Inside getNthVPage; indexPage = %x, address = %x\n",indexPage,indexPage->getAddress());
for (int i=0;i<PAGE_SIZE/64;fprintf (stderr,"\n"),i++)
for (int j=0;j<32;j++)
fprintf (stderr,"%04.4x ",indexPage->getAddress()+i*64+j);
*/
if (pageNum<=vpagesOnIndexPage) { // Skip the page pointers, then skip to the right vpage number;
return &(((vpage *)(getNthPage(fullPages+1)))[pageNum]);
}
else {
page *myPage=getNthPage(((pageNum-vpagesOnIndexPage-1)/vpagesOnNextPage));
return (vpage *)(myPage->getAddress()+(((pageNum-vpagesOnIndexPage-1)%vpagesOnNextPage)*sizeof(vpage)));
}
}
void area::allocateVPages(int pageCountIn) {
// Allocate all of the physical page space that we will need here and now for vpages
// Allocate number of pages necessary to hold the vpages, plus the index page...
pageCount=pageCountIn;
indexPage=vmBlock->pageMan->getPage();
error ("area::allocateVPages : index page = %x, (physical address = %x\n",indexPage,indexPage->getAddress());
vpagesOnNextPage=PAGE_SIZE/sizeof(vpage); // Number of vpages per full physical page.
fullPages = pageCount / vpagesOnNextPage; // Number of full pages that we need.
int bytesLeftOnIndexPage = PAGE_SIZE-(fullPages*sizeof(vpage *)); // Room left on index page
vpagesOnIndexPage=bytesLeftOnIndexPage/sizeof(vpage);
if ((fullPages*vpagesOnNextPage + vpagesOnIndexPage)< pageCount) { // not enough room...
fullPages++;
bytesLeftOnIndexPage = PAGE_SIZE-(fullPages*sizeof(vpage *)); // Recalculate these, since they have changed.
vpagesOnIndexPage=bytesLeftOnIndexPage/sizeof(vpage);
}
// Allocate the physical page space.
for (int count=0;count<fullPages;count++) {
page *curPage=vmBlock->pageMan->getPage();
((page **)(indexPage->getAddress()))[count]=curPage;
}
error ("area::allocateVPages : index page = %x, (physical address = %x\n",indexPage,indexPage->getAddress());
}
// This is the really interesting part of creating an area
status_t area::createAreaGuts( char *inName, int inPageCount, void **address, addressSpec type, pageState inState, protectType protect, bool inFinalWrite, int fd, size_t offset, area *originalArea /* For clone only*/, mmapSharing share) {
error ("area::createAreaGuts : name = %s, pageCount = %d, address = %lx, addressSpec = %d, pageState = %d, protection = %d, inFinalWrite = %d, fd = %d, offset = %d,originalArea=%ld\n",
inName,inPageCount,address,type,inState,protect,inFinalWrite,fd,offset,originalArea);
vpage *newPage;
// We need RAM - let's fail if we don't have enough... This is Be's way. I probably would do this differently...
if (!originalArea && (inState!=LAZY) && (inState!=NO_LOCK) && (inPageCount>(vmBlock->pageMan->freePageCount())))
return B_NO_MEMORY;
else
error ("origArea = %d, instate = %d, LAZY = %d, NO_LOCK = %d, pageCountIn = %d, free pages = %d\n",
originalArea, inState,LAZY ,NO_LOCK,inPageCount,(vmBlock->pageMan->freePageCount()));
// Get an address to start this area at
unsigned long base=mapAddressSpecToAddress(type,*address,inPageCount);
if (base==0)
return B_ERROR;
// Set up some basic info
strcpy(name,inName);
state=inState;
start_address=base;
*address=(void *)base;
finalWrite=inFinalWrite;
error ("area::createAreaGuts:About to allocate vpages\n");
allocateVPages(inPageCount);
error ("area::createAreaGuts:done allocating vpages\n");
// For non-cloned areas, make a new vpage for every page necessary.
if (originalArea==NULL) // Not for cloning
for (int i=0;i<pageCount;i++) {
newPage=new (getNthVpage(i)) vpage;
error ("got a vpage at %x\n",newPage);
if (fd) {
error ("area::createAreaGuts:populating vnode\n");
vnode newVnode;
newVnode.fd=fd;
newVnode.offset=offset+i*PAGE_SIZE;
newVnode.valid=true;
// vmBlock->vnodeManager->addVNode(newVnode,newPage);
error ("area::createAreaGuts:calling setup on %x\n",newPage);
newPage->setup(base+PAGE_SIZE*i,&newVnode,NULL,protect,inState,share);
error ("area::createAreaGuts:done with setup on %x\n",newPage);
}
else {
error ("area::createAreaGuts:calling setup on %x\n",newPage);
newPage->setup(base+PAGE_SIZE*i,NULL,NULL,protect,inState);
error ("area::createAreaGuts:done with setup on %x\n",newPage);
}
}
else // cloned
// Need to lock other area, here, just in case...
// Make a copy of each page in the other area...
for (int i=0;i<pageCount;i++) {
vpage *page=originalArea->getNthVpage(i);
newPage=new (getNthVpage(i)) vpage;
newPage->setup(base,page->getBacking(),page->getPhysPage(),protect,inState);// Cloned area has the same physical page and backing store...
base+=PAGE_SIZE;
}
error ("Dumping the area's hashtable\n");
dump();
vmBlock->areas.add(this);
return B_OK;
}
status_t area::createAreaMappingFile(char *inName, int inPageCount,void **address, addressSpec type,pageState inState,protectType protect,int fd,size_t offset, mmapSharing share) {
return createAreaGuts(inName,inPageCount,address,type,inState,protect,true,fd,offset,NULL,share);
}
status_t area::createArea(char *inName, int inPageCount,void **address, addressSpec type,pageState inState,protectType protect) {
return createAreaGuts(inName,inPageCount,address,type,inState,protect,false,0,0);
}
// Clone another area.
status_t area::cloneArea(area *origArea, char *inName, void **address, addressSpec type,pageState inState,protectType protect) {
if (type==CLONE) {
*address=(void *)(origArea->getStartAddress());
type=EXACT;
}
if (origArea->getAreaManager()!=manager) { // If they are in different areas...
origArea->getAreaManager()->lock(); // This is just begging for a deadlock...
status_t retVal = createAreaGuts(inName,origArea->getPageCount(),address,type,inState,protect,false,0,0,origArea);
origArea->getAreaManager()->unlock();
return retVal;
}
else
return createAreaGuts(inName,origArea->getPageCount(),address,type,inState,protect,false,0,0,origArea);
}
// To free an area, iterate over its pages, final-writing them if necessary, then call cleanup and put the vpage back in the pool
void area::freeArea(void) {
//error ("area::freeArea: starting \n");
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
if (finalWrite) {
page->flush();
// error ("area::freeArea: flushed page %x\n",page);
}
page->cleanup();
}
for (int i=0;i<fullPages;i++)
vmBlock->pageMan->freePage(getNthPage(i));
vmBlock->pageMan->freePage(indexPage);
// error ("area::freeArea ----------------------------------------------------------------\n");
// vmBlock->vnodeMan->dump();
//error ("area::freeArea: unlocking \n");
//error ("area::freeArea: ending \n");
}
// Get area info
status_t area::getInfo(area_info *dest) {
dest->area=areaID;
strcpy(dest->name,name);
dest->size=pageCount*PAGE_SIZE;
dest->lock=state;
dest->protection=protection;
dest->team=manager->getTeam();
dest->ram_size=0;
dest->in_count=in_count;
dest->out_count=out_count;
dest->copy_count=0;
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
if (page->isMapped())
dest->ram_size+=PAGE_SIZE;
}
dest->address=(void *)start_address;
return B_OK;
}
bool area::contains(const void *address) {
unsigned long base=(unsigned long)(address);
// error ("area::contains: looking for %d in %d -- %d, value = %d\n",base,getStartAddress(),getEndAddress(), ((getStartAddress()<=base) && (getEndAddress()>=base)));
return ((getStartAddress()<=base) && (base<=getEndAddress()));
}
// Resize an area.
status_t area::resize(size_t newSize) {
size_t oldSize =pageCount*PAGE_SIZE+1;
// Duh. Nothing to do.
if (newSize==oldSize)
return B_OK;
if (newSize>oldSize) { // Grow the area. Figure out how many pages, allocate them and set them up
int newPageCount = (newSize - oldSize + PAGE_SIZE - 1) / PAGE_SIZE;
if (!mapAddressSpecToAddress(EXACT,(void *)(getEndAddress()+1),newPageCount)) // Ensure that the address space is available...
return B_ERROR;
int oldPageMax=vpagesOnIndexPage+(fullPages*vpagesOnNextPage); // Figure out what remaining, empty slots we have...
if (oldPageMax<newSize) { // Do we have enough room in the existing area?
page *oldIndexPage=indexPage; // Guess not. Remember where we were...
int oldVpagesOnIndexPage=vpagesOnIndexPage;
int oldVpagesOnNextPage=vpagesOnNextPage;
int oldFullPages=fullPages;
allocateVPages(newSize/PAGE_SIZE); // Get room
for (int i=0;i<oldSize/PAGE_SIZE;i++)//Shift over existing space...
if (i<=vpagesOnIndexPage)
memcpy(getNthVpage(i),(void *)((indexPage->getAddress()+sizeof(page *)*fullPages)+(sizeof(vpage)*i)),sizeof(vpage));
else {
page *myPage=(page *)(indexPage->getAddress()+((i-vpagesOnIndexPage-1)/vpagesOnNextPage)*sizeof(page *));
memcpy(getNthVpage(i),(void *)(myPage->getAddress()+(((i-vpagesOnIndexPage-1)%vpagesOnNextPage)*sizeof(vpage))),sizeof(vpage));
}
}
for (int i=oldSize;i<newSize;i+=PAGE_SIZE) { // Fill in the new space
vpage *newPage=new (getNthVpage(i/PAGE_SIZE)) vpage; // build a new vpage
newPage->setup(getEndAddress()+PAGE_SIZE*i-1,NULL,NULL,protection,state); // and set it up
}
error ("Old size = %d, new size = %d, pageCount = %d\n",oldSize,newSize,pageCount);
dump();
}
else { // Shrinking - not even going to free up the vpages - that could be a bad decision
size_t newFinalBlock=newSize/PAGE_SIZE;
for (int i=newFinalBlock;i<pageCount;i++) {
vpage *oldPage=getNthVpage(i);
if (finalWrite)
oldPage->flush();
oldPage->cleanup();
}
}
return B_OK;
}
// When the protection for the area changes, the protection for every one of the pages must change
status_t area::setProtection(protectType prot) {
dump();
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
error ("setting protection on %x\n",page);
page->setProtection(prot);
}
protection=prot;
return B_OK;
}
// To fault, find the vpage associated with the fault and call its fault function
bool area::fault(void *fault_address, bool writeError) { // true = OK, false = panic.
vpage *page=findVPage((unsigned long)fault_address);
if (page)
return page->fault(fault_address,writeError,in_count);
else
return false;
}
char area::getByte(unsigned long address) { // This is for testing only
vpage *page=findVPage(address);
if (page)
return page->getByte(address,manager);
else
throw ("area::getByte - attempting an address out of range!");
}
void area::setByte(unsigned long address,char value) { // This is for testing only
vpage *page=findVPage(address);
if (page)
page->setByte(address,value,manager);
}
int area::getInt(unsigned long address) { // This is for testing only
vpage *page=findVPage(address);
if (page)
return page->getInt(address,manager);
else
return 0;
}
void area::setInt(unsigned long address,int value) { // This is for testing only
// error ("area::setInt - start\n");
vpage *page=findVPage(address);
// error ("area::setInt - page = %x\n",page);
if (page)
page->setInt(address,value,manager);
// error ("area::setInt - done\n");
}
// For every one of our vpages, call the vpage's pager
void area::pager(int desperation) {
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
if (page->pager(desperation))
out_count++;
}
}
// For every one of our vpages, call the vpage's saver
void area::saver(void) {
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
page->saver();
}
}
void area::dump(void) {
error ("area::dump: size = %ld, lock = %d, address = %lx\n",pageCount*PAGE_SIZE,state,start_address);
for (int vp=0;vp<pageCount;vp++) {
error ("Dumping vpage %d of %d\n",vp,pageCount);
getNthVpage(vp)->dump();
}
}
long area::get_memory_map(const void *address, ulong numBytes, physical_entry *table, long numEntries) {
unsigned long vbase=(unsigned long)address;
long prevMem=0,tableEntry=-1;
// Cycle over each "page to find";
for (int byteOffset=0;byteOffset<=numBytes;byteOffset+=PAGE_SIZE) {
vpage *found=findVPage(((unsigned long)(address))+byteOffset);
if (!found) return B_ERROR;
unsigned long mem=found->getPhysPage()->getAddress();
if (mem!=prevMem+PAGE_SIZE) {
if (++tableEntry==numEntries)
return B_ERROR; // Ran out of places to fill in
table[tableEntry].address=(void *)mem;
table[tableEntry].size=0;
}
prevMem=mem;
table[tableEntry].size+=PAGE_SIZE;
}
if (++tableEntry==numEntries)
return B_ERROR; // Ran out of places to fill in
table[tableEntry].size=0;
return B_OK;
}
long area::lock_memory(void *address, ulong numBytes, ulong flags) {
unsigned long vbase=(unsigned long)address;
for (int byteOffset=0;byteOffset<=numBytes;byteOffset+=PAGE_SIZE) {
vpage *found=findVPage((unsigned long)address+byteOffset);
if (!found) return B_ERROR;
if (!found->lock(flags)) return B_ERROR;
}
return B_OK;
}
long area::unlock_memory(void *address, ulong numBytes, ulong flags) {
unsigned long vbase=(unsigned long)address;
for (int byteOffset=0;byteOffset<=numBytes;byteOffset+=PAGE_SIZE) {
vpage *found=findVPage((unsigned long)address+byteOffset);
if (!found) return B_ERROR;
found->unlock(flags);
}
return B_OK;
}
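
The allocateVPages() bookkeeping above packs fullPages worth of page pointers at the front of the index page, stores as many vpages as still fit on that page, and spills the rest onto whole pages. A stand-alone sketch of that capacity calculation follows; the 64-byte vpage size and 4-byte pointer used in the example call are placeholder assumptions, not the real sizeof values.

#include <stdio.h>
const int kPageSize = 4096;                    // mirrors PAGE_SIZE in vm.h
struct vpageLayout { int fullPages, vpagesOnIndexPage, vpagesPerFullPage; };
// Same arithmetic as area::allocateVPages(), with the sizes passed in.
vpageLayout computeLayout(int pageCount, int vpageSize, int pointerSize)
{
	vpageLayout l;
	l.vpagesPerFullPage = kPageSize / vpageSize;
	l.fullPages = pageCount / l.vpagesPerFullPage;
	l.vpagesOnIndexPage = (kPageSize - l.fullPages * pointerSize) / vpageSize;
	if (l.fullPages * l.vpagesPerFullPage + l.vpagesOnIndexPage < pageCount) {
		l.fullPages++;                 // not enough room; add one more full page
		l.vpagesOnIndexPage = (kPageSize - l.fullPages * pointerSize) / vpageSize;
	}
	return l;
}
int main(void)
{
	vpageLayout l = computeLayout(300, 64, 4);   // e.g. a 300-page area
	printf("%d full pages + %d vpages on the index page\n",
		l.fullPages, l.vpagesOnIndexPage);
	return 0;
}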

View File

@ -1,88 +0,0 @@
#ifndef _AREA_H
#define _AREA_H
#include "OS.h"
#include "vm.h"
#include "page.h"
#include "lockedList.h"
//#include "hashTable.h"
class areaManager;
class vpage;
class area : public node
{
protected:
int fullPages; // full pages of vpage pointers
int vpagesOnIndexPage; // number of vpages stored on the index page
int vpagesOnNextPage; // number of vpages on each "fullPage"
page *indexPage; // physical page of the index page
int pageCount; // count of the number of pages in this area
char name[B_OS_NAME_LENGTH]; // Our name
pageState state; // Allocation policy.
protectType protection; // read, r/w, copy on write
bool finalWrite; // Write blocks in this area out on freeing area?
area_id areaID; // Our numeric ID.
int in_count; // Number of pages read in
int out_count; // Number of pages written out
int copy_count; // Number of block copies that have been made
areaManager *manager; // Our manager/process
unsigned long start_address; // Where we start
vpage *findVPage(unsigned long address) { // Returns the page for this address.
return getNthVpage((address-start_address)/PAGE_SIZE);
// error ("area::findVPage: finding %ld\n",address);
}
void allocateVPages(int pageCount); // Allocates blocks for the vpages
page *getNthPage(int pageNum) { return &(((page *)(indexPage->getAddress()))[pageNum]); }
public:
// Constructors and Destructors and related
area(void);
void setup(areaManager *myManager);
void freeArea(void);
status_t createAreaGuts( char *inName, int pageCount, void **address, addressSpec type, pageState inState, protectType protect, bool inFinalWrite, int fd, size_t offset, area *originalArea=NULL,mmapSharing share=CLONEAREA /* For clone only*/);
status_t createAreaMappingFile(char *name, int pageCount,void **address, addressSpec type,pageState state,protectType protect,int fd,size_t offset, mmapSharing share=CLONEAREA);
status_t createArea (char *name, int pageCount,void **address, addressSpec type,pageState state,protectType protect);
status_t cloneArea(area *area, char *inName, void **address, addressSpec type,pageState inState,protectType protect);
unsigned long mapAddressSpecToAddress(addressSpec type,void *requested,int pageCount);
// Mutators
void setAreaID(int id) {areaID=id;}
status_t setProtection(protectType prot);
status_t resize(size_t newSize);
long get_memory_map(const void *address, ulong numBytes, physical_entry *table, long numEntries);
long lock_memory(void *address, ulong numBytes, ulong flags);
long unlock_memory(void *address, ulong numBytes, ulong flags);
// Accessors
status_t getInfo(area_info *dest);
int getAreaID(void) {return areaID;}
unsigned long getSize(void) {return getEndAddress()-getStartAddress();}
unsigned long getPageCount(void) {return (pageCount);}
areaManager *getAreaManager(void) {return manager;}
unsigned long getEndAddress(void) {return getStartAddress()+(PAGE_SIZE*pageCount)-1;}
unsigned long getStartAddress(void) {return start_address;}
const char *getName(void) {return name;}
vpage *getNthVpage(int pageNum);
// Debugging
void dump(void);
char getByte(unsigned long ); // This is for testing only
void setByte(unsigned long ,char value); // This is for testing only
int getInt(unsigned long ); // This is for testing only
void setInt(unsigned long ,int value); // This is for testing only
// Comparison with others
bool nameMatch(char *matchName) {return (strcmp(matchName,name)==0);}
bool couldAdd(unsigned long start,unsigned long end) { return ((end<start_address) || (start>getEndAddress()));}
bool contains(const void *address);
// External methods for "server" type calls
void pager(int desperation);
void saver(void);
bool fault(void *fault_address, bool writeError); // true = OK, false = panic.
};
#endif

View File

@ -1,455 +0,0 @@
#include <new.h>
#include "mman.h"
#include "areaManager.h"
#include "vmHeaderBlock.h"
#include "areaPool.h"
extern vmHeaderBlock *vmBlock;
bool areaIsLessThan(void *a,void *b) {
return (((reinterpret_cast<area *>(a))->getStartAddress()) < (reinterpret_cast<area *>(b))->getStartAddress());
}
// This creates the one true lock for this area
areaManager::areaManager(void) {
team=0; // should be proc_get_current_proc_id()
myLock=0;
myLock=create_sem(1,"Area Manager Semaphore"); // Should have team name in it.
areas.setIsLessThan(areaIsLessThan);
}
// Loops over every area looking for someplace where we can get the space we need.
unsigned long areaManager::getNextAddress(int pages, unsigned long start) {
// This function needs to deal with the possibility that we run out of address space...
// areas.dump();
unsigned long end=start+(pages*PAGE_SIZE)-1;
for (struct node *cur=areas.top();cur;cur=cur->next)
{
if (cur)
{
area *myArea=(area *)cur;
// error ("Looking for %x, %d pages; current = %x\n",start,pages,myArea->getEndAddress());
if (!myArea->couldAdd(start,end))
{ // if we don't fit here, there must be an overlap, so go to the end of this area.
start=myArea->getEndAddress()+1; // Since the end address == last byte in the area...
end=start+(pages*PAGE_SIZE)-1; // See above...
}
}
}
return start;
}
// Remove the area from our list, put it on the area pool and move on
status_t areaManager::freeArea(area_id areaID) {
//error ("areaManager::freeArea: begin\n");
status_t retVal=B_OK;
lock();
area *oldArea=findArea(areaID);
//error ("areaManager::freeArea: found area %x\n",oldArea);
if (oldArea)
{
// error ("areaManager::freeArea: removing area %x from linked list\n",oldArea);
// error ("areaManager::freeArea: areaManager = %x \n",manager);
removeArea(oldArea);
// error ("areaManager::freeArea: deleting area %x \n",oldArea);
vmBlock->areas.remove(oldArea);
oldArea->freeArea();
// error ("areaManager::freeArea: freeArea complete \n");
vmBlock->areaPool->put(oldArea);
}
else {
retVal=B_ERROR;
error ("areaManager::freeArea: unable to find requested area\n");
}
unlock();
// error ("areaManager::freeArea: final unlock complete\n");
return retVal;
}
area *areaManager::findAreaLock(void *address) {
lock();
area *retVal=findArea(address);
unlock();
return retVal;
}
// Loops over our areas looking for this one by name
area *areaManager::findArea(char *address) {
//error ("Finding area by string\n");
area *retVal=NULL;
lock();
for (struct node *cur=areas.top();cur && !retVal;cur=cur->next)
{
area *myArea=(area *)cur;
if (myArea->nameMatch(address))
retVal= myArea;
}
unlock();
return retVal;
}
// Loops over our areas looking for the one whose virtual address matches the passed in address
area *areaManager::findArea(const void *address) {
// THIS DOES NOT HAVE LOCKING - all callers must lock.
// error ("Finding area by void * address\n");
for (struct node *cur=areas.top();cur;cur=cur->next)
{
area *myArea=(area *)cur;
//error ("areaManager::findArea: Looking for %x between %x and %x\n",address,myArea->getStartAddress(),myArea->getEndAddress());
if (myArea->contains(address))
return myArea;
}
// error ("areaManager::findArea is giving up\n");
return NULL;
}
area *areaManager::findAreaLock(area_id id) {
//error ("Finding area by areaID \n");
lock();
area *retVal=findArea(id);
unlock();
return retVal;
}
// Loops over our areas looking for the one whose ID was passed in
area *areaManager::findArea(area_id id) {
//error ("Finding area by area_id\n");
area *retVal=NULL;
for (struct node *cur=areas.top();cur && !retVal;cur=cur->next)
{
area *myArea=(area *)cur;
if (myArea->getAreaID()==id)
retVal= myArea;
}
return retVal;
}
// Find the area whose address matches this page fault and dispatch the fault to it.
bool areaManager::fault(void *fault_address, bool writeError) { // true = OK, false = panic.
area *myArea;
bool retVal;
//error ("Faulting \n");
// lock(); // Normally this should occur, but since we will be locked when we read/write anyway...
myArea=findArea(fault_address);
if (myArea)
retVal= myArea->fault(fault_address,writeError);
else
retVal= false;
// unlock();
return retVal;
}
long areaManager::nextAreaID=0;
// Create an area; get a new structure, call setup, create the guts, set its ID, add it to our list
int areaManager::createArea(char *AreaName,int pageCount,void **address, addressSpec addType,pageState state,protectType protect) {
// error ("areaManager::createArea - Creating an area\n");
lock();
area *newArea = new (vmBlock->areaPool->get()) area;
// error ("areaManager::createArea - got a new area (%p) from the areaPool\n",newArea);
newArea->setup(this);
// error ("areaManager::createArea - setup complete\n");
int retVal = newArea->createArea(AreaName,pageCount,address,addType,state,protect);
// error ("areaManager::createArea - new area's createArea called\n");
if (retVal==B_OK) {
atomic_add(&nextAreaID,1);
newArea->setAreaID(nextAreaID);
//error ("areaManager::createArea - new area's setAreaID called\n");
addArea(newArea);
//error ("areaManager::createArea - new area added to list\n");
retVal=newArea->getAreaID();
// error ("areaManager::createArea - getAreaID=%d\n",retVal);
//error ("areaManager::createArea - new area id found\n");
}
unlock();
//error ("areaManager::createArea - Done Creating an area\n");
return retVal;
}
area *findAreaGlobal(int areaID) {
for (struct node *cur=vmBlock->areas.top();cur;cur=cur->next) {
area *myArea=(area *)cur;
if (((area *)(cur))->getAreaID()==areaID)
return myArea;
}
return NULL;
}
int areaManager::cloneArea(int newAreaID,char *AreaName,void **address, addressSpec addType,pageState state,protectType protect) {
int retVal;
//error ("Cloning an area\n");
lock();
area *oldArea=findArea(newAreaID);
if (!oldArea)
oldArea=findAreaGlobal(newAreaID);
if (oldArea)
{
area *newArea = new (vmBlock->areaPool->get()) area;
newArea->setup(this);
newArea->cloneArea(oldArea,AreaName,address,addType,state,protect);
atomic_add(&nextAreaID,1);
newArea->setAreaID(nextAreaID);
addArea(newArea);
retVal=newArea->getAreaID();
}
else
retVal=B_ERROR;
unlock();
return retVal;
}
char areaManager::getByte(unsigned long address) {
area *myArea;
// error ("areaManager::getByte : starting\n");
int retVal;
lock();
myArea=findArea((void *)address);
if (myArea)
retVal=myArea->getByte(address);
else
{
char temp[1000];
sprintf (temp,"Unable to find an area for address %d\n",address);
throw (temp);
}
unlock();
return retVal;
}
int areaManager::getInt(unsigned long address) {
area *myArea;
int retVal;
lock();
myArea=findArea((void *)address);
if (myArea)
retVal=myArea->getInt(address);
else
{
char temp[1000];
sprintf (temp,"Unable to find an area for address %d\n",address);
throw (temp);
}
unlock();
return retVal;
}
void areaManager::setByte(unsigned long address,char value) {
// error ("areaManager::setByte : starting\n");
area *myArea;
lock();
myArea=findArea((void *)address);
if (myArea)
myArea->setByte(address,value);
else
{
char temp[1000];
sprintf (temp,"Unable to find an area for address %d\n",address);
throw (temp);
}
unlock();
}
void areaManager::setInt(unsigned long address,int value) {
// error ("areaManager::setInt starting to set on address %lx, value = %d\n",address,value);
area *myArea;
lock();
// error ("areaManager::setInt locked\n");
myArea=findArea((void *)address);
// error ("areaManager::setInt area %s found\n",((myArea)?"":" not "));
try {
if (myArea)
myArea->setInt(address,value);
else {
char temp[1000];
sprintf (temp,"Unable to find an area for address %d\n",address);
unlock();
throw (temp);
}
}
catch (const char *t) { unlock();throw t;}
catch (char *t) { unlock();throw t;}
// error ("areaManager::setInt unlocking\n");
unlock();
}
// Call pager for each of our areas
void areaManager::pager(int desperation) {
lock();
for (struct node *cur=areas.top();cur;cur=cur->next) {
area *myArea=(area *)cur;
//error ("areaManager::pager; area = \n");
//myArea->dump();
myArea->pager(desperation);
}
unlock();
}
// Call saver for each of our areas
void areaManager::saver(void) {
lock();
for (struct node *cur=areas.top();cur;cur=cur->next) {
area *myArea=(area *)cur;
myArea->saver();
}
unlock();
}
// mmap is basically map POSIX values to ours and call createAreaMappingFile...
void *areaManager::mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset) {
char name[MAXPATHLEN];
if (fd<0)
return NULL;
// Get the filename from fd...
strcpy( name,"mmap - need to include fileName");
addressSpec addType=((flags&MAP_FIXED)?EXACT:ANY);
protectType protType;
protType=(prot&PROT_WRITE)?writable:(prot&(PROT_READ|PROT_EXEC))?readable:none;
//error ("flags = %x, anon = %x\n",flags,MAP_ANON);
lock();
if (flags & MAP_ANON)
createArea(name,(int)((len+PAGE_SIZE-1)/PAGE_SIZE),&addr, addType ,LAZY,protType);
else {
int shareCount=0;
mmapSharing share;
if (flags & MAP_SHARED) { share=SHARED;shareCount++;}
if (flags & MAP_PRIVATE) { share=PRIVATE;shareCount++;}
if (flags & MAP_COPY){ share=COPY;shareCount++;}
if (shareCount!=1)
addr=NULL;
else {
area *newArea = new (vmBlock->areaPool->get()) area;
newArea->setup(this);
//error ("area = %x, start = %x\n",newArea, newArea->getStartAddress());
newArea->createAreaMappingFile(name,(int)((len+PAGE_SIZE-1)/PAGE_SIZE),&addr,addType,LAZY,protType,fd,offset,share);
atomic_add(&nextAreaID,1);
newArea->setAreaID(nextAreaID);
addArea(newArea);
newArea->getAreaID();
//pageMan.dump();
//newArea->dump();
}
}
unlock();
return addr;
}
// Custom area destruction for mapped files
status_t areaManager::munmap(void *addr,size_t len)
{
// Note that this is broken for any and all munmaps that are not full area in size. This is an all or nothing game...
status_t retVal=B_OK;
lock();
area *myArea=findArea(addr);
if (myArea) {
removeArea(myArea);
myArea->freeArea();
vmBlock->areaPool->put(myArea);
}
else {
error ("areaManager::munmap: unable to find requested area\n");
retVal=B_ERROR;
}
unlock();
return retVal;
}
long areaManager::get_memory_map(const void *address, ulong numBytes, physical_entry *table, long numEntries) {
long retVal = B_ERROR; // Be pessimistic
lock();
// First, figure out what area we should be talking about...
area *myArea=findArea(address);
if (myArea)
retVal = myArea->get_memory_map(address, numBytes,table,numEntries);
unlock();
return retVal;
}
long areaManager::lock_memory(void *address, ulong numBytes, ulong flags) {
long retVal = B_ERROR; // Be pessimistic
lock();
// First, figure out what area we should be talking about...
area *myArea=findArea(address);
if (myArea)
retVal = myArea->lock_memory(address, numBytes,flags);
unlock();
return retVal;
}
long areaManager::unlock_memory(void *address, ulong numBytes, ulong flags) {
long retVal = B_ERROR; // Be pessimistic
lock();
// First, figure out what area we should be talking about...
area *myArea=findArea(address);
if (myArea)
retVal = myArea->unlock_memory(address, numBytes,flags);
unlock();
return retVal;
}
status_t areaManager::getAreaInfo(int areaID,area_info *dest) {
status_t retVal;
lock();
area *oldArea=findArea(areaID);
if (oldArea)
retVal=oldArea->getInfo(dest);
else
retVal=B_ERROR;
unlock();
return retVal;
}
int areaManager::getAreaByName(char *name) {
int retVal;
lock();
area *oldArea=findArea(name);
if (oldArea)
retVal= oldArea->getAreaID();
else
retVal= B_ERROR;
unlock();
return retVal;
}
status_t areaManager::setProtection(int areaID,protectType prot) {
status_t retVal;
error ("area::setProtection about to lock\n");
lock();
error ("area::setProtection locked\n");
area *myArea=findArea(areaID);
if (myArea)
retVal= myArea->setProtection(prot);
else
retVal= B_ERROR;
unlock();
error ("area::setProtection unlocked\n");
return retVal;
}
status_t areaManager::resizeArea(int Area,size_t size) {
status_t retVal;
lock();
area *oldArea=findArea(Area);
if (oldArea)
retVal= oldArea->resize(size);
else
retVal= B_ERROR;
unlock();
return retVal;
}
status_t areaManager::getInfoAfter(int32 & areaID,area_info *dest) {
status_t retVal;
lock();
area *oldArea=findArea(areaID);
if (oldArea && oldArea->next)
{
area *newCurrent=(reinterpret_cast<area *>(oldArea->next));
retVal=newCurrent->getInfo(dest);
areaID=(int)newCurrent;
}
else
retVal=B_ERROR;
unlock();
return retVal;
}
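
The POSIX-to-VM translation done at the top of mmap() above boils down to two small mappings; here is a sketch of just that logic (the helper names are hypothetical, the mapping itself is taken from the code above).

#include "mman.h"
#include "vm.h"
// Hypothetical helpers mirroring the translation in areaManager::mmap().
static addressSpec specFromMmapFlags(int flags)
{
	return (flags & MAP_FIXED) ? EXACT : ANY;
}
static protectType protFromMmapProt(int prot)
{
	if (prot & PROT_WRITE)
		return writable;
	if (prot & (PROT_READ | PROT_EXEC))
		return readable;
	return none;
}
// Example: specFromMmapFlags(MAP_ANON|MAP_PRIVATE) == ANY,
//          protFromMmapProt(PROT_READ|PROT_WRITE) == writable.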

View File

@ -1,53 +0,0 @@
#include "area.h"
#include "olist.h"
class areaManager // One of these per process
{
private:
orderedList areas; // A list, ordered by address, of our areas.
team_id team; // The team that we belong to
sem_id myLock; // a lock for adding/searching/removing our areas
static long nextAreaID; // The next area id to be used. Should be in vmInterface or vmBlock
public:
// Constructors and Destructors and related
areaManager ();
void addArea(area *newArea) {areas.add(newArea);}
void removeArea(area *oldArea) {areas.remove(oldArea); }
status_t freeArea(area_id area);
int createArea(char *AreaName,int pageCount,void **address, addressSpec addType,pageState state,protectType protect) ;
int cloneArea(int newAreaID,char *AreaName,void **address, addressSpec addType=ANY, pageState state=NO_LOCK, protectType prot=writable);
// Accessors
team_id getTeam(void) {return team;}
unsigned long getNextAddress(int pages,unsigned long minimum=USER_BASE);
status_t getAreaInfo(int areaID,area_info *dest);
int getAreaByName(char *name);
void *mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset);
status_t munmap(void *addr,size_t len);
// Mutators
area *findArea(const void *address);
area *findAreaLock(void *address);
area *findArea(char *address);
area *findArea(area_id id);
area *findAreaLock(area_id id);
status_t setProtection(int areaID,protectType prot);
status_t resizeArea(int Area,size_t size);
status_t getInfoAfter(int32 & areaID,area_info *dest);
void lock() { acquire_sem(myLock); }
void unlock() {release_sem(myLock);}
long get_memory_map(const void *address, ulong numBytes, physical_entry *table, long numEntries);
long lock_memory(void *address, ulong numBytes, ulong flags);
long unlock_memory(void *address, ulong numBytes, ulong flags);
// External methods for "server" type calls
bool fault(void *fault_address, bool writeError); // true = OK, false = panic.
void pager(int desperation);
void saver(void);
// Debugging
char getByte(unsigned long offset); // This is for testing only
void setByte(unsigned long offset,char value); // This is for testing only
int getInt(unsigned long offset); // This is for testing only
void setInt(unsigned long offset,int value); // This is for testing only
};
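
A short sketch of the clone-and-inspect path exposed above; the helper, and the assumption of an existing manager plus a valid source area id, are hypothetical.

#include "areaManager.h"
// Hypothetical helper: clone an existing area and report where it landed.
static int cloneAndReport(areaManager &am, int sourceID)
{
	void *cloneAddr = NULL;
	int cloneID = am.cloneArea(sourceID, "clone", &cloneAddr);   // ANY, NO_LOCK, writable defaults
	if (cloneID < 0)
		return cloneID;
	area_info info;
	if (am.getAreaInfo(cloneID, &info) == B_OK)
		error("clone \"%s\" at %p, %ld bytes\n", info.name, info.address, (long)info.size);
	return cloneID;
}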

View File

@ -1,31 +0,0 @@
#include "areaPool.h"
#include "area.h"
#include "page.h"
#include "vmHeaderBlock.h"
#include "pageManager.h"
extern vmHeaderBlock *vmBlock;
// If we can get one from an existing block, cool. If not, get a new block, create as many as will fit in it, put them on the free list, and call ourselves recursively
area *poolarea::get(void) {
area *ret=NULL;
if (unused.count()) {
//error ("poolarea::get: Getting an unused one!\n");
ret=(area *)unused.next();
}
if (ret) {
//error ("poolarea::get: Returning address:%x \n",ret);
return ret;
}
else {
page *newPage=vmBlock->pageMan->getPage();
//error ("poolarea::get: Getting new page %lx!\n",newPage->getAddress());
if (!newPage)
throw ("Out of pages to allocate a pool!");
int newCount=PAGE_SIZE/sizeof(area);
//error ("poolarea::get: Adding %d new elements to the pool!\n",newCount);
for (int i=0;i<newCount;i++)
unused.add(((node *)(newPage->getAddress()+(i*sizeof(area)))));
return (get()); // A little cheat - call self again to get the first one from stack...
}
}

View File

@ -1,19 +0,0 @@
#include <OS.h>
#include "lockedList.h"
class area;
class poolarea
{
private:
lockedList unused;
sem_id inUse;
public:
// Constructors and Destructors and related
poolarea(void) { }
// Mutators
area *get(void);
void put(area *in) {
unused.add((node *)in);
}
};

View File

@ -1,75 +0,0 @@
#include <new.h>
#include <cacheManager.h>
#include "vmHeaderBlock.h"
// functions for hash and isEqual. No surprises
ulong vnodeHash (node &vp) {vnode &vn=reinterpret_cast <vnode &>(vp); return vn.offset+vn.fd;}
bool vnodeisEqual (node &vp,node &vp2) {
vnode &vn=reinterpret_cast <vnode &>(vp);
vnode &vn2=reinterpret_cast <vnode &>(vp2);
return vn.fd==vn2.fd && vn.offset==vn2.offset;
}
extern vmHeaderBlock *vmBlock;
cacheManager::cacheManager(void) : area (),cacheMembers(30) {
myLock=create_sem(1,"Cache Manager Semaphore");
cacheMembers.setHash(vnodeHash);
cacheMembers.setIsEqual(vnodeisEqual);
}
// Given a vnode and protection level, see if we have it in cache already
void *cacheManager::findBlock(vnode *target,bool readOnly) {
cacheMember *candidate=reinterpret_cast<cacheMember *>(cacheMembers.find(target));
if (!candidate || readOnly || candidate->vp->getProtection()>=writable)
return candidate;
// At this point, we have the first one in the hash bucket. Loop over the hash bucket from now on,
// looking for an equality and writability match...
for (struct cacheMember *cur=candidate;cur;cur=reinterpret_cast<cacheMember *>(cur->next)) {
if ((target==cur->vn) && (readOnly || (cur->vp->getProtection()>=writable)))
return (cur->vp->getStartAddress());
}
// We didn't find one, but to get here, there has to be one that is READ ONLY. So let's make a copy
// of that one, but writable...
return createBlock(target,false);
}
// No cache hit found; have to make a new one. Find a virtual page, create a vnode, and map.
void *cacheManager::createBlock(vnode *target,bool readOnly, cacheMember *candidate) {
bool foundSpot=false;
vpage *cur=NULL;
unsigned long begin=CACHE_BEGIN;
lock();
// Find a place in the cache's virtual space to put this vnode...
// This *HAS* to succeed, because the address space should be larger than physical memory...
for (int i=0;!foundSpot && i<pageCount;i++) {
cur=getNthVpage(i);
foundSpot=!cur->getPhysPage();
}
// Create a vnode here
cur->setup((unsigned long)(cur->getStartAddress()),target,NULL,((readOnly)?readable:writable),NO_LOCK);
cacheMembers.add(cur);
// While this may not seem like a good idea (since this only happens on a write),
// it is because someone may only write to part of the file/page...
if (candidate)
memcpy(cur->getStartAddress(),candidate->vp->getStartAddress(),PAGE_SIZE);
unlock();
// return address from this vnode
return cur->getStartAddress();
}
void *cacheManager::readBlock(vnode *target) {
void *destination=findBlock(target,true);
if (destination) return destination;
return createBlock(target,true);
}
void *cacheManager::writeBlock(vnode *target) {
void *destination=findBlock(target,false);
if (destination) return destination;
return createBlock(target,false);
}
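
A sketch of how a file-system caller might ask the cache above for a block; the global pointer name cacheMan is a guess, only the readBlock()/writeBlock() signatures are taken from this file.

#include <cacheManager.h>
extern cacheManager *cacheMan;   // hypothetical global instance
// Look up (or fault in) the cache block backing a given fd/offset pair.
void *cachedRead(int fd, unsigned long offset)
{
	vnode target;                // keyed on (fd, offset), as vnodeHash/vnodeisEqual do
	target.fd = fd;
	target.offset = offset;
	target.valid = true;
	return cacheMan->readBlock(&target);
}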

View File

@ -1,36 +0,0 @@
#include <lockedList.h>
#include <vm.h>
#include <vpage.h>
#include <area.h>
#include <hashTable.h>
struct cacheMember : public node
{
vnode *vn;
vpage *vp;
};
class cacheManager : public area
{
private:
hashTable cacheMembers;
void *findBlock (vnode *target,bool readOnly);
void *createBlock (vnode *target,bool readOnly, cacheMember *candidate=NULL);
sem_id myLock;
public:
// For these two, the VFS passes in the target vnode
// Return value is the address. Note that the paging daemon does the actual loading
// Constructors and Destructors and related
cacheManager(void);
// Mutators
void *readBlock (vnode *target);
void *writeBlock (vnode *target);
void lock() {acquire_sem(myLock);}
void unlock() {release_sem(myLock);}
// External methods for "server" type calls
void pager(int desperation); // override, as we should blow away useless nodes, not just free blocks.
void saver(void); // Override - not sure why
};

View File

@ -1,20 +0,0 @@
#include <OS.h>
#include <stdio.h>
#include <stdarg.h>
static sem_id errorPrinting=0;
// Create a function for standardized formats. Wish I could get rid of the warning associated with this...
void error(char *fmt, ...)
{
if (errorPrinting==0)
errorPrinting=create_sem(1,"error_printing");
acquire_sem(errorPrinting);
va_list argp;
char tmp[2000];
sprintf(tmp, "[%lld] error: %s",real_time_clock_usecs(),fmt);
va_start(argp, fmt);
vfprintf(stderr,tmp,argp);
va_end(argp);
release_sem(errorPrinting);
}

View File

@ -1 +0,0 @@
void error(char *fmt, ...);

View File

@ -1,134 +0,0 @@
#ifndef _HASH_H
#define _HASH_H
#include "lockedList.h"
#include "vm.h"
#include "page.h"
#include "pageManager.h"
#include "vmHeaderBlock.h"
#include <new.h>
extern vmHeaderBlock *vmBlock;
class hashIterate;
class hashTable : public list
{
friend hashIterate;
public:
// Constructors and Destructors and related
hashTable(int size) {
nodeCount=0;
numRocks=size;
//error ("Starting to initalize hash table\n");
if (size*sizeof (list *)>PAGE_SIZE)
throw ("Hash table too big!");
//error ("Getting Page\n");
// Get the block for the page of pointers
page *newPage=vmBlock->pageMan->getPage();
//error ("hashTable::hashTable - Got Page %x\n",newPage);
pageList.add(newPage);
if (!newPage) {
error ("Out of pages to allocate a pool! newPage = %x\n",newPage);
throw ("Out of pages to allocate a pool!");
}
rocks=(list **)(newPage->getAddress());
//error ("Got rocks\n");
int listsPerPage=PAGE_SIZE/sizeof(list);
int pages=(size+(listsPerPage-1))/listsPerPage;
for (int pageCount=0;pageCount<pages;pageCount++) {
// Allocate a page of lists
page *newPage=vmBlock->pageMan->getPage();
//error ("hashTable::hashTable - Got Page %x\n",newPage);
if (!newPage)
throw ("Out of pages to allocate a pool!");
for (int i=0;i<listsPerPage && pageCount*listsPerPage+i<size;i++)
rocks[pageCount*listsPerPage+i]=new ((list *)(newPage->getAddress()+(i*sizeof(list)))) list;
pageList.add(newPage);
}
}
~hashTable() {
while (struct page *cur=reinterpret_cast<page *>(pageList.next())) {
//error ("hashTable::~hashTable; freeing page %x\n",cur);
vmBlock->pageMan->freePage(cur);
}
}
// Mutators
void setHash (ulong (*hash_in)(node &)) { hash=hash_in; }
void setIsEqual (bool (*isEqual_in)(node &,node &)) { isEqual=isEqual_in; }
void add (node *newNode) {
if (!hash)
throw ("Attempting to use a hash table without setting up a 'hash' function");
unsigned long hashValue=hash(*newNode)%numRocks;
// Note - no looking for duplicates; no ordering.
rocks[hashValue]->add(newNode);
}
// Accessors
int count(void) {return nodeCount;}
node *find(node *findNode) {
if (!hash)
throw ("Attempting to use a hash table without setting up a 'hash' function");
if (!isEqual)
throw ("Attempting to use a hash table without setting up an 'isEqual' function");
unsigned long hashValue=hash(*findNode)%numRocks;
for (struct node *cur=rocks[hashValue]->top();cur ;cur=cur->next)
if (isEqual(*findNode,*cur))
return cur;
return NULL;
}
node *next(void) {throw ("Next is invalid in a hash table!");} // This operation doesn't make sense for this class
void remove(node *toNuke) {
if (!hash)
throw ("Attempting to use a hash table without setting up a 'hash' function");
unsigned long hashValue=hash(*toNuke)%numRocks;
rocks[hashValue]->remove(toNuke);
}
// Debugging
void dump(void) {
for (int i=0;i<numRocks;i++)
for (struct node *cur=rocks[i]->top();cur;cur=cur->next)
error ("hashTable::dump: On bucket %d of %d, At %p, next = %p\n",i,numRocks,cur,cur->next);
}
bool ensureSane (void) {
bool ok=true;
for (int i=0;i<numRocks;i++)
ok&=rocks[i]->ensureSane();
return ok;
}
private:
ulong (*hash)(node &a);
bool (*isEqual)(node &a,node &b);
list **rocks;
list pageList;
int numRocks;
};
class hashIterate {
private:
int bucket;
node *current;
hashTable &table;
public:
hashIterate(hashTable *in) : bucket(0),current(NULL),table(*in) {}
hashIterate(hashTable &in) : bucket(0),current(NULL),table(in) {}
node *get(void)
{
while (!current && bucket<table.numRocks)
current=table.rocks[bucket++]->top();
if (!current)
return NULL; // No more in the hash table
node *retVal=current; // Store the current and get the next in preparation
current=current->next;
return retVal;
}
};
#endif
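
A usage sketch for the table above, with a made-up key type; it assumes the page manager behind vmBlock is already up, since the constructor grabs pages from it.

// Hypothetical node type and hash/isEqual callbacks for illustration.
struct keyNode : public node { int key; };
static ulong keyHash(node &n) { return ((keyNode &)n).key; }
static bool keyEqual(node &a, node &b) { return ((keyNode &)a).key == ((keyNode &)b).key; }
static void hashExample(void)
{
	hashTable table(AREA_HASH_TABLE_SIZE);
	table.setHash(keyHash);
	table.setIsEqual(keyEqual);
	static keyNode stored;                  // the table links nodes, it does not copy them
	stored.key = 7;
	table.add(&stored);
	keyNode probe;
	probe.key = 7;
	keyNode *found = (keyNode *)table.find(&probe);   // NULL if absent
	for (hashIterate it(table); node *cur = it.get(); )
		error("walked node with key %d\n", ((keyNode *)cur)->key);
	if (found)
		table.remove(found);
}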

View File

@ -1,83 +0,0 @@
#ifndef _LIST_H
#define _LIST_H
// Simple linked list
#include <stdlib.h>
#include <stdio.h>
#include "error.h"
struct node
{
node *next;
};
class list {
public:
// Constructors and Destructors and related
list(void){nodeCount=0;rock=NULL;}
// Mutators
void add (node *newNode) {
newNode->next=rock;
rock=newNode;
nodeCount++;
}
node *next(void) {
//dump();
node *n=rock;
if (rock) {
rock=rock->next;
nodeCount--;
}
//dump();
return n;
}
void remove(node *toNuke) {
//error ("list::remove starting: nuking %x \n",toNuke);
//dump();
if (rock==toNuke) {
rock=rock->next;
nodeCount--;
}
else {
bool done=false;
for (struct node *cur=rock;!done && cur->next;cur=cur->next)
if (cur->next==toNuke) {
cur->next=toNuke->next;
nodeCount--;
done=true;
}
if (!done)
throw ("list::remove failed to find node %x\n",toNuke);
}
//error ("list::remove ending: \n");
//dump();
}
// Accessors
int count(void) {return nodeCount;}
node *top(void) {return rock;} // Intentionally non-destructive ; works like peek() on a queue
// Debugging
void dump(void) {
for (struct node *cur=rock;cur;cur=cur->next)
{ error ("list::dump: At %p, next = %p\n",cur,cur->next); }
}
bool ensureSane (void) {
int temp=nodeCount;
for (struct node *cur=rock;cur && --temp;cur=cur->next) ; // Intentional to have no body
if (temp<0) {
error ("list::ensureSane: found too many records!\n");
return false;
}
if (temp>0) {
error ("list::ensureSane: found too few records!\n");
return false;
}
return true;
}
protected:
struct node *rock;
int nodeCount;
};
#endif
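
The list is intrusive: whatever it stores must derive from node, and remove() only unlinks, it never frees. A small usage sketch (the payload type is made up):

// Hypothetical payload type for illustration.
struct item : public node { int value; };
static void listExample(void)
{
	list l;
	item a, b;
	a.value = 1;
	b.value = 2;
	l.add(&a);                       // add() pushes on the front...
	l.add(&b);                       // ...so b is now at top()
	for (struct node *cur = l.top(); cur; cur = cur->next)
		error("value = %d\n", ((item *)cur)->value);
	l.remove(&a);                    // unlinks only; the caller owns the storage
}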

View File

@ -1,19 +0,0 @@
#ifndef _LOCKED_LIST_H
#define _LOCKED_LIST_H
#include <OS.h>
#include <list.h>
class lockedList: public list
{
public:
lockedList(void) {myLock=create_sem(1,"lockedListSem");}
void add(node *newNode) {lock();list::add(newNode);unlock();}
node *next(void) {lock();node *retVal=list::next();unlock();return retVal;}
void remove(node *newNode) {lock();list::remove(newNode);unlock();}
void dump() {lock();list::dump();unlock();}
bool ensureSane(void) {lock();bool retVal=list::ensureSane();unlock();return retVal;}
void lock() { acquire_sem(myLock);}
void unlock() { release_sem(myLock);}
private:
sem_id myLock;
};
#endif

View File

@ -1,118 +0,0 @@
/* $NetBSD: mman.h,v 1.24.2.1 2000/11/20 18:11:32 bouyer Exp $ */
/*-
* Copyright (c) 1982, 1986, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)mman.h 8.2 (Berkeley) 1/9/95
*/
#ifndef _SYS_MMAN_H_
#define _SYS_MMAN_H_
/*
* Protections are chosen from these bits, or-ed together
*/
#define PROT_NONE 0x00 /* no permissions */
#define PROT_READ 0x01 /* pages can be read */
#define PROT_WRITE 0x02 /* pages can be written */
#define PROT_EXEC 0x04 /* pages can be executed */
/*
* Flags contain sharing type and options.
* Sharing types; choose one.
*/
#define MAP_SHARED 0x0001 /* share changes */
#define MAP_PRIVATE 0x0002 /* changes are private */
/*
* Deprecated flag; these are treated as MAP_PRIVATE internally by
* the kernel.
*/
#define MAP_COPY 0x0004 /* "copy" region at mmap time */
/*
* Other flags
*/
#define MAP_FIXED 0x0010 /* map addr must be exactly as requested */
#define MAP_RENAME 0x0020 /* Sun: rename private pages to file */
#define MAP_NORESERVE 0x0040 /* Sun: don't reserve needed swap area */
#define MAP_INHERIT 0x0080 /* region is retained after exec */
#define MAP_NOEXTEND 0x0100 /* for MAP_FILE, don't change file size */
#define MAP_HASSEMAPHORE 0x0200 /* region may contain semaphores */
/*
* Mapping type
*/
#define MAP_FILE 0x0000 /* map from file (default) */
#define MAP_ANON 0x1000 /* allocated from memory, swap space */
/*
* Error indicator returned by mmap(2)
*/
#define MAP_FAILED ((void *) -1) /* mmap() failed */
/*
* Flags to msync
*/
#define MS_ASYNC 0x01 /* perform asynchronous writes */
#define MS_INVALIDATE 0x02 /* invalidate cached data */
#define MS_SYNC 0x04 /* perform synchronous writes */
/*
* Flags to mlockall
*/
#define MCL_CURRENT 0x01 /* lock all pages currently mapped */
#define MCL_FUTURE 0x02 /* lock all pages mapped in the future */
#if !defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)
/*
* Advice to madvise
*/
#define MADV_NORMAL 0 /* no further special treatment */
#define MADV_RANDOM 1 /* expect random page references */
#define MADV_SEQUENTIAL 2 /* expect sequential page references */
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_DONTNEED 4 /* dont need these pages */
#define MADV_SPACEAVAIL 5 /* insure that resources are reserved */
#define MADV_FREE 6 /* pages are empty, free them */
/*
* Flags to minherit
*/
#define MAP_INHERIT_SHARE 0 /* share with child */
#define MAP_INHERIT_COPY 1 /* copy into child */
#define MAP_INHERIT_NONE 2 /* absent from child */
#define MAP_INHERIT_DONATE_COPY 3 /* copy and delete -- not
implemented in UVM */
#define MAP_INHERIT_DEFAULT MAP_INHERIT_COPY
#endif
#endif /* !_SYS_MMAN_H_ */

View File

@ -1,61 +0,0 @@
#ifndef _OLIST_H
#define _OLIST_H
#include "list.h"
static bool throwException (void *foo, void *bar)
{
throw ("Attempting to use an ordered list without setting up a 'toLessThan' function");
}
class orderedList : public list
{
public:
// Constructors and Destructors and related
orderedList(void) {nodeCount=0;rock=NULL; isLessThan=throwException; }
// Mutators
void setIsLessThan (bool (*iLT)(void *,void *)) { isLessThan=iLT; }
void add(node *in) {
nodeCount++;
//error ("orderedList::add starting\n");
if (!rock || isLessThan(in,rock)) { // special case - this will be the first one
//error ("orderedList::specialCase starting\n");
in->next=rock;
rock=in;
}
else {
//error ("orderedList::Normal Case starting\n");
bool done=false;
for (struct node *cur=rock;cur && !done;cur=cur->next)
if (!(cur->next) || isLessThan(in,cur->next)) { // If we have found our niche, *OR* this is the last element, insert here.
//error ("orderedList::Normal Case Adding Start\n");
in->next=cur->next;
cur->next=in;
done=true;
//error ("orderedList::Normal Case Adding END\n");
}
//error ("orderedList::Normal Case ending\n");
}
}
void remove(node *toNuke) {
if (rock==toNuke) {
rock=rock->next;
nodeCount--;
}
else {
bool done=false;
for (struct node *cur=rock;!done && (cur->next);cur=cur->next)
if (cur->next==toNuke) {
cur->next=toNuke->next;
nodeCount--;
done=true;
}
else if (isLessThan(cur->next,toNuke)) // this is backwards intentionally
done=true;
}
}
private:
bool (*isLessThan)(void *a,void *b);
};
#endif

View File

@ -1,9 +0,0 @@
#include "page.h"
#include "string.h"
void page::zero(void)
{
memset(physicalAddress,0,PAGE_SIZE);
// for (int i=0;i<(PAGE_SIZE/4);i++)
// ((long *)physicalAddress)[i]=0;
}

View File

@ -1,25 +0,0 @@
#ifndef _PAGE_H
#define _PAGE_H
#include "vm.h"
#include "lockedList.h"
class page : public node {
private:
void *cpuSpecific;
void *physicalAddress;
public:
// Constructors and Destructors and related
page(void) {};
void setup (void *address) {count=0;physicalAddress=address;};
// Accessors
unsigned long getAddress(void) {return (unsigned long)physicalAddress;}
// Debugging
void dump(void) { error ("page::dump: Page %p, physicalAddress = %lx\n",this,getAddress()); }
// External methods for "server" type calls
void zero(void);
long count; // Yes, this is large. However, the only atomic add that I have in userland works on int32's. In kernel land, we could shrink this
};
#endif

View File

@ -1,116 +0,0 @@
#include "pageManager.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Handy function (actually handy for the casting) to add a long to a void *
void *addOffset(void *base,unsigned long offset) {
return (void *)(((unsigned long)base+offset));
}
pageManager::pageManager(void) {
}
// Since all of the physical pages will need page structures, allocate memory off of the top for them. Set up the lists and semaphores.
void pageManager::setup(void *area,int pages) {
// Calculate the number of pages that we will need to hold the page structures
int pageOverhead=((pages*sizeof(page))+(PAGE_SIZE-1))/PAGE_SIZE;
for (int i=0;i<pages-pageOverhead;i++) {
page *newPage=(page *)(addOffset(area,i*sizeof(page)));
error ("Setting up a page object at %x, passing in %x\n",
newPage,
addOffset(area,(i+pageOverhead)*PAGE_SIZE));
newPage->setup(addOffset(area,(i+pageOverhead)*PAGE_SIZE));
unused.add(newPage);
}
totalPages=pages;
//error ("pageManager::setup - %d pages ready to top() and roll\n",unused.count());
}
// Try to get a clean page first. If that fails, take an unused (dirty) one and zero it. Loop until we get one.
page *pageManager::getPage(void) {
page *ret=NULL;
while (!ret)
{
if (clean.count()) {
ret=(page *)clean.next();
} // This could fail if someone swooped in and stole our page.
if (!ret && unused.count()) {
ret=(page *)unused.next();
if (ret)
ret->zero();
} // This could fail if someone swooped in and stole our page.
}
error ("pageManager::getPage - returning page %x (phys address %x), clean = %d, unused = %d, inuse = %x\n",ret,ret->getAddress(),clean.count(),unused.count(),inUse.count());
inUse.add(ret);
ret->count++;
if (!ret)
throw ("Out of physical pages!");
return ret;
}
// Take page from in use list and put it on the unused list
void pageManager::freePage(page *toFree) {
//error ("pageManager::freePage; count = %d, address = %p\n",toFree->count,toFree);
if (atomic_add(&(toFree->count),-1)==1) { // atomic_add returns the *PREVIOUS* value. So we need to check to see if the one we are wasting was the last one.
inUse.remove(toFree);
// inUse.dump();
unused.add(toFree);
// unused.dump();
}
}
// Loop forever cleaning any necessary pages
void pageManager::cleaner(void) {
while (1) {
snooze(250000);
cleanOnePage();
}
}
// Find a page that needs cleaning. Take it from the "unused" list, clean it and put it on the clean list.
void pageManager::cleanOnePage(void) {
if (unused.count()) {
page *first=(page *)unused.next();
if (first) {
first->zero();
clean.add(first);
}
}
}
// Calculate how desperate we are for physical pages; 1 is not desperate at all, 5 is critical.
int pageManager::desperation(void) { // Formula to determine how desperate system is to get pages back...
int percentClean=(unused.count()+clean.count())*100/totalPages;
if (percentClean>30) return 1;
return (35-percentClean)/7;
}
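// Worked values for the formula above (integer division; totalPages = 100
// assumed so that free pages == percentClean):
//   31+ free -> 1 (not desperate)       14 free -> (35-14)/7 = 3
//   28 free  -> (35-28)/7 = 1            7 free -> (35-7)/7  = 4
//   21 free  -> (35-21)/7 = 2            0 free -> (35-0)/7  = 5 (critical)
// Note that 29 or 30 free pages truncates to 0, just below the stated 1..5 range.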
void pageManager::dump(void) {
error ("Dumping the unused list (%d entries)\n",getUnusedCount());
unused.lock();
for (struct node *cur=unused.top();cur;) {
page *thisPage=(page *)cur;
thisPage->dump();
cur=cur->next;
}
unused.unlock();
error ("Dumping the clean list (%d entries)\n",getCleanCount());
clean.lock();
for (struct node *cur=clean.top();cur;) {
page *thisPage=(page *)cur;
thisPage->dump();
cur=cur->next;
}
error ("Dumping the inuse list (%d entries)\n",getInUseCount());
clean.unlock();
inUse.lock();
for (struct node *cur=inUse.top();cur;) {
page *thisPage=(page *)cur;
thisPage->dump();
cur=cur->next;
}
inUse.unlock();
}

View File

@ -1,34 +0,0 @@
#ifndef _PAGE_MANAGER_H
#define _PAGE_MANAGER_H
#include "/boot/develop/headers/be/kernel/OS.h"
#include "lockedList.h"
#include "page.h"
class pageManager {
public:
// Constructors and Destructors and related
pageManager(void);
void setup(void *memory,int pages);
void freePage(page *);
// Mutators
page *getPage(void);
// Accessors
int desperation(void);
int freePageCount(void) {return clean.count()+unused.count();}
// External methods for "server" type calls
void cleaner(void);
void cleanOnePage(void);
// Debugging
void dump(void);
int getCleanCount(void) {return clean.count();}
int getUnusedCount(void) {return unused.count();}
int getInUseCount(void) {return inUse.count();}
private:
lockedList clean,unused,inUse;
int totalPages;
};
#endif

View File

@ -1,50 +0,0 @@
#include "pageManager.h"
#include "vmHeaderBlock.h"
extern vmHeaderBlock *vmBlock;
/* This is the template
* replace TYPE with the type you need a pool for
*
class poolTYPE
{
private:
list unused;
sem_id inUse;
public:
poolTYPE(void) { inUse = create_sem(1,"TYPEpool");
}
TYPE *get(void) {
TYPE *ret=NULL;
if (unused.count()) {
error ("poolTYPE::get: Getting an unused one!\n");
acquire_sem(inUse);
ret=(TYPE *)unused.next();
release_sem(inUse);
}
if (ret) {
error ("poolTYPE::get: Returning address:%x \n",ret);
return ret;
}
else {
error ("poolTYPE::get: Getting a new page!\n");
page *newPage=vmBlock->pageMan->getPage();
if (!newPage)
throw ("Out of pages to allocate a pool!");
int newCount=PAGE_SIZE/sizeof(TYPE);
acquire_sem(inUse);
error ("poolTYPE::get: Adding %d new elements to the pool!\n",newCount);
for (int i=0;i<newCount;i++)
unused.add(((void *)(newPage->getAddress()+(i*sizeof(TYPE)))));
release_sem(inUse);
return (get()); // A little cheat - call self again to get the first one from stack...
}
}
void put(TYPE *in) {
acquire_sem(inUse);
unused.add(in);
release_sem(inUse);
}
};
*/
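For concreteness, this is roughly what the template expands to for the area type (a sketch only; the real pool for areas lives in areaPool.C/areaPool.h, which are not shown in this hunk, and the (node *) cast follows the poolvnode and poolvpage implementations further down):
class poolarea
{
	private:
		list unused;
		sem_id inUse;
	public:
		poolarea(void) { inUse = create_sem(1,"areapool"); }
		area *get(void) {
			area *ret=NULL;
			if (unused.count()) {
				acquire_sem(inUse);
				ret=(area *)unused.next();
				release_sem(inUse);
			}
			if (ret)
				return ret;
			page *newPage=vmBlock->pageMan->getPage();	// carve a fresh physical page into pool slots
			if (!newPage)
				throw ("Out of pages to allocate a pool!");
			int newCount=PAGE_SIZE/sizeof(area);
			acquire_sem(inUse);
			for (int i=0;i<newCount;i++)
				unused.add((node *)(newPage->getAddress()+(i*sizeof(area))));
			release_sem(inUse);
			return (get());	// the list is now non-empty, so the recursive call succeeds
		}
		void put(area *in) {
			acquire_sem(inUse);
			unused.add((node *)in);
			release_sem(inUse);
		}
};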

View File

@ -1,53 +0,0 @@
#include "swapFileManager.h"
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <new.h>
#include <vnodePool.h>
#include "vmHeaderBlock.h"
extern vmHeaderBlock *vmBlock;
// Open (or create) the swap file; the free list is a lockedList and carries its own lock
swapFileManager::swapFileManager(void)
{
swapFile = open("/boot/var/tmp/OBOS_swap",O_RDWR|O_CREAT,0777);
if (swapFile==-1)
error ("swapFileManager::swapFileManager: swapfile not opened, errno = %d, %s\n",errno,strerror(errno));
}
// Try to get a node from the free list. If there isn't one, extend the swap file with a new node
vnode &swapFileManager::findNode(void)
{
//error ("swapFileManager::findNode: Entering findNode \n");
//swapFileFreeList.dump();
//error ("swapFileManager::findNode: Finding a new node for you, Master: ");
vnode *newNode;
//error ("locking in sfm\n");
newNode=reinterpret_cast<vnode *>(swapFileFreeList.next());
//error ("unlocking in sfm\n");
if (!newNode)
{
newNode=new (vmBlock->vnodePool->get()) vnode;
newNode->fd=swapFile;
newNode->offset=maxNode+=PAGE_SIZE;
//error (" New One: %d\n",newNode->offset);
}
newNode->valid=false;
//error ("swapFileManager::findNode: swapFileFreeList is now: ");
//swapFileFreeList.dump();
return *newNode;
}
// Return this vnode to the free list, but only once no vpage references it
void swapFileManager::freeVNode(vnode &v)
{
if (!v.vpages.count())
{
//error ("locking in sfm\n");
//error ("swapFileManager::freeNode: Starting Freeing a new node for you, Master: offset:%d\n",v.offset);
v.valid=false;
swapFileFreeList.add(&v);
//error ("unlocking in sfm\n");
}
}
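A minimal hypothetical round trip through this manager (an illustration, not part of the original file):
void swapRoundTrip(swapFileManager &sfm) {
	vnode &n=sfm.findNode();	// reuse a free slot or grow the swap file by one page
	// ... attach n to a vpage; vpage::flush()/refresh() do the actual I/O through n.fd and n.offset ...
	sfm.freeVNode(n);		// goes back on the free list only once no vpage references it
}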

View File

@ -1,28 +0,0 @@
#ifndef _SWAPFILE_MANAGER
#define _SWAPFILE_MANAGER
#include <unistd.h>
#include <fcntl.h>
#include "vm.h"
#include "OS.h"
#include "lockedList.h"
class swapFileManager {
private:
int swapFile;
unsigned long maxNode;
lockedList swapFileFreeList;
public:
// Constructors and Destructors and related
swapFileManager (void);
void freeVNode(vnode &); // Free a node
// Mutators
vnode &findNode(void); // Get an unused node
void write_block(vnode &node,void *loc,unsigned long size); // The general access points
void read_block(vnode &node,void *loc,unsigned long size);
// Accessors
int getFD(void) {return swapFile;}
};
#endif

View File

@ -1,59 +0,0 @@
#include "lockedList.h"
#ifndef _VM_TYPES
#define _VM_TYPES
const int PAGE_SIZE = 4096;
const int BITS_IN_PAGE_SIZE = 12;
const int AREA_HASH_TABLE_SIZE = 40;
struct vnode : public node
{
int fd;
unsigned long offset;
bool valid;
list vpages;
vnode (void)
{
valid=false;
}
void dump(void) { error ("vnode::dump - fd = %d, offset = %ld, valid = %d\n",fd,offset,valid);
}
};
#define B_OS_NAME_LENGTH 32
enum protectType {none=0,readable, writable,copyOnWrite};
//B_EXACT_ADDRESS You want the value of *addr to be taken literally and strictly.
// If the area can't be allocated at that location, the function fails.
//B_BASE_ADDRESS The area can start at a location equal to or greater than *addr.
//B_ANY_ADDRESS The starting address is determined by the system.
// In this case, the value that's pointed to by addr is ignored (going into the function).
//B_ANY_KERNEL_ADDRESS The starting address is determined by the system, and the new area will belong to the kernel's team;
// it won't be deleted when the application quits. In this case, the value that's pointed to by addr is
// ignored (going into the function)
//B_CLONE_ADDRESS This is only meaningful to the clone_area() function.
enum addressSpec {EXACT,BASE,ANY,ANY_KERNEL,CLONE};
//B_FULL_LOCK The area's memory is locked into RAM when the area is created, and won't be swapped out.
//B_CONTIGUOUS Not only is the area's memory locked into RAM, it's also guaranteed to be contiguous. This is particularly -
// and perhaps exclusively - useful to designers of certain types of device drivers.
//B_LAZY_LOCK Allows individual pages of memory to be brought into RAM through the natural order of things and then locks them.
//B_NO_LOCK Pages are never locked, they're swapped in and out as needed.
//B_LOMEM This is a special constant that's used for areas that need to be locked, contiguous, and that fit within the
// first 16MB of physical memory. The folks that need this constant know who they are.
enum pageState {FULL,CONTIGUOUS,LAZY,NO_LOCK,LOMEM};
#define USER_BASE 0x10000000
#define KERNEL_BASE 0x80000000
#define CACHE_BEGIN 0x90000000
#define CACHE_END 0xe0000000
#define B_DMA_IO 0x00000001
#define B_READ_DEVICE 0x00000002
enum mmapSharing {CLONEAREA,SHARED,PRIVATE,COPY};
struct physical_entry {
void *address;
ulong size;
};
#endif
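To illustrate how the specs above combine, here is a hedged usage sketch (not part of the original header; it assumes the vmInterface::createArea signature shown later in this commit):
void exampleSpecs(vmInterface &vm) {
	void *anyAddr=NULL;
	// Let the system pick a user-space address; pages are demand-faulted and swappable.
	int demandArea=vm.createArea((char *)"example_any",4,&anyAddr,ANY,NO_LOCK,writable);
	void *exactAddr=(void *)USER_BASE;
	// Insist on an exact address and wire every page up front; fails if the range is taken.
	int wiredArea=vm.createArea((char *)"example_exact",1,&exactAddr,EXACT,FULL,readable);
	(void)demandArea;
	(void)wiredArea;
}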

View File

@ -1,26 +0,0 @@
#ifndef _VM_HEADERBLOCK
#define _VM_HEADERBLOCK
#ifndef I_AM_VM_INTERFACE
class poolarea;
class poolvnode;
class pageManager;
class swapFileManager;
class cacheManager;
class vnodeManager;
class lockedList;
#endif
struct vmHeaderBlock
{
poolarea *areaPool;
poolvnode *vnodePool;
pageManager *pageMan;
swapFileManager *swapMan;
cacheManager *cacheMan;
vnodeManager *vnodeMan;
lockedList areas;
};
#endif

View File

@ -1,271 +0,0 @@
#include <new.h>
#include "mman.h"
#include "lockedList.h"
#include "area.h"
#include "areaPool.h"
#include "vnodePool.h"
#include "pageManager.h"
#include "swapFileManager.h"
#include "cacheManager.h"
#include "vnodeManager.h"
#define I_AM_VM_INTERFACE
#include "vmHeaderBlock.h"
#include "vmInterface.h"
vmHeaderBlock *vmBlock;
static areaManager am;
// The purpose of this interface is to validate options, translate where necessary and pass through to the areaManager.
areaManager *getAM(void)
{
return &am;
}
void *addToPointer(void *ptr,uint32 offset)
{
return ((void *)(((unsigned long)ptr)+offset));
}
areaManager *vmInterface::getAM(void)
{
// Normally we would go to the current user process to get this; since no such thing exists in this user-land test build, use the single static manager.
return &am;
}
int32 cleanerThread(void *pageMan)
{
pageManager *pm=(vmBlock->pageMan);
pm->cleaner();
return 0;
}
int32 saverThread(void *areaMan)
{
areaManager *am=getAM();
// This should iterate over all processes...
while (1)
{
snooze(250000);
am->saver();
}
}
int32 pagerThread(void *areaMan)
{
areaManager *am=getAM();
while (1)
{
snooze(1000000);
am->pager(vmBlock->pageMan->desperation());
}
}
vmInterface::vmInterface(int pages)
{
// Make the area for testing
char temp[1000];
sprintf (temp,"vm_test_clone_%ld",getpid());
if (clone_area(temp,(void **)(&vmBlock),B_ANY_ADDRESS,B_WRITE_AREA,find_area("vm_test"))<0)
{
// This is for compatibility when running inside BeOS only...
if (0>=create_area("vm_test",(void **)(&vmBlock),B_ANY_ADDRESS,B_PAGE_SIZE*pages,B_NO_LOCK,B_READ_AREA|B_WRITE_AREA))
{
error ("pageManager::pageManager: No memory!\n");
exit(1);
}
error ("Allocated an area. Address = %x\n",vmBlock);
// Figure out how many pages we need
int pageCount = (sizeof(poolarea)+sizeof(poolvnode)+sizeof(pageManager)+sizeof(swapFileManager)+sizeof(cacheManager)+sizeof(vmHeaderBlock)+PAGE_SIZE-1)/PAGE_SIZE;
if (pageCount >=pages)
{
error ("Hey! Go buy some ram! Trying to create a VM with fewer pages than the setup will take!\n");
exit(1);
}
error ("Need %d pages, creation calls for %d\n",pageCount,pages);
// Make all of the managers and the vmBlock to hold them
void *currentAddress = addToPointer(vmBlock,sizeof(struct vmHeaderBlock));
vmBlock->pageMan = new (currentAddress) pageManager;
currentAddress=addToPointer(currentAddress,sizeof(pageManager));
vmBlock->pageMan->setup(addToPointer(vmBlock,PAGE_SIZE*pageCount),pages-pageCount);
//error ("Set up Page Man\n");
vmBlock->areaPool = new (currentAddress) poolarea;
currentAddress=addToPointer(currentAddress,sizeof(poolarea));
vmBlock->vnodePool = new (currentAddress) poolvnode;
currentAddress=addToPointer(currentAddress,sizeof(poolvnode));
vmBlock->swapMan = new (currentAddress) swapFileManager;
currentAddress=addToPointer(currentAddress,sizeof(swapFileManager));
vmBlock->cacheMan = new (currentAddress) cacheManager;
currentAddress=addToPointer(currentAddress,sizeof(cacheManager));
vmBlock->vnodeMan = new (currentAddress) vnodeManager;
currentAddress=addToPointer(currentAddress,sizeof(vnodeManager));
error ("Need %d pages, creation calls for %d\n",pageCount,pages);
error ("vmBlock is at %x, end of structures is at %x, pageMan called with address %x, pages = %d\n",vmBlock,currentAddress,addToPointer(vmBlock,PAGE_SIZE*pageCount),pages-pageCount);
}
else
{
error ("Area found!\n");
}
// Start the kernel daemons
resume_thread(tid_cleaner=spawn_thread(cleanerThread,"cleanerThread",0,(vmBlock->pageMan)));
resume_thread(tid_saver=spawn_thread(saverThread,"saverThread",0,getAM()));
resume_thread(tid_pager=spawn_thread(pagerThread,"pagerThread",0,getAM()));
}
// Find an area from an address that it contains
int vmInterface::getAreaByAddress(void *address)
{
int retVal;
area *myArea = getAM()->findAreaLock(address);
if (myArea)
retVal= myArea->getAreaID();
else
retVal= B_ERROR;
return retVal;
}
status_t vmInterface::setAreaProtection(int Area,protectType prot)
{
status_t retVal;
retVal= getAM()->setProtection(Area,prot);
return retVal;
}
status_t vmInterface::resizeArea(int Area,size_t size)
{
status_t retVal;
retVal = getAM()->resizeArea(Area,size);
return retVal;
}
int vmInterface::createArea(char *AreaName,int pageCount,void **address, addressSpec addType,pageState state,protectType protect)
{
int retVal;
// error ("vmInterface::createArea: Creating an area!\n");
if (!AreaName)
return B_BAD_ADDRESS;
if (!address)
return B_BAD_ADDRESS;
if (strlen(AreaName)>=B_OS_NAME_LENGTH)
return B_BAD_VALUE;
if (pageCount<=0)
return B_BAD_VALUE;
if (addType>=CLONE || addType < EXACT)
return B_BAD_VALUE;
if (state>LOMEM || state < FULL)
return B_BAD_VALUE;
if (protect>copyOnWrite || protect < none)
return B_BAD_VALUE;
retVal = getAM()->createArea(AreaName,pageCount,address,addType,state,protect);
// error ("vmInterface::createArea: Done creating an area! ID = %d\n",retVal);
return retVal;
}
status_t vmInterface::delete_area(int area)
{
return getAM()->freeArea(area);
}
status_t vmInterface::getAreaInfo(int Area,area_info *dest)
{
status_t retVal;
//error ("vmInterface::getAreaInfo: Getting info about an area!\n");
if (!dest) return B_ERROR;
retVal = getAM()->getAreaInfo(Area,dest);
//error ("vmInterface::getAreaInfo: Done getting info about an area!\n");
return retVal;
}
status_t vmInterface::getNextAreaInfo(int process,int32 *cookie,area_info *dest)
{
status_t retVal;
// We *SHOULD* be getting the AM for this process. Something for HW integration time...
retVal = getAM()->getInfoAfter(*cookie,dest);
return retVal;
}
int vmInterface::getAreaByName(char *name)
{
int retVal=B_NAME_NOT_FOUND;
vmBlock->areas.lock();
for (struct node *cur=vmBlock->areas.top();cur && retVal==B_NAME_NOT_FOUND;cur=cur->next) {
area *myArea=(area *)cur;
error ("vmInterface::getAreaByName comapring %s to passed in %s\n",myArea->getName(),name);
if (myArea->nameMatch(name))
retVal=myArea->getAreaID();
}
vmBlock->areas.unlock();
return retVal;
}
int vmInterface::cloneArea(int newAreaID,char *AreaName,void **address, addressSpec addType, pageState state, protectType prot) // default arguments live on the declaration in vmInterface.h
{
int retVal;
retVal = getAM()->cloneArea(newAreaID,AreaName,address, addType, state, prot);
return retVal;
}
void vmInterface::pager(void)
{
// This should iterate over all processes...
while (1)
{
snooze(250000);
getAM()->pager(vmBlock->pageMan->desperation());
}
}
void vmInterface::saver(void)
{
// This should iterate over all processes...
while (1)
{
snooze(250000);
getAM()->saver();
}
}
void vmInterface::cleaner(void)
{
// This loops on its own
vmBlock->pageMan->cleaner();
}
void *vmInterface::mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset)
{
void *retVal;
retVal = getAM()->mmap(addr,len,prot,flags,fd,offset);
return retVal;
}
status_t vmInterface::munmap(void *addr, size_t len)
{
status_t retVal;
retVal = getAM()->munmap(addr,len);
return retVal;
}
// Driver Interface
long vmInterface::get_memory_map(const void *address, ulong numBytes, physical_entry *table, long numEntries) {
getAM()->get_memory_map(address, numBytes,table,numEntries);
return B_OK;
}
long vmInterface::lock_memory(void *address, ulong numBytes, ulong flags) {
return getAM()->lock_memory(address,numBytes,flags);
}
long vmInterface::unlock_memory(void *address, ulong numBytes, ulong flags) {
return getAM()->unlock_memory(address,numBytes,flags);
}
area_id vmInterface::map_physical_memory(const char *areaName, void *physAddress, size_t bytes, uint32 spec, uint32 protectionIn, void **vaddress) {
int pages=(bytes + (PAGE_SIZE) - 1)/PAGE_SIZE;
addressSpec as=(addressSpec) spec;
protectType pro=(protectType) protectionIn;
return getAM()->createArea((char *)areaName, pages, vaddress, as, LAZY, pro);
}
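Putting the interface together, a hypothetical user-land test run might look like this (a sketch only; it assumes the test accessors accept the virtual address returned by createArea, which is how the getByte/setByte paths further down behave):
int exampleRun(void) {
	vmInterface vm(1024);				// carve out 1024 pages for the test arena
	void *addr=NULL;
	int id=vm.createArea((char *)"demo",2,&addr,ANY,NO_LOCK,writable);
	if (id<0)
		return id;
	vm.setByte((unsigned long)addr,'A');		// first touch faults the page in
	char c=vm.getByte((unsigned long)addr);
	vm.delete_area(id);
	return (c=='A')?0:-1;
}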

View File

@ -1,45 +0,0 @@
#include "vm.h"
#include "pageManager.h"
#include "areaManager.h"
#include "swapFileManager.h"
class vmInterface // This is the class that "owns" all of the managers.
{
private:
areaManager *getAM(void); // This is for testing only...
public:
vmInterface(int pages);
int createArea(char *AreaName,int pageCount,void **address,
addressSpec addType=ANY,
pageState state=NO_LOCK,protectType protect=writable);
status_t delete_area(int Area);
status_t getAreaInfo(int Area,area_info *dest);
status_t getNextAreaInfo(int process,int32 *cookie,area_info *dest);
int getAreaByName(char *name);
int getAreaByAddress(void *address);
int cloneArea(int area,char *AreaName,void **address,
addressSpec addType=ANY,
pageState state=NO_LOCK,
protectType prot=writable);
status_t resizeArea(int area,size_t size);
status_t setAreaProtection(int area,protectType prot);
void *mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset);
status_t munmap(void *addr, size_t len);
void pager(void);
void saver(void);
void cleaner(void);
status_t writeCachedBlock(int fd, size_t offset, void *data);
status_t readCachedBlock(int fd, size_t offset, void *data);
// Driver Interface
long get_memory_map(const void *address, ulong numBytes, physical_entry *table, long numEntries);
long lock_memory(void *address, ulong numBytes, ulong flags);
long unlock_memory(void *address, ulong numBytes, ulong flags);
area_id map_physical_memory(const char *areaName, void *physAddress, size_t bytes, uint32 spec, uint32 protection, void **vaddress);
char getByte(unsigned long offset) {return getAM()->getByte(offset);} // This is for testing only
void setByte(unsigned long offset,char value) {getAM()->setByte(offset,value);} // This is for testing only
int getInt(unsigned long offset) {return getAM()->getInt(offset);} // This is for testing only
void setInt(unsigned long offset,int value) {getAM()->setInt(offset,value);} // This is for testing only
thread_id tid_cleaner,tid_saver,tid_pager;
};

View File

@ -1,94 +0,0 @@
#include <vnodeManager.h>
#include <vnodePool.h>
// Functions for hash and isEqual
ulong mnHash (node &vnodule) { return ((vnode &)vnodule).fd; }
bool mnIsEqual (node &vnodule1,node &vnodule2) {
vnode &v1= ((vnode &)vnodule1);
vnode &v2= ((vnode &)vnodule2);
return v1.fd==v2.fd && v1.offset==v2.offset;
}
// Set the hash and isEqual functions
vnodeManager::vnodeManager(void) : vnodes(20) {
vnodes.setHash(mnHash);
vnodes.setIsEqual(mnIsEqual);
}
// Find the vnode and return the first page that points to it.
vpage *vnodeManager::findVnode(vnode &target) {
vnode *found=reinterpret_cast<vnode *>(vnodes.find(&target));
if (found==NULL)
return NULL;
else
return reinterpret_cast<vpage *>(found->vpages.top());
}
// If this vnode is already in use, add this vpage to it and return one to clone. If not, add this vnode, with this vpage, and return null
// This method will make a new vnode object
vpage *vnodeManager::addVnode(vnode &target,vpage &vp, vnode **newOne) {
vpage *retVal;
//error ("vnodeManager::addVnode : Adding by reference node %x, fd = %d, offset = %d\n",&target,target.fd,target.offset);
vnode *found=reinterpret_cast<vnode *>(vnodes.find(&target));
if (!found) {
found=new (vmBlock->vnodePool->get()) vnode;
found->fd=target.fd;
found->offset=target.offset;
found->valid=target.valid;
vnodes.add(found);
*newOne=found;
retVal=NULL;
}
else {
retVal=reinterpret_cast<vpage *>(found->vpages.top());
*newOne=retVal->getBacking();
}
found->vpages.add(&vp);
//error ("vnodeManager::addVnode returning %x, newOne = %x \n");
return retVal;
}
// If this vnode is already in use, add this vpage to it and return one to clone. If not, add this vnode, with this vpage, and return null
// This method will NOT make a new vnode object
vpage *vnodeManager::addVnode(vnode *target,vpage &vp) {
vpage *retVal;
//error ("vnodeManager::addVnode : Adding by pointer node %x, fd = %d, offset = %d\n",target,target->fd,target->offset);
vnode *found=reinterpret_cast<vnode *>(vnodes.find(target));
if (!found) {
found=target;
vnodes.add(found);
retVal=NULL;
}
else
retVal=reinterpret_cast<vpage *>(found->vpages.top());
found->vpages.add(&vp);
// found->vpages.dump();
// vnodes.dump();
return retVal;
}
// Remove a vpage from the manager; return "is this the last one"
bool vnodeManager::remove(vnode &target,vpage &vp) {
//error ("vnodeManager::remove : Removing by reference node %x, fd = %d, offset = %d\n",&target,target.fd,target.offset);
vnode *found=reinterpret_cast<vnode *>(vnodes.find(&target));
if (!found) {
vnodes.dump();
throw ("An attempt to remove from an unknown vnode occured!\n");
}
found->vpages.remove(&vp);
if (found->vpages.count()==0) {
vnodes.remove(found);
return true;
}
else
return false;
}
void vnodeManager::dump(void) {
for (hashIterate hi(vnodes);node *cur=hi.get();) {
vnode *found=reinterpret_cast<vnode *>(cur);
error ("vnodeManager::dump found vnode:");
found->dump();
}
}

View File

@ -1,21 +0,0 @@
#ifndef VNODE_MANAGER
#define VNODE_MANAGER
#include <vpage.h>
#include <hashTable.h>
// vnode manager tracks which vnodes are already mapped to pages
// and facilitates sharing of memory containing the same disk space.
class vnodeManager
{
public:
vnodeManager(void);
vpage *findVnode(vnode &target); // pass in a vnode, get back the "master" vpage
vpage *addVnode (vnode &target, vpage &vp,vnode **retOne);
vpage *addVnode(vnode *target,vpage &vp);
bool remove(vnode &target,vpage &vp);
void dump(void);
private:
hashTable vnodes;
};
#endif
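A rough illustration of the sharing flow this class enables (a sketch only, not part of the original header; it mirrors what vpage::setup does for SHARED mappings later in this commit):
vpage *shareExample(vnodeManager *man, vnode &fileNode, vpage &newPage) {
	vnode *actual=NULL;
	// If another vpage already maps this fd/offset pair, we get that "master" vpage back
	// and can reuse its physical page; otherwise fileNode is registered and NULL is returned.
	vpage *master=man->addVnode(fileNode,newPage,&actual);
	return master;	// NULL means newPage is the first mapping of this vnode
}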

View File

@ -1,28 +0,0 @@
#include "vnodePool.h"
#include "vmHeaderBlock.h"
#include "pageManager.h"
extern vmHeaderBlock *vmBlock;
vnode *poolvnode::get(void) {
vnode *ret=NULL;
if (unused.count()) {
//error ("poolvnode::get: Getting an unused one!\n");
ret=(vnode *)unused.next();
}
if (ret) {
//error ("poolvnode::get: Returning address:%x \n",ret);
return ret;
}
else {
page *newPage=vmBlock->pageMan->getPage();
//error ("poolvnode::get: Getting new page %lx!\n",newPage->getAddress());
if (!newPage)
throw ("Out of pages to allocate a pool!");
int newCount=PAGE_SIZE/sizeof(vnode);
//error ("poolvnode::get: Adding %d new elements to the pool!\n",newCount);
for (int i=0;i<newCount;i++)
unused.add(((node *)(newPage->getAddress()+(i*sizeof(vnode)))));
return (get()); // A little cheat - call self again to get the first one from stack...
}
}

View File

@ -1,15 +0,0 @@
#include <OS.h>
#include "lockedList.h"
class area;
class vnode;
class poolvnode
{
private:
lockedList unused;
public:
poolvnode(void) {;}
vnode *get(void);
void put(vnode *in)
{ unused.add((node *)in); }
};

View File

@ -1,250 +0,0 @@
#include "vpage.h"
#include "vnodePool.h"
#include "vmHeaderBlock.h"
#include "areaManager.h"
#include "vnodeManager.h"
#include <stdio.h>
#include <string.h>
#include <errno.h>
extern vmHeaderBlock *vmBlock;
// Write this vpage out if necessary
void vpage::flush(void) {
if (physPage && getProtection()==writable && isDirty()) {
// error ("vpage::write_block: writing, backingNode->fd = %d, backingNode->offset = %d, address = %x\n",backingNode->fd, backingNode->offset,physPage->getAddress());
if (-1==lseek(backingNode->fd,backingNode->offset,SEEK_SET))
error ("vpage::flush:seek failed, fd = %d, errno = %d, %s\n",backingNode->fd,errno,strerror(errno));
if (-1==write(backingNode->fd,(void *)(physPage->getAddress()),PAGE_SIZE))
error ("vpage::flush: failed address =%x, fd = %d, offset = %d, errno = %d, %s\n",
start_address,backingNode->fd, backingNode->offset, errno,strerror(errno));
backingNode->valid=true;
//error ("vpage::write_block: done, backingNode->fd = %d, backingNode->offset = %d, address = %x\n",backingNode->fd, backingNode->offset,loc);
}
}
// Load this vpage in if necessary
void vpage::refresh(void) {
// error ("vpage::refresh: reading into %x\n",physPage->getAddress());
// backingNode->dump();
if (backingNode->valid==false)
return; // Do nothing. This prevents "garbage" data on disk from being read in...
if (-1==lseek(backingNode->fd,backingNode->offset,SEEK_SET))
error ("vpage::refresh: seek failed, fd = %d, errno = %d, %s\n",backingNode->fd,errno,strerror(errno));
if (-1==read(backingNode->fd,(void *)(physPage->getAddress()),PAGE_SIZE))
error ("vpage::refresh: failed, fd = %d, errno = %d, %s\n",backingNode->fd,errno,strerror(errno));
}
// Simple, empty constructor
vpage::vpage(void) : physPage(NULL),backingNode(NULL),start_address(0),bits(0)
{
}
// Does the real setup work for making a vpage.
// backing and/or physMem can be NULL/0.
void vpage::setup(unsigned long start,vnode *backing, page *physMem,protectType prot,pageState state, mmapSharing share) {
// Basic setup from parameters
vpage *clonedPage; // This is the page that this page is to be the clone of...
error ("vpage::setup: start = %x, vnode.fd=%d, vnode.offset=%d, physMem = %x\n",start,((backing)?backing->fd:0),((backing)?backing->offset:0), ((physMem)?(physMem->getAddress()):0));
physPage=physMem;
backingNode=backing;
setProtection(prot);
error ("vpage::setup: fields, part 1 set\n");
dirty(false);
swappable(state==NO_LOCK);
locked(state==FULL || state==CONTIGUOUS || state==LOMEM);	// LAZY pages get locked as they fault in; NO_LOCK pages never do
start_address=start;
error ("vpage::setup: fields, part 2 set\n");
// Set up the backing store. If one is specified, use it; if not, get a swap file page.
if (backingNode) { // This is an mmapped file (or a cloned area)
switch (share) {
case CLONEAREA: // This is a cloned area
case SHARED: // This is a shared mmap
clonedPage=vmBlock->vnodeMan->addVnode(*backingNode,*this,&backingNode); // Use the reference version which will make a new one if this one is not found
if (clonedPage) physPage=clonedPage->physPage;
break;
case PRIVATE: // This is a one way share - we get others changes (until we make a change) but no one gets our changes
clonedPage=vmBlock->vnodeMan->addVnode(*backingNode,*this,&backingNode); // Use the reference version which will make a new one if this one is not found
if (clonedPage) physPage=clonedPage->physPage;
setProtection((getProtection()<=readable)?getProtection(): copyOnWrite);
break;
case COPY: // This is not shared - get a fresh page and fresh swap file space and copy the original page
physPage=vmBlock->pageMan->getPage();
clonedPage=vmBlock->vnodeMan->findVnode(*backing); // Find out if the page we are copying is in memory already...
if (clonedPage && clonedPage->physPage) // If it is in memory, copy its values
memcpy((void *)(physPage->getAddress()),(void *)(clonedPage->physPage->getAddress()),PAGE_SIZE);
else
refresh(); // otherwise, get a copy from disk...
backingNode=&(vmBlock->swapMan->findNode()); // Now get swap space (since we don't want to be backed by the file...
clonedPage=vmBlock->vnodeMan->addVnode(backingNode,*this); // Add this vnode to the vnode keeper
break;
}
}
else { // Going to swap file.
backingNode=&(vmBlock->swapMan->findNode());
clonedPage=vmBlock->vnodeMan->addVnode(backingNode,*this); // Use the pointer version which will use this one. Should always return NULL
}
error ("vpage::setup: Backing node set up\n");
// If there is no physical page already and we can't wait to get one, then get one now
if (!physPage && (state!=LAZY) && (state!=NO_LOCK)) {
physPage=vmBlock->pageMan->getPage();
error ("vpage::setup, state = %d, allocated page %x\n",state,physPage);
}
else { // We either don't need it or we already have it.
if (physPage)
atomic_add(&(physPage->count),1);
}
error ("vpage::setup: ended : start = %x, vnode.fd=%d, vnode.offset=%d, physMem = %x\n",start,((backing)?backing->fd:0),((backing)?backing->offset:0), ((physMem)?(physMem->getAddress()):0));
}
// Destruction.
void vpage::cleanup(void) {
if (physPage) { // Note that free means release one reference
//error ("vpage::cleanup, freeing physcal page %x\n",physPage);
vmBlock->pageMan->freePage(physPage); // This does nothing if someone else is using the physical page
}
if (backingNode) { // If no one else is using this vnode, wipe it out
if (vmBlock->vnodeMan->remove(*backingNode,*this))
if (backingNode->fd && (backingNode->fd==vmBlock->swapMan->getFD()))
vmBlock->swapMan->freeVNode(*backingNode);
else
vmBlock->vnodePool->put(backingNode);
}
}
// Change this pages protection
void vpage::setProtection(protectType prot) {
protection(prot);
// Change the hardware
}
// This is dispatched by the real interrupt handler, who locates us
// true = OK, false = panic.
bool vpage::fault(void *fault_address, bool writeError, int &in_count) {
// error ("vpage::fault: virtual address = %lx, write = %s\n",(unsigned long) fault_address,((writeError)?"true":"false"));
if (writeError && getProtection() != copyOnWrite && getProtection() != writable)
return false;
if (writeError && physPage) { // If we already have a page and this is a write, it is either a copy on write or a "dirty" notice
dirty(true);
if (getProtection()==copyOnWrite) { // Otherwise this was just a "let me know when I am dirty" notice and marking it dirty above was enough...
page *newPhysPage=vmBlock->pageMan->getPage();
// error ("vpage::fault - copy on write allocated page %x\n",newPhysPage);
memcpy((void *)(newPhysPage->getAddress()),(void *)(physPage->getAddress()),PAGE_SIZE);
physPage=newPhysPage;
setProtection(writable);
vmBlock->vnodeMan->remove(*backingNode,*this);
backingNode=&(vmBlock->swapMan->findNode()); // Need new backing store for this node, since it was copied, the original is no good...
vmBlock->vnodeMan->addVnode(backingNode,*this);
// Update the architecture specific stuff here...
}
return true;
}
// Guess this is the real deal. Get a physical page.
physPage=vmBlock->pageMan->getPage();
// error ("vpage::fault - regular - allocated page %x\n",physPage);
if (!physPage) // No room at the inn
return false;
// error ("vpage::fault: New page allocated! new physical address = %x vnode.fd=%d, vnode.offset=%d, \n",physPage->getAddress(),((backingNode)?backingNode->fd:0),((backingNode)?backingNode->offset:0));
// Update the architecture specific stuff here...
// This refresh is unneeded if the data was never written out...
// dump();
refresh(); // I wonder if these vnode calls are safe during an interrupt...
dirty(writeError); // If the client is writing, we are now dirty (or will be when we get back to user land)
in_count++;
//error ("vpage::fault: Refreshed\n");
// //dump();
//error ("vpage::fault: exiting\n");
return true;
}
bool vpage::lock(long flags) {
locked(true);
if (!physPage) {
physPage=vmBlock->pageMan->getPage();
if (!physPage)
return false;
refresh();
}
return true;
}
void vpage::unlock(long flags) {
if ((flags & B_DMA_IO) || (!(flags & B_READ_DEVICE)))
dirty(true);
locked(false);
}
char vpage::getByte(unsigned long address,areaManager *manager) {
if (!physPage)
if (!manager->fault((void *)(address),false))
throw ("vpage::getByte");
// error ("vpage::getByte: About to return %c from %x\n", *((char *)(address-start_address+physPage->getAddress())),(address-start_address+physPage->getAddress()));
return *((char *)(address-start_address+physPage->getAddress()));
}
void vpage::setByte(unsigned long address,char value,areaManager *manager) {
// error ("vpage::setByte: address = %d, value = %d\n",address, value);
if (!physPage)
if (!manager->fault((void *)(address),true))
throw ("vpage::setByte");
if (getProtection()>=writable)
*((char *)(address-start_address+physPage->getAddress()))=value;
else
throw ("vpage::setByte - no permission to write");
// error ("vpage::setByte: physical address = %d, value = %d\n",physPage->getAddress(), *((char *)(physPage->getAddress())));
}
int vpage::getInt(unsigned long address,areaManager *manager) {
// error ("vpage::getInt: address = %ld\n",address );
if (!physPage)
if (!manager->fault((void *)(address),false))
throw ("vpage::getInt");
//error ("vpage::getInt: About to return %d\n", *((char *)(address-start_address+physPage->getAddress())));
//dump();
return *((int *)(address-start_address+physPage->getAddress()));
}
void vpage::setInt(unsigned long address,int value,areaManager *manager) {
// error ("vpage::setInt: here I am!\n");
if (!physPage)
if (!manager->fault((void *)(address),true))
throw ("vpage::setInt");
if (getProtection()>=writable)
*((int *)(address-start_address+physPage->getAddress()))=value;
else
throw ("vpage::setInt - no permission to write");
// error ("vpage::setInt: leaving!\n");
}
// Swaps pages out where necessary.
bool vpage::pager(int desperation) {
//error ("vpage::pager start desperation = %d\n",desperation);
if (!isSwappable())
return false;
//error ("vpage::pager swappable\n");
switch (desperation) {
case 1: return false; break;
case 2: if (!physPage || getProtection()!=readable || isLocked()) return false;break;
case 3: if (!physPage || isDirty() || isLocked()) return false;break;
case 4: if (!physPage || isLocked()) return false;break;
case 5: if (!physPage || isLocked()) return false;break;
default: return false;break;
}
//error ("vpage::pager flushing\n");
flush();
//error ("vpage::pager freeing\n");
vmBlock->pageMan->freePage(physPage);
//error ("vpage::pager going to NULL\n");
physPage=NULL;
return true;
}
// Saves dirty pages
void vpage::saver(void) {
if (isDirty()) {
flush();
dirty(false);
}
}
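For orientation, the dispatch that the comment above fault() alludes to might look roughly like this (purely hypothetical; findVPage is an invented lookup helper, and the real dispatch lives in area/areaManager, which are not part of this hunk):
bool dispatchFault(area *a, void *faultAddress, bool writeError, int &faultCount) {
	vpage *vp=a->findVPage((unsigned long)faultAddress);	// hypothetical: locate the vpage covering the address
	if (!vp)
		return false;					// unresolvable fault: signal the app / panic in kernel mode
	return vp->fault(faultAddress,writeError,faultCount);
}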

View File

@ -1,70 +0,0 @@
#ifndef VPAGE_H
#define VPAGE_H
#include <vm.h>
#include <pageManager.h>
#include <swapFileManager.h>
class areaManager;
class vpage : public node
{
private:
page *physPage;
vnode *backingNode;
char bits; // bits 0-1 = protection, bit 2 = dirty, bit 3 = swappable, bit 4 = locked
unsigned long start_address;
public:
// Constructors and Destructors and related
vpage(void);
vpage(unsigned long address) {start_address=address-address%PAGE_SIZE;} // Only for lookups
// Setup should now only be called by the vpage manager...
void setup(unsigned long start,vnode *backing, page *physMem,protectType prot,pageState state, mmapSharing share=CLONEAREA); // backing and/or physMem can be NULL/0.
void cleanup(void);
// Mutators
void setProtection(protectType prot);
void flush(void); // write page to vnode, if necessary
void refresh(void); // Read page back in from vnode
bool lock(long flags); // lock this page into memory
void unlock(long flags); // unlock this page from memory
void dirty(bool yesOrNo) {if (yesOrNo) bits|=4; else bits &= ~4;}
void swappable(bool yesOrNo) {if (yesOrNo) bits|=8; else bits &= ~8;}
void locked(bool yesOrNo) {if (yesOrNo) bits|=16; else bits &= ~16;}
void protection(protectType prot) {bits = (bits & ~3) | (prot & 3);}	// clear the old protection bits before setting the new ones
// Accessors
protectType getProtection(void) {return (protectType)(bits & 3);}
bool isDirty(void) {return bits & 4;}
bool isSwappable(void) {return bits & 8;}
bool isLocked(void) {return bits & 16;}
void *getStartAddress(void) {return (void *)start_address;}
page *getPhysPage(void) {return physPage;}
vnode *getBacking(void) {return backingNode;}
bool isMapped(void) {return (physPage);}
unsigned long end_address(void) {return start_address+PAGE_SIZE;}
// Comparisson with others
ulong hash(void) {return start_address >> BITS_IN_PAGE_SIZE;}
bool operator==(vpage &rhs) {return rhs.start_address==start_address; }
bool contains(uint32 address) { return ((start_address<=address) && (end_address()>address)); }	// end_address() is the first byte past this page
// External methods for "server" type calls
bool fault(void *fault_address, bool writeError, int &in_count); // true = OK, false = panic.
bool pager(int desperation);
void saver(void);
// Debugging
void dump(void) {
error ("Dumping vpage %p, address = %lx, vnode-fd=%d, vnode-offset = %d, dirty = %d, swappable = %d, locked = %d\n",
this,start_address, ((backingNode)?(backingNode->fd):99999), ((backingNode)?(backingNode->offset):999999999),
isDirty(),isSwappable(),isLocked());
if (physPage)
physPage->dump();
else
error ("NULL\n");
}
char getByte(unsigned long offset,areaManager *manager); // This is for testing only
void setByte(unsigned long offset,char value,areaManager *manager); // This is for testing only
int getInt(unsigned long offset,areaManager *manager); // This is for testing only
void setInt(unsigned long offset,int value,areaManager *manager); // This is for testing only
};
#endif

View File

@ -1,37 +0,0 @@
#include "vpagePool.h"
#include "vmHeaderBlock.h"
#include "pageManager.h"
#include "vpage.h"
extern vmHeaderBlock *vmBlock;
vpage *poolvpage::get(void)
{
vpage *ret=NULL;
if (unused.count())
{
//error ("poolvpage::get: Getting an unused one!\n");
acquire_sem(inUse);
ret=(vpage *)unused.next();
release_sem(inUse);
}
if (ret)
{
//error ("poolvpage::get: Returning address:%x \n",ret);
return ret;
}
else
{
page *newPage=vmBlock->pageMan->getPage();
//error ("poolvpage::get: Getting new page %lx!\n",newPage->getAddress());
if (!newPage)
throw ("Out of pages to allocate a pool!");
int newCount=PAGE_SIZE/sizeof(vpage);
acquire_sem(inUse);
//error ("poolvpage::get: Adding %d new elements to the pool!\n",newCount);
for (int i=0;i<newCount;i++)
unused.add(((node *)(newPage->getAddress()+(i*sizeof(vpage)))));
release_sem(inUse);
return (get()); // A little cheat - call self again to get the first one from stack...
}
}

View File

@ -1,20 +0,0 @@
#include <OS.h>
#include "lockedList.h"
class vpage;
class poolvpage
{
private:
list unused;
sem_id inUse;
public:
poolvpage(void) {
inUse = create_sem(1,"vpagepool");
}
vpage *get(void);
void put(vpage *in) {
acquire_sem(inUse);
unused.add((node *)in);
release_sem(inUse);
}
};