More code cleanup. Removing unused variables. Simplifying and shrinking
vpage to be nearly a TLB entry. Final stripping will occur with kernel
integration. More comments.


git-svn-id: file:///srv/svn/repos/haiku/trunk/current@3042 a95241bf-73f2-0310-859d-f6bbb57e9c96
Michael Phipps 2003-03-26 04:00:38 +00:00
parent 80f714a205
commit d50d9a6a18
20 changed files with 343 additions and 315 deletions
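
The "shrinking vpage" mentioned in the message shows up in the vpage.h hunk near the end of this diff: the separate protection/dirty/swappable/locked members collapse into a single char of flag bits. A minimal standalone sketch of that packing (the bit positions mirror the diff; the protectType values are an assumption for illustration):

#include <cstdio>

enum protectType { none = 0, readable = 1, writable = 2, copyOnWrite = 3 }; // assumed values

class packedVpageBits {
	char bits; // bits 0-1: protection, bit 2: dirty, bit 3: swappable, bit 4: locked
public:
	packedVpageBits(void) : bits(0) {}
	// Mutators, mirroring the accessor names in the diff
	void protection(protectType prot) { bits = (bits & ~3) | (prot & 3); }
	void dirty(bool yesOrNo) { if (yesOrNo) bits |= 4; else bits &= ~4; }
	void swappable(bool yesOrNo) { if (yesOrNo) bits |= 8; else bits &= ~8; }
	void locked(bool yesOrNo) { if (yesOrNo) bits |= 16; else bits &= ~16; }
	// Accessors
	protectType getProtection(void) { return (protectType)(bits & 3); }
	bool isDirty(void) { return bits & 4; }
	bool isSwappable(void) { return bits & 8; }
	bool isLocked(void) { return bits & 16; }
};

int main() {
	packedVpageBits vp;
	vp.protection(writable);
	vp.dirty(true);
	printf("protection=%d dirty=%d locked=%d\n", vp.getProtection(), vp.isDirty(), vp.isLocked());
	return 0;
}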

View File

@ -1,3 +1,3 @@
SubDir OBOS_TOP src kernel vm2 ;
SharedLibrary vm : area.C areaManager.C areaPool.C cacheManager.C error.C page.C pageManager.C swapFileManager.C vmInterface.C vnodeManager.C vnodePool.C vpage.C vpagePool.C ;
SharedLibrary vm : area.C areaManager.C areaPool.C cacheManager.C error.C page.C pageManager.C swapFileManager.C vmInterface.C vnodeManager.C vnodePool.C vpage.C ;

View File

@ -4,19 +4,13 @@
#include "vpage.h"
#include "vnodePool.h"
#include "vnodeManager.h"
#include "vpagePool.h"
#include "vmHeaderBlock.h"
extern vmHeaderBlock *vmBlock;
ulong vpageHash (node &vp) {return reinterpret_cast <vpage &>(vp).hash();}
bool vpageisEqual (node &vp,node &vp2) {return reinterpret_cast <vpage &>(vp)==reinterpret_cast <vpage &>(vp2);}
// Simple constructor; real work is later
area::area(void) : vpages(AREA_HASH_TABLE_SIZE) {
vpages.setHash(vpageHash);
vpages.setIsEqual(vpageisEqual);
area::area(void) {
}
// Not much here, either
@ -27,94 +21,147 @@ void area::setup (areaManager *myManager) {
}
// Decide which algorithm to use for finding the next virtual address and try to find one.
unsigned long area::mapAddressSpecToAddress(addressSpec type,void * req,int pageCount) {
unsigned long area::mapAddressSpecToAddress(addressSpec type,void * req,int inPageCount) {
// We will lock in the callers
unsigned long base,requested=(unsigned long)req;
switch (type) {
case EXACT:
base=manager->getNextAddress(pageCount,requested);
base=manager->getNextAddress(inPageCount,requested);
if (base!=requested)
return 0;
break;
case BASE:
base=manager->getNextAddress(pageCount,requested);
base=manager->getNextAddress(inPageCount,requested);
break;
case ANY:
base=manager->getNextAddress(pageCount,USER_BASE);
base=manager->getNextAddress(inPageCount,USER_BASE);
break;
case ANY_KERNEL:
base=manager->getNextAddress(pageCount,KERNEL_BASE);
base=manager->getNextAddress(inPageCount,KERNEL_BASE);
break;
case CLONE: base=0;break; // Not sure what to do...
default: // should never happen
throw ("Unknown type passed to mapAddressSpecToAddress");
}
error ("area::mapAddressSpecToAddress, in type: %s, address = %x, size = %d\n", ((type==EXACT)?"Exact":(type==BASE)?"BASE":(type==ANY)?"ANY":(type==CLONE)?"CLONE":"ANY_KERNEL"), requested,pageCount);
error ("area::mapAddressSpecToAddress, in type: %s, address = %x, size = %d\n", ((type==EXACT)?"Exact":(type==BASE)?"BASE":(type==ANY)?"ANY":(type==CLONE)?"CLONE":"ANY_KERNEL"), requested,inPageCount);
return base;
}
vpage *area::getNthVpage(int pageNum) {
/*
error ("Inside getNthVPage; pageNum=%d, fullPages = %d, vpagesOnIndexPage=%d, vpagesOnNextPage=%d\n",pageNum,fullPages,vpagesOnIndexPage,vpagesOnNextPage);
error ("Inside getNthVPage; indexPage = %x, address = %x\n",indexPage,indexPage->getAddress());
for (int i=0;i<PAGE_SIZE/64;fprintf (stderr,"\n"),i++)
for (int j=0;j<32;j++)
fprintf (stderr,"%04.4x ",indexPage->getAddress()+i*64+j);
*/
if (pageNum<=vpagesOnIndexPage) { // Skip the page pointers, then skip to the right vpage number;
return &(((vpage *)(getNthPage(fullPages+1)))[pageNum]);
}
else {
page *myPage=getNthPage(((pageNum-vpagesOnIndexPage-1)/vpagesOnNextPage));
return (vpage *)(myPage->getAddress()+(((pageNum-vpagesOnIndexPage-1)%vpagesOnNextPage)*sizeof(vpage)));
}
}
void area::allocateVPages(int pageCountIn) {
// Allocate all of the physical page space that we will need here and now for vpages
// Allocate number of pages necessary to hold the vpages, plus the index page...
pageCount=pageCountIn;
indexPage=vmBlock->pageMan->getPage();
error ("area::allocateVPages : index page = %x, (physical address = %x\n",indexPage,indexPage->getAddress());
vpagesOnNextPage=PAGE_SIZE/sizeof(vpage); // Number of vpages per full physical page.
fullPages = pageCount / vpagesOnNextPage; // Number of full pages that we need.
int bytesLeftOnIndexPage = PAGE_SIZE-(fullPages*sizeof(vpage *)); // Room left on index page
vpagesOnIndexPage=bytesLeftOnIndexPage/sizeof(vpage);
if ((fullPages*vpagesOnNextPage + vpagesOnIndexPage)< pageCount) { // not enough room...
fullPages++;
bytesLeftOnIndexPage = PAGE_SIZE-(fullPages*sizeof(vpage *)); // Recalculate these, since they have changed.
vpagesOnIndexPage=bytesLeftOnIndexPage/sizeof(vpage);
}
// Allocate the physical page space.
for (int count=0;count<fullPages;count++) {
page *curPage=vmBlock->pageMan->getPage();
((page **)(indexPage->getAddress()))[count]=curPage;
}
error ("area::allocateVPages : index page = %x, (physical address = %x\n",indexPage,indexPage->getAddress());
}
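
A standalone sketch of the bookkeeping allocateVPages() and getNthVpage() perform above: one index page holds fullPages pointers to dedicated vpage pages, and whatever room is left on the index page holds vpages directly. The sizes below are assumptions, and the sketch uses a plain zero-based split rather than the <= / -1 convention in the code:

#include <cstdio>
#include <cstddef>

const size_t PAGE_SIZE = 4096;      // assumed
const size_t VPAGE_SIZE = 16;       // assumed sizeof(vpage) after shrinking
const size_t PTR_SIZE = sizeof(void *);

struct vpageLayout { size_t fullPages, vpagesOnIndexPage, vpagesOnNextPage; };

vpageLayout computeLayout(size_t pageCount) {
	vpageLayout l;
	l.vpagesOnNextPage = PAGE_SIZE / VPAGE_SIZE;              // vpages per dedicated page
	l.fullPages = pageCount / l.vpagesOnNextPage;             // dedicated pages needed
	size_t room = PAGE_SIZE - l.fullPages * PTR_SIZE;         // room left on the index page
	l.vpagesOnIndexPage = room / VPAGE_SIZE;
	if (l.fullPages * l.vpagesOnNextPage + l.vpagesOnIndexPage < pageCount) {
		l.fullPages++;                                        // not enough room: one more dedicated page
		room = PAGE_SIZE - l.fullPages * PTR_SIZE;
		l.vpagesOnIndexPage = room / VPAGE_SIZE;
	}
	return l;
}

// Locate the nth vpage: a slot on the index page itself, or (page, slot) on a dedicated page.
void locateNthVpage(const vpageLayout &l, size_t n, long &pageIndex, size_t &slot) {
	if (n < l.vpagesOnIndexPage) { pageIndex = -1; slot = n; }          // -1 = the index page
	else {
		pageIndex = (n - l.vpagesOnIndexPage) / l.vpagesOnNextPage;
		slot = (n - l.vpagesOnIndexPage) % l.vpagesOnNextPage;
	}
}

int main() {
	vpageLayout l = computeLayout(1000);
	printf("fullPages=%zu onIndexPage=%zu perPage=%zu\n", l.fullPages, l.vpagesOnIndexPage, l.vpagesOnNextPage);
	long p; size_t s;
	locateNthVpage(l, 999, p, s);
	printf("vpage 999 -> page %ld, slot %zu\n", p, s);
	return 0;
}
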
// This is the really interesting part of creating an area
status_t area::createAreaGuts( char *inName, int pageCount, void **address, addressSpec type, pageState inState, protectType protect, bool inFinalWrite, int fd, size_t offset, area *originalArea /* For clone only*/, mmapSharing share) {
status_t area::createAreaGuts( char *inName, int inPageCount, void **address, addressSpec type, pageState inState, protectType protect, bool inFinalWrite, int fd, size_t offset, area *originalArea /* For clone only*/, mmapSharing share) {
error ("area::createAreaGuts : name = %s, pageCount = %d, address = %lx, addressSpec = %d, pageState = %d, protection = %d, inFinalWrite = %d, fd = %d, offset = %d,originalArea=%ld\n",
inName,pageCount,address,type,inState,protect,inFinalWrite,fd,offset,originalArea);
inName,inPageCount,address,type,inState,protect,inFinalWrite,fd,offset,originalArea);
vpage *newPage;
// We need RAM - let's fail if we don't have enough... This is Be's way. I probably would do this differently...
if (!originalArea && (inState!=LAZY) && (inState!=NO_LOCK) && (pageCount>(vmBlock->pageMan->freePageCount())))
if (!originalArea && (inState!=LAZY) && (inState!=NO_LOCK) && (inPageCount>(vmBlock->pageMan->freePageCount())))
return B_NO_MEMORY;
// else
// error ("origArea = %d, instate = %d, LAZY = %d, NO_LOCK = %d, pageCountIn = %d, free pages = %d\n",
// originalArea, inState,LAZY ,NO_LOCK,pageCount,(vmBlock->pageMan->freePageCount()));
else
error ("origArea = %d, instate = %d, LAZY = %d, NO_LOCK = %d, pageCountIn = %d, free pages = %d\n",
originalArea, inState,LAZY ,NO_LOCK,inPageCount,(vmBlock->pageMan->freePageCount()));
// Get an address to start this area at
unsigned long base=mapAddressSpecToAddress(type,*address,pageCount);
unsigned long base=mapAddressSpecToAddress(type,*address,inPageCount);
if (base==0)
return B_ERROR;
// Set up some basic info
strcpy(name,inName);
state=inState;
start_address=base;
end_address=base+(pageCount*PAGE_SIZE)-1;
*address=(void *)base;
finalWrite=inFinalWrite;
error ("area::createAreaGuts:About to allocate vpages\n");
allocateVPages(inPageCount);
error ("area::createAreaGuts:done allocating vpages\n");
// For non-cloned areas, make a new vpage for every page necessary.
if (originalArea==NULL) // Not for cloning
for (int i=0;i<pageCount;i++) {
newPage=new (vmBlock->vpagePool->get()) vpage;
newPage=new (getNthVpage(i)) vpage;
error ("got a vpage at %x\n",newPage);
if (fd) {
error ("area::createAreaGuts:populating vnode\n");
vnode newVnode;
newVnode.fd=fd;
newVnode.offset=offset+i*PAGE_SIZE;
newVnode.valid=true;
// vmBlock->vnodeManager->addVNode(newVnode,newPage);
error ("area::createAreaGuts:calling setup on %x\n",newPage);
newPage->setup(base+PAGE_SIZE*i,&newVnode,NULL,protect,inState,share);
error ("area::createAreaGuts:done with setup on %x\n",newPage);
}
else
else {
error ("area::createAreaGuts:calling setup on %x\n",newPage);
newPage->setup(base+PAGE_SIZE*i,NULL,NULL,protect,inState);
vpages.add(newPage);
error ("area::createAreaGuts:done with setup on %x\n",newPage);
}
}
else // cloned
// Need to lock other area, here, just in case...
// Make a copy of each page in the other area...
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;
newPage=new (vmBlock->vpagePool->get()) vpage;
for (int i=0;i<pageCount;i++) {
vpage *page=originalArea->getNthVpage(i);
newPage=new (getNthVpage(i)) vpage;
newPage->setup(base,page->getBacking(),page->getPhysPage(),protect,inState);// Cloned area has the same physical page and backing store...
vpages.add(newPage);
base+=PAGE_SIZE;
}
//error ("Dumping the area's hashtable");
//dump();
error ("Dumping the area's hashtable\n");
dump();
vmBlock->areas.add(this);
return B_OK;
}
status_t area::createAreaMappingFile(char *inName, int pageCount,void **address, addressSpec type,pageState inState,protectType protect,int fd,size_t offset, mmapSharing share) {
return createAreaGuts(inName,pageCount,address,type,inState,protect,true,fd,offset,NULL,share);
status_t area::createAreaMappingFile(char *inName, int inPageCount,void **address, addressSpec type,pageState inState,protectType protect,int fd,size_t offset, mmapSharing share) {
return createAreaGuts(inName,inPageCount,address,type,inState,protect,true,fd,offset,NULL,share);
}
status_t area::createArea(char *inName, int pageCount,void **address, addressSpec type,pageState inState,protectType protect) {
return createAreaGuts(inName,pageCount,address,type,inState,protect,false,0,0);
status_t area::createArea(char *inName, int inPageCount,void **address, addressSpec type,pageState inState,protectType protect) {
return createAreaGuts(inName,inPageCount,address,type,inState,protect,false,0,0);
}
// Clone another area.
@ -136,21 +183,19 @@ status_t area::cloneArea(area *origArea, char *inName, void **address, addressSp
// To free an area, iterate over its pages, final writing them if necessary, then call cleanup and put the vpage back in the pool
void area::freeArea(void) {
//error ("area::freeArea: starting \n");
// vpages.dump();
node *cur;
for (hashIterate hi(vpages);node *cur=hi.get();) {
//error ("area::freeArea: wasting a page: %x\n",cur);
vpage *page=reinterpret_cast<vpage *>(cur);
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
if (finalWrite) {
page->flush();
// error ("area::freeArea: flushed page %x\n",page);
}
page->cleanup();
//page->next=NULL;
vmBlock->vpagePool->put(page);
}
vpages.~hashTable();
for (int i=0;i<fullPages;i++)
vmBlock->pageMan->freePage(getNthPage(i));
vmBlock->pageMan->freePage(indexPage);
// error ("area::freeArea ----------------------------------------------------------------\n");
// vmBlock->vnodeMan->dump();
//error ("area::freeArea: unlocking \n");
@ -161,7 +206,7 @@ void area::freeArea(void) {
status_t area::getInfo(area_info *dest) {
dest->area=areaID;
strcpy(dest->name,name);
dest->size=end_address-start_address;
dest->size=pageCount*PAGE_SIZE;
dest->lock=state;
dest->protection=protection;
dest->team=manager->getTeam();
@ -169,8 +214,8 @@ status_t area::getInfo(area_info *dest) {
dest->in_count=in_count;
dest->out_count=out_count;
dest->copy_count=0;
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
if (page->isMapped())
dest->ram_size+=PAGE_SIZE;
}
@ -180,52 +225,62 @@ status_t area::getInfo(area_info *dest) {
bool area::contains(const void *address) {
unsigned long base=(unsigned long)(address);
// error ("area::contains: looking for %d in %d -- %d, value = %d\n",base,start_address,end_address, ((start_address<=base) && (end_address>=base)));
// error ("area::contains: looking for %d in %d -- %d, value = %d\n",base,getStartAddress(),getEndAddress(), ((getStartAddress()<=base) && (getEndAddress()>=base)));
return ((start_address<=base) && (base<=end_address));
return ((getStartAddress()<=base) && (base<=getEndAddress()));
}
// Resize an area.
status_t area::resize(size_t newSize) {
size_t oldSize =end_address-start_address+1;
size_t oldSize =pageCount*PAGE_SIZE+1;
// Duh. Nothing to do.
if (newSize==oldSize)
return B_OK;
// Grow the area. Figure out how many pages, allocate them and set them up
if (newSize>oldSize) {
int pageCount = (newSize - oldSize + PAGE_SIZE - 1) / PAGE_SIZE;
error ("Old size = %d, new size = %d, pageCount = %d\n",oldSize,newSize,pageCount);
vpage *newPage;
for (int i=0;i<pageCount;i++) {
newPage=new (vmBlock->vpagePool->get()) vpage;
newPage->setup(end_address+PAGE_SIZE*i-1,NULL,NULL,protection,state);
vpages.add(newPage);
if (newSize>oldSize) { // Grow the area. Figure out how many pages, allocate them and set them up
int newPageCount = (newSize - oldSize + PAGE_SIZE - 1) / PAGE_SIZE;
if (!mapAddressSpecToAddress(EXACT,(void *)(getEndAddress()+1),newPageCount)) // Ensure that the address space is available...
return B_ERROR;
int oldPageMax=vpagesOnIndexPage+(fullPages*vpagesOnNextPage); // Figure out what remaining, empty slots we have...
if (oldPageMax<newSize) { // Do we have enough room in the existing area?
page *oldIndexPage=indexPage; // Guess not. Remember where we were...
int oldVpagesOnIndexPage=vpagesOnIndexPage;
int oldVpagesOnNextPage=vpagesOnNextPage;
int oldFullPages=fullPages;
allocateVPages(newSize/PAGE_SIZE); // Get room
for (int i=0;i<oldSize/PAGE_SIZE;i++)//Shift over existing space...
if (i<=vpagesOnIndexPage)
memcpy(getNthVpage(i),(void *)((indexPage->getAddress()+sizeof(page *)*fullPages)+(sizeof(vpage)*i)),sizeof(vpage));
else {
page *myPage=(page *)(indexPage->getAddress()+((i-vpagesOnIndexPage-1)/vpagesOnNextPage)*sizeof(page *));
memcpy(getNthVpage(i),(void *)(myPage->getAddress()+(((i-vpagesOnIndexPage-1)%vpagesOnNextPage)*sizeof(vpage))),sizeof(vpage));
}
}
for (int i=oldSize;i<newSize;i+=PAGE_SIZE) { // Fill in the new space
vpage *newPage=new (getNthVpage(i/PAGE_SIZE)) vpage; // build a new vpage
newPage->setup(getEndAddress()+PAGE_SIZE*i-1,NULL,NULL,protection,state); // and set it up
}
error ("Old size = %d, new size = %d, pageCount = %d\n",oldSize,newSize,pageCount);
dump();
}
else { // Ewww. Shrinking. This is ugly right now.
size_t newFinalAddress=start_address+newSize;
vpage *oldPage;
for (hashIterate hi(vpages);node *cur=hi.get();) {
oldPage=reinterpret_cast<vpage *>(cur);
if (oldPage->getStartAddress() > (reinterpret_cast<void *> (newFinalAddress))) {
vpages.remove(cur);
if (finalWrite)
oldPage->flush();
oldPage->cleanup();
vmBlock->vpagePool->put(oldPage);
}
else { // Shrinking - not even going to free up the vpages - that could be a bad decision
size_t newFinalBlock=newSize/PAGE_SIZE;
for (int i=newFinalBlock;i<pageCount;i++) {
vpage *oldPage=getNthVpage(i);
if (finalWrite)
oldPage->flush();
oldPage->cleanup();
}
}
end_address=start_address+newSize;
return B_OK;
}
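
A small standalone check of the growth arithmetic used above: the number of pages to add is a rounded-up division, so a partial page still costs a full page (PAGE_SIZE is an assumption):

#include <cstdio>
#include <cassert>

const unsigned long PAGE_SIZE = 4096; // assumed

int main() {
	unsigned long oldSize = 10 * PAGE_SIZE;
	unsigned long newSize = oldSize + 3 * PAGE_SIZE + 100;               // 100 bytes past a page boundary
	int newPageCount = (newSize - oldSize + PAGE_SIZE - 1) / PAGE_SIZE;  // rounded-up page count
	assert(newPageCount == 4);                                           // the partial page still needs a whole page
	printf("growing by %d pages\n", newPageCount);
	return 0;
}
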
// When the protection for the area changes, the protection for every one of the pages must change
status_t area::setProtection(protectType prot) {
dump();
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
error ("setting protection on %x\n",page);
page->setProtection(prot);
}
@ -233,11 +288,6 @@ status_t area::setProtection(protectType prot) {
return B_OK;
}
vpage *area::findVPage(unsigned long address) {
vpage findMe(address);
// error ("area::findVPage: finding %ld\n",address);
return reinterpret_cast <vpage *>(vpages.find(&findMe));
}
// To fault, find the vpage associated with the fault and call its fault function
bool area::fault(void *fault_address, bool writeError) { // true = OK, false = panic.
@ -281,8 +331,8 @@ void area::setInt(unsigned long address,int value) { // This is for testing only
// For every one of our vpages, call the vpage's pager
void area::pager(int desperation) {
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
if (page->pager(desperation))
out_count++;
}
@ -290,19 +340,17 @@ void area::pager(int desperation) {
// For every one of our vpages, call the vpage's saver
void area::saver(void) {
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;
for (int vp=0;vp<pageCount;vp++) {
vpage *page=getNthVpage(vp);
page->saver();
}
}
void area::dump(void) {
error ("area::dump: size = %ld, lock = %d, address = %lx\n",end_address-start_address,state,start_address);
vpages.dump();
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;
page->dump();
cur=cur->next;
error ("area::dump: size = %ld, lock = %d, address = %lx\n",pageCount*PAGE_SIZE,state,start_address);
for (int vp=0;vp<pageCount;vp++) {
error ("Dumping vpage %d of %d\n",vp,pageCount);
getNthVpage(vp)->dump();
}
}
@ -311,8 +359,7 @@ long area::get_memory_map(const void *address, ulong numBytes, physical_entry *t
long prevMem=0,tableEntry=-1;
// Cycle over each "page to find";
for (int byteOffset=0;byteOffset<=numBytes;byteOffset+=PAGE_SIZE) {
vpage search(vbase+byteOffset);
vpage *found=reinterpret_cast<vpage *>(vpages.find(&search));
vpage *found=findVPage(((unsigned long)(address))+byteOffset);
if (!found) return B_ERROR;
unsigned long mem=found->getPhysPage()->getAddress();
if (mem!=prevMem+PAGE_SIZE) {
@ -321,7 +368,7 @@ long area::get_memory_map(const void *address, ulong numBytes, physical_entry *t
prevMem=mem;
table[tableEntry].address=(void *)mem;
}
table[tableEntry].size+=PAGE_SIZE;
table[tableEntry].size=+PAGE_SIZE;
}
if (++tableEntry==numEntries)
return B_ERROR; // Ran out of places to fill in
@ -331,8 +378,7 @@ long area::get_memory_map(const void *address, ulong numBytes, physical_entry *t
long area::lock_memory(void *address, ulong numBytes, ulong flags) {
unsigned long vbase=(unsigned long)address;
for (int byteOffset=0;byteOffset<=numBytes;byteOffset+=PAGE_SIZE) {
vpage search(vbase+byteOffset);
vpage *found=reinterpret_cast<vpage *>(vpages.find(&search));
vpage *found=findVPage((unsigned long)address+byteOffset);
if (!found) return B_ERROR;
if (!found->lock(flags)) return B_ERROR;
}
@ -342,8 +388,7 @@ long area::lock_memory(void *address, ulong numBytes, ulong flags) {
long area::unlock_memory(void *address, ulong numBytes, ulong flags) {
unsigned long vbase=(unsigned long)address;
for (int byteOffset=0;byteOffset<=numBytes;byteOffset+=PAGE_SIZE) {
vpage search(vbase+byteOffset);
vpage *found=reinterpret_cast<vpage *>(vpages.find(&search));
vpage *found=findVPage((unsigned long)address+byteOffset);
if (!found) return B_ERROR;
found->unlock(flags);
}
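
A standalone sketch of the run-coalescing that get_memory_map() does above: walk the pages of a range, start a new table entry whenever the physical address is not contiguous with the previous one, and otherwise grow the current entry. The types and sizes below are stand-ins for physical_entry and PAGE_SIZE:

#include <cstdio>

const unsigned long PAGE_SIZE = 4096;                          // assumed

struct entry { unsigned long address; unsigned long size; };   // stands in for physical_entry

// Coalesce a list of per-page physical addresses into contiguous runs.
long buildMemoryMap(const unsigned long *pages, int pageCount, entry *table, long numEntries) {
	long tableEntry = -1;
	unsigned long prev = 0;
	for (int i = 0; i < pageCount; i++) {
		unsigned long mem = pages[i];
		if (tableEntry < 0 || mem != prev + PAGE_SIZE) {       // not contiguous: start a new run
			if (++tableEntry == numEntries)
				return -1;                                     // ran out of places to fill in
			table[tableEntry].address = mem;
			table[tableEntry].size = 0;
		}
		table[tableEntry].size += PAGE_SIZE;                   // grow the current run
		prev = mem;
	}
	return tableEntry + 1;                                     // number of entries used
}

int main() {
	unsigned long pages[] = { 0x10000, 0x11000, 0x12000, 0x40000 }; // three contiguous pages, then a gap
	entry table[4];
	long used = buildMemoryMap(pages, 4, table, 4);
	for (long i = 0; i < used; i++)
		printf("run %ld: address=%lx size=%lu\n", i, table[i].address, table[i].size);
	return 0;
}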

View File

@ -2,8 +2,9 @@
#define _AREA_H
#include "OS.h"
#include "vm.h"
#include "page.h"
#include "lockedList.h"
#include "hashTable.h"
//#include "hashTable.h"
class areaManager;
class vpage;
@ -11,19 +12,30 @@ class vpage;
class area : public node
{
protected:
hashTable vpages;
char name[B_OS_NAME_LENGTH];
pageState state;
protectType protection;
bool finalWrite;
area_id areaID;
int in_count;
int out_count;
int copy_count;
areaManager *manager;
unsigned long start_address;
unsigned long end_address;
vpage *findVPage(unsigned long);
int fullPages; // full pages of vpage pointers
int vpagesOnIndexPage; // number of vpages stored on the index page
int vpagesOnNextPage; // number of vpages on each "fullPage"
page *indexPage; // physical page of the index page
int pageCount; // count of the number of pages in this area
char name[B_OS_NAME_LENGTH]; // Our name
pageState state; // Allocation policy.
protectType protection; // read, r/w, copy on write
bool finalWrite; // Write blocks in this area out on freeing area?
area_id areaID; // Our numeric ID.
int in_count; // Number of pages read in
int out_count; // Number of pages written out
int copy_count; // Number of block copies that have been made
areaManager *manager; // Our manager/process
unsigned long start_address; // Where we start
vpage *findVPage(unsigned long address) { // Returns the page for this address.
return getNthVpage((address-start_address)/PAGE_SIZE);
// error ("area::findVPage: finding %ld\n",address);
}
void allocateVPages(int pageCount); // Allocates blocks for the vpages
page *getNthPage(int pageNum) { return &(((page *)(indexPage->getAddress()))[pageNum]); }
public:
// Constructors and Destructors and related
area(void);
@ -47,11 +59,12 @@ class area : public node
status_t getInfo(area_info *dest);
int getAreaID(void) {return areaID;}
unsigned long getSize(void) {return getEndAddress()-getStartAddress();}
unsigned long getPageCount(void) {return (getEndAddress()-getStartAddress())/PAGE_SIZE;}
unsigned long getPageCount(void) {return (pageCount);}
areaManager *getAreaManager(void) {return manager;}
unsigned long getEndAddress(void) {return end_address;}
unsigned long getEndAddress(void) {return getStartAddress()+(PAGE_SIZE*pageCount)-1;}
unsigned long getStartAddress(void) {return start_address;}
const char *getName(void) {return name;}
vpage *getNthVpage(int pageNum);
// Debugging
void dump(void);
@ -62,7 +75,7 @@ class area : public node
// Comparison with others
bool nameMatch(char *matchName) {return (strcmp(matchName,name)==0);}
bool couldAdd(unsigned long start,unsigned long end) { return ((end<start_address) || (start>end_address));}
bool couldAdd(unsigned long start,unsigned long end) { return ((end<start_address) || (start>getEndAddress()));}
bool contains(const void *address);
// External methods for "server" type calls
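
The header above drops the stored end_address and the vpage hash table: the end is now derived from pageCount, and a vpage lookup is plain index arithmetic. A minimal standalone illustration of those derived accessors (PAGE_SIZE is an assumption):

#include <cassert>

const unsigned long PAGE_SIZE = 4096; // assumed

struct areaRange {
	unsigned long start_address;
	int pageCount;
	unsigned long getStartAddress(void) { return start_address; }
	unsigned long getEndAddress(void) { return getStartAddress() + (PAGE_SIZE * pageCount) - 1; }
	unsigned long getSize(void) { return pageCount * PAGE_SIZE; }
	bool contains(unsigned long a) { return (getStartAddress() <= a) && (a <= getEndAddress()); }
	int vpageIndexFor(unsigned long a) { return (a - start_address) / PAGE_SIZE; } // what findVPage() feeds to getNthVpage()
};

int main() {
	areaRange r = { 0x80000000ul, 4 };
	assert(r.getEndAddress() == 0x80003fff);
	assert(r.contains(0x80003fff) && !r.contains(0x80004000));
	assert(r.vpageIndexFor(0x80002100) == 2);
	return 0;
}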

View File

@ -22,7 +22,7 @@ unsigned long areaManager::getNextAddress(int pages, unsigned long start) {
// This function needs to deal with the possibility that we run out of address space...
// areas.dump();
unsigned long end=start+(pages*PAGE_SIZE)-1;
for (struct node *cur=areas.rock;cur;cur=cur->next)
for (struct node *cur=areas.top();cur;cur=cur->next)
{
if (cur)
{
@ -77,7 +77,7 @@ area *areaManager::findArea(char *address) {
//error ("Finding area by string\n");
area *retVal=NULL;
lock();
for (struct node *cur=areas.rock;cur && !retVal;cur=cur->next)
for (struct node *cur=areas.top();cur && !retVal;cur=cur->next)
{
area *myArea=(area *)cur;
if (myArea->nameMatch(address))
@ -91,7 +91,7 @@ area *areaManager::findArea(char *address) {
area *areaManager::findArea(const void *address) {
// THIS DOES NOT HAVE LOCKING - all callers must lock.
// error ("Finding area by void * address\n");
for (struct node *cur=areas.rock;cur;cur=cur->next)
for (struct node *cur=areas.top();cur;cur=cur->next)
{
area *myArea=(area *)cur;
//error ("areaManager::findArea: Looking for %x between %x and %x\n",address,myArea->getStartAddress(),myArea->getEndAddress());
@ -114,7 +114,7 @@ area *areaManager::findAreaLock(area_id id) {
area *areaManager::findArea(area_id id) {
//error ("Finding area by area_id\n");
area *retVal=NULL;
for (struct node *cur=areas.rock;cur && !retVal;cur=cur->next)
for (struct node *cur=areas.top();cur && !retVal;cur=cur->next)
{
area *myArea=(area *)cur;
if (myArea->getAreaID()==id)
@ -166,7 +166,7 @@ int areaManager::createArea(char *AreaName,int pageCount,void **address, address
}
area *findAreaGlobal(int areaID) {
for (struct node *cur=vmBlock->areas.rock;cur;cur=cur->next) {
for (struct node *cur=vmBlock->areas.top();cur;cur=cur->next) {
area *myArea=(area *)cur;
if (((area *)(cur))->getAreaID()==areaID)
return myArea;
@ -274,7 +274,7 @@ void areaManager::setInt(unsigned long address,int value) {
// Call pager for each of our areas
void areaManager::pager(int desperation) {
lock();
for (struct node *cur=areas.rock;cur;cur=cur->next) {
for (struct node *cur=areas.top();cur;cur=cur->next) {
area *myArea=(area *)cur;
//error ("areaManager::pager; area = \n");
//myArea->dump();
@ -286,7 +286,7 @@ void areaManager::pager(int desperation) {
// Call saver for each of our areas
void areaManager::saver(void) {
lock();
for (struct node *cur=areas.rock;cur;cur=cur->next) {
for (struct node *cur=areas.top();cur;cur=cur->next) {
area *myArea=(area *)cur;
myArea->saver();
}
@ -388,3 +388,68 @@ long areaManager::unlock_memory(void *address, ulong numBytes, ulong flags) {
return retVal;
}
status_t areaManager::getAreaInfo(int areaID,area_info *dest) {
status_t retVal;
lock();
area *oldArea=findArea(areaID);
if (oldArea)
retVal=oldArea->getInfo(dest);
else
retVal=B_ERROR;
unlock();
return retVal;
}
int areaManager::getAreaByName(char *name) {
int retVal;
lock();
area *oldArea=findArea(name);
if (oldArea)
retVal= oldArea->getAreaID();
else
retVal= B_ERROR;
unlock();
return retVal;
}
status_t areaManager::setProtection(int areaID,protectType prot) {
status_t retVal;
error ("area::setProtection about to lock\n");
lock();
error ("area::setProtection locked\n");
area *myArea=findArea(areaID);
if (myArea)
retVal= myArea->setProtection(prot);
else
retVal= B_ERROR;
unlock();
error ("area::setProtection unlocked\n");
return retVal;
}
status_t areaManager::resizeArea(int Area,size_t size) {
status_t retVal;
lock();
area *oldArea=findArea(Area);
if (oldArea)
retVal= oldArea->resize(size);
else
retVal= B_ERROR;
unlock();
return retVal;
}
status_t areaManager::getInfoAfter(int32 & areaID,area_info *dest) {
status_t retVal;
lock();
area *oldArea=findArea(areaID);
if (oldArea->next)
{
area *newCurrent=(reinterpret_cast<area *>(oldArea->next));
retVal=newCurrent->getInfo(dest);
areaID=(int)newCurrent;
}
else
retVal=B_ERROR;
unlock();
return retVal;
}

View File

@ -4,10 +4,10 @@
class areaManager // One of these per process
{
private:
orderedList areas;
team_id team;
sem_id myLock;
static long nextAreaID;
orderedList areas; // A list, ordered by address, of our areas.
team_id team; // The team that we belong to
sem_id myLock; // a lock for adding/searching/removing teams
static long nextAreaID; // The next area id to be used. Should be in vmInterface or vmBlock
public:
// Constructors and Destructors and related
areaManager ();
@ -20,28 +20,8 @@ class areaManager // One of these per process
// Accessors
team_id getTeam(void) {return team;}
unsigned long getNextAddress(int pages,unsigned long minimum=USER_BASE);
status_t getAreaInfo(int areaID,area_info *dest) {
status_t retVal;
lock();
area *oldArea=findArea(areaID);
if (oldArea)
retVal=oldArea->getInfo(dest);
else
retVal=B_ERROR;
unlock();
return retVal;
}
int getAreaByName(char *name) {
int retVal;
lock();
area *oldArea=findArea(name);
if (oldArea)
retVal= oldArea->getAreaID();
else
retVal= B_ERROR;
unlock();
return retVal;
}
status_t getAreaInfo(int areaID,area_info *dest);
int getAreaByName(char *name);
void *mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset);
status_t munmap(void *addr,size_t len);
@ -51,46 +31,9 @@ class areaManager // One of these per process
area *findArea(char *address);
area *findArea(area_id id);
area *findAreaLock(area_id id);
status_t setProtection(int areaID,protectType prot) {
status_t retVal;
error ("area::setProtection about to lock\n");
lock();
error ("area::setProtection locked\n");
area *myArea=findArea(areaID);
if (myArea)
retVal= myArea->setProtection(prot);
else
retVal= B_ERROR;
unlock();
error ("area::setProtection unlocked\n");
return retVal;
}
status_t resizeArea(int Area,size_t size) {
status_t retVal;
lock();
area *oldArea=findArea(Area);
if (oldArea)
retVal= oldArea->resize(size);
else
retVal= B_ERROR;
unlock();
return retVal;
}
status_t getInfoAfter(int32 & areaID,area_info *dest) {
status_t retVal;
lock();
area *oldArea=findArea(areaID);
if (oldArea->next)
{
area *newCurrent=(reinterpret_cast<area *>(oldArea->next));
retVal=newCurrent->getInfo(dest);
areaID=(int)newCurrent;
}
else
retVal=B_ERROR;
unlock();
return retVal;
}
status_t setProtection(int areaID,protectType prot);
status_t resizeArea(int Area,size_t size);
status_t getInfoAfter(int32 & areaID,area_info *dest);
void lock() { acquire_sem(myLock); }
void unlock() {release_sem(myLock);}
long get_memory_map(const void *address, ulong numBytes, physical_entry *table, long numEntries);

View File

@ -7,33 +7,25 @@
extern vmHeaderBlock *vmBlock;
// If we can get one from an existing block, cool. If not, get a new block, create as many as will fit in the block, put them on the free list and call ourself recursively
area *poolarea::get(void)
{
area *poolarea::get(void) {
area *ret=NULL;
if (unused.count())
{
if (unused.count()) {
//error ("poolarea::get: Getting an unused one!\n");
acquire_sem(inUse);
ret=(area *)unused.next();
release_sem(inUse);
}
if (ret)
{
if (ret) {
//error ("poolarea::get: Returning address:%x \n",ret);
return ret;
}
else
{
else {
page *newPage=vmBlock->pageMan->getPage();
//error ("poolarea::get: Getting new page %lx!\n",newPage->getAddress());
if (!newPage)
throw ("Out of pages to allocate a pool!");
int newCount=PAGE_SIZE/sizeof(area);
acquire_sem(inUse);
//error ("poolarea::get: Adding %d new elements to the pool!\n",newCount);
for (int i=0;i<newCount;i++)
unused.add(((node *)(newPage->getAddress()+(i*sizeof(area)))));
release_sem(inUse);
return (get()); // A little cheat - call self again to get the first one from stack...
}
}

View File

@ -4,20 +4,16 @@ class area;
class poolarea
{
private:
list unused;
lockedList unused;
sem_id inUse;
public:
// Constructors and Destructors and related
poolarea(void) {
inUse = create_sem(1,"areapool");
}
poolarea(void) { }
// Mutators
area *get(void);
void put(area *in) {
acquire_sem(inUse);
unused.add((node *)in);
release_sem(inUse);
}
};

View File

@ -1,6 +1,5 @@
#include <new.h>
#include <cacheManager.h>
#include <vpagePool.h>
#include "vmHeaderBlock.h"
// functions for hash and isEqual. No surprises
@ -12,9 +11,7 @@ bool vnodeisEqual (node &vp,node &vp2) {
}
extern vmHeaderBlock *vmBlock;
// TODO - we need to (somehow) make sure that the same vnodes here are shared with mmap.
// Maybe a vnode manager...
// Make the cache lockable
cacheManager::cacheManager(void) : area (),cacheMembers(30) {
myLock=create_sem(1,"Cache Manager Semaphore");
cacheMembers.setHash(vnodeHash);
@ -42,31 +39,27 @@ void *cacheManager::findBlock(vnode *target,bool readOnly) {
// No cache hit found; have to make a new one. Find a virtual page, create a vnode, and map.
void *cacheManager::createBlock(vnode *target,bool readOnly, cacheMember *candidate) {
bool foundSpot=false;
vpage *prev=NULL,*cur=NULL;
vpage *cur=NULL;
unsigned long begin=CACHE_BEGIN;
// Find a place in the cache's virtual space to put this vnode...
if (vpages.rock)
for (cur=(reinterpret_cast <vpage *>(vpages.rock));!foundSpot && cur;cur=reinterpret_cast <vpage *>(cur->next))
if (cur->getStartAddress()!=(void *)begin)
foundSpot=true;
else { // no joy
begin+=PAGE_SIZE;
prev=cur;
}
lock();
// Find a place in the cache's virtual space to put this vnode...
// This *HAS* to succeed, because the address space should be larger than physical memory...
for (int i=0;!foundSpot && i<pageCount;i++) {
cur=getNthVpage(i);
foundSpot=!cur->getPhysPage();
}
// Create a vnode here
vpage *newPage = new (vmBlock->vpagePool->get()) vpage;
newPage->setup(begin,target,NULL,((readOnly)?readable:writable),NO_LOCK);
vpages.add(newPage);
cacheMembers.add(newPage);
cur->setup((unsigned long)(cur->getStartAddress()),target,NULL,((readOnly)?readable:writable),NO_LOCK);
cacheMembers.add(cur);
// While this may not seem like a good idea (since this only happens on a write),
// it is because someone may only write to part of the file/page...
if (candidate)
memcpy(newPage->getStartAddress(),candidate->vp->getStartAddress(),PAGE_SIZE);
memcpy(cur->getStartAddress(),candidate->vp->getStartAddress(),PAGE_SIZE);
unlock();
// return address from this vnode
return (void *)begin;
return cur->getStartAddress();
}
void *cacheManager::readBlock(vnode *target) {

View File

@ -75,7 +75,7 @@ class hashTable : public list
if (!isEqual)
throw ("Attempting to use a hash table without setting up an 'isEqual' function");
unsigned long hashValue=hash(*findNode)%numRocks;
for (struct node *cur=rocks[hashValue]->rock;cur ;cur=cur->next)
for (struct node *cur=rocks[hashValue]->top();cur ;cur=cur->next)
if (isEqual(*findNode,*cur))
return cur;
return NULL;
@ -91,7 +91,7 @@ class hashTable : public list
// Debugging
void dump(void) {
for (int i=0;i<numRocks;i++)
for (struct node *cur=rocks[i]->rock;cur;cur=cur->next)
for (struct node *cur=rocks[i]->top();cur;cur=cur->next)
error ("hashTable::dump: On bucket %d of %d, At %p, next = %p\n",i,numRocks,cur,cur->next);
}
bool ensureSane (void) {

View File

@ -76,8 +76,8 @@ class list {
return true;
}
protected:
struct node *rock;
int nodeCount;
private:
};
#endif
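
The change above (and the matching ones in the pool and swap-file headers) hides the raw head pointer behind top() and moves the per-container semaphore into a lockedList. The real lockedList.h is not part of this diff, so the following is only a guess at its shape, using the OS.h semaphore calls already visible here:

#include <cstddef>
#include "OS.h"    // for sem_id, create_sem, acquire_sem, release_sem

struct node { struct node *next; };

class list {
public:
	list(void) : rock(NULL), nodeCount(0) {}
	struct node *top(void) { return rock; }       // read access to the head; rock itself is now protected
	void add(struct node *n) { n->next = rock; rock = n; nodeCount++; }
	struct node *next(void) {                     // pop the head, if any
		struct node *n = rock;
		if (n) { rock = n->next; nodeCount--; }
		return n;
	}
	int count(void) { return nodeCount; }
protected:
	struct node *rock;
	int nodeCount;
};

class lockedList : public list {
public:
	lockedList(void) { myLock = create_sem(1, "lockedList"); }
	void lock(void) { acquire_sem(myLock); }
	void unlock(void) { release_sem(myLock); }
	void add(struct node *n) { lock(); list::add(n); unlock(); }
	struct node *next(void) { lock(); struct node *n = list::next(); unlock(); return n; }
private:
	sem_id myLock;
};

Callers then walk a list with for (struct node *cur = someList.top(); cur; cur = cur->next), which is exactly the loop shape the areaManager and pageManager hunks switch to in this commit.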

View File

@ -8,7 +8,7 @@ class page : public node {
void *physicalAddress;
public:
// Constructors and Destructors and related
page(void) {cpuSpecific=NULL;physicalAddress=NULL;};
page(void) {};
void setup (void *address) {count=0;physicalAddress=address;};
// Accessors

View File

@ -17,12 +17,15 @@ void pageManager::setup(void *area,int pages) {
int pageOverhead=((pages*sizeof(page))+(PAGE_SIZE-1))/PAGE_SIZE;
for (int i=0;i<pages-pageOverhead;i++) {
page *newPage=(page *)(addOffset(area,i*sizeof(page)));
error ("Setting up a page object at %x, passing in %x\n",
newPage,
addOffset(area,(i+pageOverhead)*PAGE_SIZE));
newPage->setup(addOffset(area,(i+pageOverhead)*PAGE_SIZE));
unused.add(newPage);
}
totalPages=pages;
//error ("pageManager::setup - %d pages ready to rock and roll\n",unused.count());
//error ("pageManager::setup - %d pages ready to top() and roll\n",unused.count());
}
// Try to get a clean page first. If that fails, get a dirty one and clean it. Loop on this.
@ -39,7 +42,7 @@ page *pageManager::getPage(void) {
ret->zero();
} // This could fail if someone swooped in and stole our page.
}
//error ("pageManager::getPage - returning page %x, clean = %d, unused = %d, inuse = %x\n",ret,clean.count(),unused.count(),inUse.count());
error ("pageManager::getPage - returning page %x (phys address %x), clean = %d, unused = %d, inuse = %x\n",ret,ret->getAddress(),clean.count(),unused.count(),inUse.count());
inUse.add(ret);
ret->count++;
if (!ret)
@ -88,7 +91,7 @@ int pageManager::desperation(void) { // Formula to determine how desperate syste
void pageManager::dump(void) {
error ("Dumping the unused list (%d entries)\n",getUnusedCount());
unused.lock();
for (struct node *cur=unused.rock;cur;) {
for (struct node *cur=unused.top();cur;) {
page *thisPage=(page *)cur;
thisPage->dump();
cur=cur->next;
@ -96,7 +99,7 @@ void pageManager::dump(void) {
unused.unlock();
error ("Dumping the clean list (%d entries)\n",getCleanCount());
clean.lock();
for (struct node *cur=clean.rock;cur;) {
for (struct node *cur=clean.top();cur;) {
page *thisPage=(page *)cur;
thisPage->dump();
cur=cur->next;
@ -104,7 +107,7 @@ void pageManager::dump(void) {
error ("Dumping the inuse list (%d entries)\n",getInUseCount());
clean.unlock();
inUse.lock();
for (struct node *cur=inUse.rock;cur;) {
for (struct node *cur=inUse.top();cur;) {
page *thisPage=(page *)cur;
thisPage->dump();
cur=cur->next;

View File

@ -14,7 +14,6 @@ swapFileManager::swapFileManager(void)
swapFile = open("/boot/var/tmp/OBOS_swap",O_RDWR|O_CREAT,0x777 );
if (swapFile==-1)
error ("swapfileManager::swapFileManger: swapfile not opened, errno = %ul, %s\n",errno,strerror(errno));
lockFreeList=create_sem(1,"SwapFile Free List Semaphore"); // Should have team name in it.
}
// Try to get a page from the free list. If not, make a new page
@ -25,10 +24,8 @@ vnode &swapFileManager::findNode(void)
//error ("swapFileManager::findNode: Finding a new node for you, Master: ");
vnode *newNode;
//error ("locking in sfm\n");
lock();
newNode=reinterpret_cast<vnode *>(swapFileFreeList.next());
//error ("unlocking in sfm\n");
unlock();
if (!newNode)
{
newNode=new (vmBlock->vnodePool->get()) vnode;
@ -48,11 +45,9 @@ void swapFileManager::freeVNode(vnode &v)
if (!v.vpages.count())
{
//error ("locking in sfm\n");
lock();
//error ("swapFileManager::freeNode: Starting Freeing a new node for you, Master: offset:%d\n",v.offset);
v.valid=false;
swapFileFreeList.add(&v);
//error ("unlocking in sfm\n");
unlock();
}
}

View File

@ -4,13 +4,13 @@
#include <fcntl.h>
#include "vm.h"
#include "OS.h"
#include "lockedList.h"
class swapFileManager {
private:
int swapFile;
unsigned long maxNode;
list swapFileFreeList;
sem_id lockFreeList;
lockedList swapFileFreeList;
public:
// Constructors and Destructors and related
@ -19,10 +19,8 @@ class swapFileManager {
// Mutators
vnode &findNode(void); // Get an unused node
void write_block(vnode &node,void *loc,unsigned long size);
void write_block(vnode &node,void *loc,unsigned long size); // The general access points
void read_block(vnode &node,void *loc,unsigned long size);
void lock() {acquire_sem(lockFreeList);}
void unlock() {release_sem(lockFreeList);}
// Accessors
int getFD(void) {return swapFile;}

View File

@ -4,7 +4,6 @@
#ifndef I_AM_VM_INTERFACE
class poolarea;
class poolvpage;
class poolvnode;
class pageManager;
class swapFileManager;
@ -16,7 +15,6 @@ class lockedList;
struct vmHeaderBlock
{
poolarea *areaPool;
poolvpage *vpagePool;
poolvnode *vnodePool;
pageManager *pageMan;
swapFileManager *swapMan;

View File

@ -3,7 +3,6 @@
#include "lockedList.h"
#include "area.h"
#include "areaPool.h"
#include "vpagePool.h"
#include "vnodePool.h"
#include "pageManager.h"
#include "swapFileManager.h"
@ -78,7 +77,7 @@ vmInterface::vmInterface(int pages)
}
error ("Allocated an area. Address = %x\n",vmBlock);
// Figure out how many pages we need
int pageCount = (sizeof(poolarea)+sizeof(poolvpage)+sizeof(poolvnode)+sizeof(pageManager)+sizeof(swapFileManager)+sizeof(cacheManager)+sizeof(vmHeaderBlock)+PAGE_SIZE-1)/PAGE_SIZE;
int pageCount = (sizeof(poolarea)+sizeof(poolvnode)+sizeof(pageManager)+sizeof(swapFileManager)+sizeof(cacheManager)+sizeof(vmHeaderBlock)+PAGE_SIZE-1)/PAGE_SIZE;
if (pageCount >=pages)
{
error ("Hey! Go buy some ram! Trying to create a VM with fewer pages than the setup will take!\n");
@ -93,8 +92,6 @@ vmInterface::vmInterface(int pages)
//error ("Set up Page Man\n");
vmBlock->areaPool = new (currentAddress) poolarea;
currentAddress=addToPointer(currentAddress,sizeof(poolarea));
vmBlock->vpagePool = new (currentAddress) poolvpage;
currentAddress=addToPointer(currentAddress,sizeof(poolvpage));
vmBlock->vnodePool = new (currentAddress) poolvnode;
currentAddress=addToPointer(currentAddress,sizeof(poolvnode));
vmBlock->swapMan = new (currentAddress) swapFileManager;
@ -193,7 +190,7 @@ int vmInterface::getAreaByName(char *name)
{
int retVal=B_NAME_NOT_FOUND;
vmBlock->areas.lock();
for (struct node *cur=vmBlock->areas.rock;cur && retVal==B_NAME_NOT_FOUND;cur=cur->next) {
for (struct node *cur=vmBlock->areas.top();cur && retVal==B_NAME_NOT_FOUND;cur=cur->next) {
area *myArea=(area *)cur;
error ("vmInterface::getAreaByName comapring %s to passed in %s\n",myArea->getName(),name);
if (myArea->nameMatch(name))
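
The constructor hunk above carves one reserved block into the manager objects with placement new, bumping a cursor by each object's size (the vpage pool simply drops out of that sequence). A standalone sketch of that bump-allocation pattern, with illustrative stand-in types and malloc() in place of the real area creation:

#include <new>
#include <cstdio>
#include <cstdlib>

struct poolExample { int filler[8]; };        // stand-in for a pool object
struct managerExample { int filler[32]; };    // stand-in for a manager object

static void *addToPointer(void *base, size_t offset) {
	return (void *)((char *)base + offset);
}

int main() {
	const size_t PAGE_SIZE = 4096;                            // assumed
	size_t needed = sizeof(poolExample) + sizeof(managerExample);
	size_t pageCount = (needed + PAGE_SIZE - 1) / PAGE_SIZE;  // round up, as the diff does
	void *block = malloc(pageCount * PAGE_SIZE);              // stands in for the reserved area
	void *cursor = block;
	poolExample *pool = new (cursor) poolExample;             // construct in place
	cursor = addToPointer(cursor, sizeof(poolExample));
	managerExample *man = new (cursor) managerExample;        // next object right behind it
	cursor = addToPointer(cursor, sizeof(managerExample));
	printf("pool=%p manager=%p pages=%zu\n", (void *)pool, (void *)man, pageCount);
	free(block);
	return 0;
}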

View File

@ -4,33 +4,25 @@
#include "pageManager.h"
extern vmHeaderBlock *vmBlock;
vnode *poolvnode::get(void)
{
vnode *poolvnode::get(void) {
vnode *ret=NULL;
if (unused.count())
{
if (unused.count()) {
//error ("poolvnode::get: Getting an unused one!\n");
acquire_sem(inUse);
ret=(vnode *)unused.next();
release_sem(inUse);
}
if (ret)
{
if (ret) {
//error ("poolvnode::get: Returning address:%x \n",ret);
return ret;
}
else
{
else {
page *newPage=vmBlock->pageMan->getPage();
//error ("poolvnode::get: Getting new page %lx!\n",newPage->getAddress());
if (!newPage)
throw ("Out of pages to allocate a pool!");
int newCount=PAGE_SIZE/sizeof(vnode);
acquire_sem(inUse);
//error ("poolvnode::get: Adding %d new elements to the pool!\n",newCount);
for (int i=0;i<newCount;i++)
unused.add(((node *)(newPage->getAddress()+(i*sizeof(vnode)))));
release_sem(inUse);
return (get()); // A little cheat - call self again to get the first one from stack...
}
}

View File

@ -5,19 +5,11 @@ class vnode;
class poolvnode
{
private:
list unused;
sem_id inUse;
lockedList unused;
public:
poolvnode(void)
{
inUse = create_sem(1,"vnodepool");
}
poolvnode(void) {;}
vnode *get(void);
void put(vnode *in)
{
acquire_sem(inUse);
unused.add((node *)in);
release_sem(inUse);
}
{ unused.add((node *)in); }
};

View File

@ -11,7 +11,7 @@ extern vmHeaderBlock *vmBlock;
// Write this vpage out if necessary
void vpage::flush(void) {
if (physPage && protection==writable && dirty) {
if (physPage && getProtection()==writable && isDirty()) {
// error ("vpage::write_block: writing, backingNode->fd = %d, backingNode->offset = %d, address = %x\n",backingNode->fd, backingNode->offset,physPage->getAddress());
if (-1==lseek(backingNode->fd,backingNode->offset,SEEK_SET))
error ("vpage::flush:seek failed, fd = %d, errno = %d, %s\n",backingNode->fd,errno,strerror(errno));
@ -36,7 +36,7 @@ void vpage::refresh(void) {
}
// Simple, empty constructor
vpage::vpage(void) : physPage(NULL),backingNode(NULL),protection(none),dirty(false),swappable(false),start_address(0),end_address(0), locked(false)
vpage::vpage(void) : physPage(NULL),backingNode(NULL),start_address(0),bits(0)
{
}
@ -45,14 +45,16 @@ vpage::vpage(void) : physPage(NULL),backingNode(NULL),protection(none),dirty(fal
void vpage::setup(unsigned long start,vnode *backing, page *physMem,protectType prot,pageState state, mmapSharing share) {
// Basic setup from parameters
vpage *clonedPage; // This is the page that this page is to be the clone of...
// error ("vpage::setup: start = %x, vnode.fd=%d, vnode.offset=%d, physMem = %x\n",start,((backing)?backing->fd:0),((backing)?backing->offset:0), ((physMem)?(physMem->getAddress()):0));
error ("vpage::setup: start = %x, vnode.fd=%d, vnode.offset=%d, physMem = %x\n",start,((backing)?backing->fd:0),((backing)?backing->offset:0), ((physMem)?(physMem->getAddress()):0));
physPage=physMem;
backingNode=backing;
protection=prot;
dirty=false;
swappable=(state==NO_LOCK);
setProtection(prot);
error ("vpage::setup: fields, part 1 set\n");
dirty(false);
swappable(state==NO_LOCK);
locked(state==NO_LOCK);
start_address=start;
end_address=start+PAGE_SIZE-1;
error ("vpage::setup: fields, part 2 set\n");
// Set up the backing store. If one is specified, use it; if not, get a swap file page.
if (backingNode) { // This is an mmapped file (or a cloned area)
@ -65,7 +67,7 @@ void vpage::setup(unsigned long start,vnode *backing, page *physMem,protectType
case PRIVATE: // This is a one way share - we get others changes (until we make a change) but no one gets our changes
clonedPage=vmBlock->vnodeMan->addVnode(*backingNode,*this,&backingNode); // Use the reference version which will make a new one if this one is not found
if (clonedPage) physPage=clonedPage->physPage;
protection=(protection<=readable)?protection: copyOnWrite;
setProtection((getProtection()<=readable)?getProtection(): copyOnWrite);
break;
case COPY: // This is not shared - get a fresh page and fresh swap file space and copy the original page
physPage=vmBlock->pageMan->getPage();
@ -83,17 +85,18 @@ void vpage::setup(unsigned long start,vnode *backing, page *physMem,protectType
backingNode=&(vmBlock->swapMan->findNode());
clonedPage=vmBlock->vnodeMan->addVnode(backingNode,*this); // Use the pointer version which will use this one. Should always return NULL
}
error ("vpage::setup: Backing node set up\n");
// If there is no physical page already and we can't wait to get one, then get one now
if (!physPage && (state!=LAZY) && (state!=NO_LOCK)) {
physPage=vmBlock->pageMan->getPage();
// error ("vpage::setup, state = %d, allocated page %x\n",state,physPage);
error ("vpage::setup, state = %d, allocated page %x\n",state,physPage);
}
else { // We either don't need it or we already have it.
if (physPage)
atomic_add(&(physPage->count),1);
}
// error ("vpage::setup: ended : start = %x, vnode.fd=%d, vnode.offset=%d, physMem = %x\n",start,((backing)?backing->fd:0),((backing)?backing->offset:0), ((physMem)?(physMem->getAddress()):0));
error ("vpage::setup: ended : start = %x, vnode.fd=%d, vnode.offset=%d, physMem = %x\n",start,((backing)?backing->fd:0),((backing)?backing->offset:0), ((physMem)?(physMem->getAddress()):0));
}
// Destruction.
@ -113,7 +116,7 @@ void vpage::cleanup(void) {
// Change this pages protection
void vpage::setProtection(protectType prot) {
protection=prot;
protection(prot);
// Change the hardware
}
@ -121,16 +124,16 @@ void vpage::setProtection(protectType prot) {
// true = OK, false = panic.
bool vpage::fault(void *fault_address, bool writeError, int &in_count) {
// error ("vpage::fault: virtual address = %lx, write = %s\n",(unsigned long) fault_address,((writeError)?"true":"false"));
if (writeError && protection != copyOnWrite && protection != writable)
if (writeError && getProtection() != copyOnWrite && getProtection() != writable)
return false;
if (writeError && physPage) { // If we already have a page and this is a write, it is either a copy on write or a "dirty" notice
dirty=true;
if (protection==copyOnWrite) { // Else, this was just a "let me know when I am dirty"...
dirty(true);
if (getProtection()==copyOnWrite) { // Else, this was just a "let me know when I am dirty"...
page *newPhysPage=vmBlock->pageMan->getPage();
// error ("vpage::fault - copy on write allocated page %x\n",newPhysPage);
memcpy((void *)(newPhysPage->getAddress()),(void *)(physPage->getAddress()),PAGE_SIZE);
physPage=newPhysPage;
protection=writable;
setProtection(writable);
vmBlock->vnodeMan->remove(*backingNode,*this);
backingNode=&(vmBlock->swapMan->findNode()); // Need new backing store for this node, since it was copied, the original is no good...
vmBlock->vnodeMan->addVnode(backingNode,*this);
@ -148,7 +151,7 @@ bool vpage::fault(void *fault_address, bool writeError, int &in_count) {
// This refresh is unneeded if the data was never written out...
// dump();
refresh(); // I wonder if these vnode calls are safe during an interrupt...
dirty=writeError; // If the client is writing, we are now dirty (or will be when we get back to user land)
dirty(writeError); // If the client is writing, we are now dirty (or will be when we get back to user land)
in_count++;
//error ("vpage::fault: Refreshed\n");
// //dump();
@ -157,7 +160,7 @@ bool vpage::fault(void *fault_address, bool writeError, int &in_count) {
}
bool vpage::lock(long flags) {
locked=true;
locked(true);
if (!physPage) {
physPage=vmBlock->pageMan->getPage();
if (!physPage)
@ -169,8 +172,8 @@ bool vpage::lock(long flags) {
void vpage::unlock(long flags) {
if ((flags & B_DMA_IO) || (!(flags & B_READ_DEVICE)))
dirty=true;
locked=false;
dirty(true);
locked(false);
}
char vpage::getByte(unsigned long address,areaManager *manager) {
@ -186,7 +189,7 @@ void vpage::setByte(unsigned long address,char value,areaManager *manager) {
if (!physPage)
if (!manager->fault((void *)(address),true))
throw ("vpage::setByte");
if (protection>=writable)
if (getProtection()>=writable)
*((char *)(address-start_address+physPage->getAddress()))=value;
else
throw ("vpage::setByte - no permission to write");
@ -208,7 +211,7 @@ void vpage::setInt(unsigned long address,int value,areaManager *manager) {
if (!physPage)
if (!manager->fault((void *)(address),true))
throw ("vpage::setInt");
if (protection>=writable)
if (getProtection()>=writable)
*((int *)(address-start_address+physPage->getAddress()))=value;
else
throw ("vpage::setInt - no permission to write");
@ -218,15 +221,15 @@ void vpage::setInt(unsigned long address,int value,areaManager *manager) {
// Swaps pages out where necessary.
bool vpage::pager(int desperation) {
//error ("vpage::pager start desperation = %d\n",desperation);
if (!swappable)
if (!isSwappable())
return false;
//error ("vpage::pager swappable\n");
switch (desperation) {
case 1: return false; break;
case 2: if (!physPage || protection!=readable || locked) return false;break;
case 3: if (!physPage || dirty || locked) return false;break;
case 4: if (!physPage || locked) return false;break;
case 5: if (!physPage || locked) return false;break;
case 2: if (!physPage || getProtection()!=readable || isLocked()) return false;break;
case 3: if (!physPage || isDirty() || isLocked()) return false;break;
case 4: if (!physPage || isLocked()) return false;break;
case 5: if (!physPage || isLocked()) return false;break;
default: return false;break;
}
//error ("vpage::pager flushing\n");
@ -240,8 +243,8 @@ bool vpage::pager(int desperation) {
// Saves dirty pages
void vpage::saver(void) {
if (dirty) {
if (isDirty()) {
flush();
dirty=false;
dirty(false);
}
}
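
A standalone restatement of the eviction gate pager() applies above: each desperation level admits progressively more pages, and locked or unswappable pages never qualify. Plain bools stand in for the packed vpage state; this is an illustration, not the kernel code:

#include <cstdio>

struct pageFlags { bool present, readOnly, dirty, locked, swappable; };

bool mayEvict(const pageFlags &p, int desperation) {
	if (!p.swappable || !p.present || p.locked)
		return false;
	switch (desperation) {
		case 1: return false;           // not desperate yet: evict nothing
		case 2: return p.readOnly;      // only read-only pages
		case 3: return !p.dirty;        // any clean page
		case 4:
		case 5: return true;            // anything unlocked; dirty pages get flushed first
		default: return false;
	}
}

int main() {
	pageFlags cleanRO = { true, true, false, false, true };
	pageFlags dirtyRW = { true, false, true, false, true };
	printf("level 2: cleanRO=%d dirtyRW=%d\n", mayEvict(cleanRO, 2), mayEvict(dirtyRW, 2));
	printf("level 4: cleanRO=%d dirtyRW=%d\n", mayEvict(cleanRO, 4), mayEvict(dirtyRW, 4));
	return 0;
}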

View File

@ -9,17 +9,13 @@ class vpage : public node
{
private:
page *physPage;
vnode *backingNode;
protectType protection;
bool dirty;
bool swappable;
bool locked;
unsigned long start_address;
unsigned long end_address;
vnode *backingNode;
char bits; // 0/1 are protection, 2 == dirty, 3 == swappable, 4 == locked
unsigned long start_address; //bye
public:
// Constructors and Destructors and related
vpage(void);
vpage(unsigned long address) {start_address=address-address%PAGE_SIZE;end_address=start_address+PAGE_SIZE-1;} // Only for lookups
vpage(unsigned long address) {start_address=address-address%PAGE_SIZE;} // Only for lookups
// Setup should now only be called by the vpage manager...
void setup(unsigned long start,vnode *backing, page *physMem,protectType prot,pageState state, mmapSharing share=CLONEAREA); // backing and/or physMem can be NULL/0.
void cleanup(void);
@ -30,19 +26,26 @@ class vpage : public node
void refresh(void); // Read page back in from vnode
bool lock(long flags); // lock this page into memory
void unlock(long flags); // unlock this page from memory
void dirty(bool yesOrNo) {if (yesOrNo) bits|=4; else bits &= ~4;}
void swappable(bool yesOrNo) {if (yesOrNo) bits|=8; else bits &= ~8;}
void locked(bool yesOrNo) {if (yesOrNo) bits|=16; else bits &= ~16;}
void protection(protectType prot) {bits|= (prot & 3);}
// Accessors
protectType getProtection(void) {return protection;}
protectType getProtection(void) {return (protectType)(bits & 3);}
bool isDirty(void) {return bits & 4;}
bool isSwappable(void) {return bits & 8;}
bool isLocked(void) {return bits & 16;}
void *getStartAddress(void) {return (void *)start_address;}
page *getPhysPage(void) {return physPage;}
vnode *getBacking(void) {return backingNode;}
bool isMapped(void) {return (physPage);}
unsigned long end_address(void) {return start_address+PAGE_SIZE;}
// Comparison with others
ulong hash(void) {return start_address >> BITS_IN_PAGE_SIZE;}
bool operator==(vpage &rhs) {return rhs.start_address==start_address && rhs.end_address==end_address;}
bool contains(uint32 address) { return ((start_address<=address) && (end_address>=address)); }
bool operator==(vpage &rhs) {return rhs.start_address==start_address; }
bool contains(uint32 address) { return ((start_address<=address) && (end_address()>=address)); }
// External methods for "server" type calls
bool fault(void *fault_address, bool writeError, int &in_count); // true = OK, false = panic.
@ -53,7 +56,7 @@ class vpage : public node
void dump(void) {
error ("Dumping vpage %p, address = %lx, vnode-fd=%d, vnode-offset = %d, dirty = %d, swappable = %d, locked = %d\n",
this,start_address, ((backingNode)?(backingNode->fd):99999), ((backingNode)?(backingNode->offset):999999999),
dirty,swappable,locked);
isDirty(),isSwappable(),isLocked());
if (physPage)
physPage->dump();
else