Implemented the sharing of vnodes. Hopefully mmap and file cache and cloned

areas will not step on each other's feet...
Added comments all over the place.
Bugs squished


git-svn-id: file:///srv/svn/repos/haiku/trunk/current@2248 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Michael Phipps 2002-12-15 07:05:38 +00:00
parent 2674719b33
commit ceb3763e56
15 changed files with 191 additions and 101 deletions

View File

@ -1,8 +1,8 @@
SubDir OBOS_TOP src kernel vm2 ;
BinCommand vmTest : error.C area.C areaManager.C cacheManager.C page.C pageManager.C swapFileManager.C vmInterface.C vpage.C areaPool.C vnodePool.C vpagePool.C test.C : root be ;
BinCommand simpleTest : error.C area.C areaManager.C cacheManager.C page.C pageManager.C swapFileManager.C vmInterface.C vpage.C areaPool.C vnodePool.C vpagePool.C simpleTest.C : root be ;
BinCommand hashTest : error.C area.C areaManager.C cacheManager.C page.C pageManager.C swapFileManager.C vmInterface.C vpage.C areaPool.C vnodePool.C vpagePool.C hashTest.C : root be ;
BinCommand vmTest : error.C area.C areaManager.C cacheManager.C page.C pageManager.C swapFileManager.C vmInterface.C vpage.C areaPool.C vnodePool.C vpagePool.C vnodeManager.C test.C : root be ;
BinCommand simpleTest : error.C area.C areaManager.C cacheManager.C page.C pageManager.C swapFileManager.C vmInterface.C vpage.C areaPool.C vnodePool.C vpagePool.C vnodeManager.C simpleTest.C : root be ;
BinCommand hashTest : error.C area.C areaManager.C cacheManager.C page.C pageManager.C swapFileManager.C vmInterface.C vpage.C areaPool.C vnodePool.C vpagePool.C vnodeManager.C hashTest.C : root be ;
BinCommand olTest : error.C olTest.C : root be ;
BinCommand pmTest : error.C pageManager.C pageManTest.C page.C : root be ;

View File

@ -12,17 +12,20 @@ extern vmHeaderBlock *vmBlock;
// Hash/equality adapters: the hash table stores generic node references,
// so cast back to vpage before delegating to vpage's own hash/equality.
// NOTE(review): reinterpret_cast from node& assumes vpage derives from node - confirm in vpage.h.
ulong vpageHash (node &vp) {return reinterpret_cast <vpage &>(vp).hash();}
bool vpageisEqual (node &vp,node &vp2) {return reinterpret_cast <vpage &>(vp)==reinterpret_cast <vpage &>(vp2);}
// Simple constructor; real work is later (in setup()).
// Sizes the vpage hash table and installs the adapters above.
area::area(void) : vpages(AREA_HASH_TABLE_SIZE) {
vpages.setHash(vpageHash);
vpages.setIsEqual(vpageisEqual);
}
// Not much here, either.
// Second-stage initialization: record which areaManager owns this area.
// Kept separate from the constructor because areas come from a pool.
void area::setup (areaManager *myManager) {
//error ("area::setup setting up new area\n");
manager=myManager;
//error ("area::setup done setting up new area\n");
}
// Decide which algorithm to use for finding the next virtual address and try to find one.
unsigned long area::mapAddressSpecToAddress(addressSpec type,void * req,int pageCount) {
// We will lock in the callers
unsigned long base,requested=(unsigned long)req;
@ -49,31 +52,33 @@ unsigned long area::mapAddressSpecToAddress(addressSpec type,void * req,int page
return base;
}
// This is the really interesting part of creating an area
status_t area::createAreaGuts( char *inName, int pageCount, void **address, addressSpec type, pageState inState, protectType protect, bool inFinalWrite, int fd, size_t offset, area *originalArea=NULL /* For clone only*/) {
error ("area::createAreaGuts : name = %s, pageCount = %d, address = %lx, addressSpec = %d, pageState = %d, protection = %d, inFinalWrite = %d, fd = %d, offset = %d,originalArea=%ld\n",
inName,pageCount,address,type,inState,protect,inFinalWrite,fd,offset,originalArea);
strcpy(name,inName);
vpage *newPage;
// Get an address to start this area at
unsigned long base=mapAddressSpecToAddress(type,*address,pageCount);
if (base==0)
return B_ERROR;
// Set up some basic info
strcpy(name,inName);
state=inState;
start_address=base;
end_address=base+(pageCount*PAGE_SIZE)-1;
*address=(void *)base;
finalWrite=inFinalWrite;
// For non-cloned areas, make a new vpage for every page necessary.
if (originalArea==NULL) // Not for cloning
for (int i=0;i<pageCount;i++) {
newPage=new (vmBlock->vpagePool->get()) vpage;
if (fd) {
// vnode *newVnode=new (vmBlock->vnodePool->get()) vnode;
void *currentMemoryLocation;
// A future implementation vnode *newVnode=vmBlock->vnodePool->getNode(fd,offset,PAGE_SIZE*i,true,currentMemoryLocation);
vnode *newVnode=vmBlock->vnodePool->get();
newVnode->fd=fd;
newVnode->offset=offset;
newPage->setup(base+PAGE_SIZE*i,newVnode,NULL,protect,inState);
vnode newVnode;
newVnode.fd=fd;
newVnode.offset=offset;
// vmBlock->vnodeManager->addVNode(newVnode,newPage);
newPage->setup(base+PAGE_SIZE*i,&newVnode,NULL,protect,inState);
}
else
newPage->setup(base+PAGE_SIZE*i,NULL,NULL,protect,inState);
@ -81,7 +86,7 @@ status_t area::createAreaGuts( char *inName, int pageCount, void **address, addr
}
else // cloned
// Need to lock other area, here, just in case...
// Make a copy of each page in the other area...
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;
newPage=new (vmBlock->vpagePool->get()) vpage;
@ -101,12 +106,13 @@ status_t area::createArea(char *inName, int pageCount,void **address, addressSpe
return createAreaGuts(inName,pageCount,address,type,inState,protect,false,0,0);
}
// Clone another area.
status_t area::cloneArea(area *origArea, char *inName, void **address, addressSpec type,pageState inState,protectType protect) {
if (type==CLONE) {
*address=(void *)(origArea->getStartAddress());
type=EXACT;
}
if (origArea->getAreaManager()!=manager) {
if (origArea->getAreaManager()!=manager) { // If they are in different areas...
origArea->getAreaManager()->lock(); // This is just begging for a deadlock...
status_t retVal = createAreaGuts(inName,origArea->getPageCount(),address,type,inState,protect,false,0,0,origArea);
origArea->getAreaManager()->unlock();
@ -116,6 +122,7 @@ status_t area::cloneArea(area *origArea, char *inName, void **address, addressSp
return createAreaGuts(inName,origArea->getPageCount(),address,type,inState,protect,false,0,0,origArea);
}
// To free an area, iterate over its pages, final writing them if necessary, then call cleanup and put the vpage back in the pool
void area::freeArea(void) {
//error ("area::freeArea: starting \n");
@ -136,6 +143,7 @@ void area::freeArea(void) {
//error ("area::freeArea: ending \n");
}
// Get area info
status_t area::getInfo(area_info *dest) {
dest->area=areaID;
strcpy(dest->name,name);
@ -166,10 +174,13 @@ bool area::contains(void *address) {
return ((start_address<=base) && (base<=end_address));
}
// Resize an area.
status_t area::resize(size_t newSize) {
size_t oldSize =end_address-start_address;
// Duh. Nothing to do.
if (newSize==oldSize)
return B_OK;
// Grow the area. Figure out how many pages, allocate them and set them up
if (newSize>oldSize) {
int pageCount = (newSize - oldSize + PAGE_SIZE - 1) / PAGE_SIZE;
vpage *newPage;
@ -180,7 +191,7 @@ status_t area::resize(size_t newSize) {
}
end_address+=start_address+newSize;
}
else {
else { // Ewww. Shrinking. This is ugly right now.
int pageCount = (oldSize - newSize + PAGE_SIZE - 1) / PAGE_SIZE;
vpage *oldPage;
struct node *cur;
@ -192,6 +203,7 @@ status_t area::resize(size_t newSize) {
maxAddress=curAddress;
max=cur;
}
// Found the right one to remove; waste it, pool it, and move on
oldPage=reinterpret_cast<vpage *>(max);
vpages.remove(cur);
if (finalWrite)
@ -203,6 +215,7 @@ status_t area::resize(size_t newSize) {
return B_OK;
}
// When the protection for the area changes, the protection for every one of the pages must change
status_t area::setProtection(protectType prot) {
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;
@ -217,6 +230,7 @@ vpage *area::findVPage(unsigned long address) {
return reinterpret_cast <vpage *>(vpages.find(&findMe));
}
// To fault, find the vpage associated with the fault and call it's fault function
bool area::fault(void *fault_address, bool writeError) { // true = OK, false = panic.
vpage *page=findVPage((unsigned long)fault_address);
if (page)
@ -253,6 +267,7 @@ void area::setInt(unsigned long address,int value) { // This is for testing only
page->setInt(address,value,manager);
}
// For every one of our vpages, call the vpage's pager
void area::pager(int desperation) {
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;
@ -260,6 +275,7 @@ void area::pager(int desperation) {
}
}
// For every one of our vpages, call the vpage's saver
void area::saver(void) {
for (hashIterate hi(vpages);node *cur=hi.get();) {
vpage *page=(vpage *)cur;

View File

@ -10,6 +10,7 @@ bool areaIsLessThan(void *a,void *b)
return (((reinterpret_cast<area *>(a))->getStartAddress()) < (reinterpret_cast<area *>(b))->getStartAddress());
}
// This creates the one true lock for this area
areaManager::areaManager(void)
{
team=0; // should be proc_get_current_proc_id()
@ -18,6 +19,7 @@ areaManager::areaManager(void)
areas.setIsLessThan(areaIsLessThan);
}
// Loops over every area looking for someplace where we can get the space we need.
unsigned long areaManager::getNextAddress(int pages, unsigned long start)
{
// This function needs to deal with the possibility that we run out of address space...
@ -39,6 +41,7 @@ unsigned long areaManager::getNextAddress(int pages, unsigned long start)
return start;
}
// Remove the area from our list, put it on the area pool and move on
void areaManager::freeArea(area_id areaID)
{
error ("areaManager::freeArea: begin\n");
@ -68,6 +71,7 @@ area *areaManager::findAreaLock(void *address)
return retVal;
}
// Loops over our areas looking for this one by name
area *areaManager::findArea(char *address)
{
error ("Finding area by string\n");
@ -83,6 +87,7 @@ area *areaManager::findArea(char *address)
return retVal;
}
// Loops over our areas looking for the one whose virtual address matches the passed in address
area *areaManager::findArea(void *address)
{
// THIS DOES NOT HAVE LOCKING - all callers must lock.
@ -107,6 +112,7 @@ area *areaManager::findAreaLock(area_id id)
return retVal;
}
// Loops over our areas looking for the one whose ID was passed in
area *areaManager::findArea(area_id id)
{
//error ("Finding area by area_id\n");
@ -120,6 +126,7 @@ area *areaManager::findArea(area_id id)
return retVal;
}
// Find the area whose address matches this page fault and dispatch the fault to it.
bool areaManager::fault(void *fault_address, bool writeError) // true = OK, false = panic.
{
area *myArea;
@ -137,6 +144,7 @@ bool areaManager::fault(void *fault_address, bool writeError) // true = OK, fals
long areaManager::nextAreaID=0;
// Create an area; get a new structure, call setup, create the guts, set its ID, add it to our list
int areaManager::createArea(char *AreaName,int pageCount,void **address, addressSpec addType,pageState state,protectType protect)
{
error ("areaManager::createArea - Creating an area\n");
@ -159,6 +167,8 @@ int areaManager::createArea(char *AreaName,int pageCount,void **address, address
return retVal;
}
// FIX: THIS IS WRONG! It will only clone areas in our areaManager.
// Should: find the specified area, create a new area to be its clone, and set it up
int areaManager::cloneArea(int newAreaID,char *AreaName,void **address, addressSpec addType,pageState state,protectType protect)
{
int retVal;
@ -251,6 +261,7 @@ void areaManager::setInt(unsigned long address,int value)
unlock();
}
// Call pager for each of our areas
void areaManager::pager(int desperation)
{
lock();
@ -264,6 +275,7 @@ void areaManager::pager(int desperation)
unlock();
}
// Call saver for each of our areas
void areaManager::saver(void)
{
lock();
@ -275,6 +287,7 @@ void areaManager::saver(void)
unlock();
}
// mmap is basically map POSIX values to ours and call createAreaMappingFile...
void *areaManager::mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset)
{
char name[MAXPATHLEN];
@ -306,6 +319,7 @@ void *areaManager::mmap(void *addr, size_t len, int prot, int flags, int fd, off
return addr;
}
// Custom area destruction for mapped files
status_t areaManager::munmap(void *addr,size_t len)
{
// Note that this is broken for any and all munmaps that are not full area in size. This is an all or nothing game...

View File

@ -6,6 +6,7 @@
extern vmHeaderBlock *vmBlock;
// If we can get one from an existing block, cool. If not, get a new block, create as many as will fit in the block, put them on the free list and call ourself recursively
area *poolarea::get(void)
{
area *ret=NULL;

View File

@ -3,6 +3,7 @@
#include <vpagePool.h>
#include "vmHeaderBlock.h"
// functions for hash and isEqual. No surprises
ulong vnodeHash (node &vp) {vnode &vn=reinterpret_cast <vnode &>(vp); return vn.offset+vn.fd;}
bool vnodeisEqual (node &vp,node &vp2) {
vnode &vn=reinterpret_cast <vnode &>(vp);
@ -13,19 +14,21 @@ bool vnodeisEqual (node &vp,node &vp2) {
extern vmHeaderBlock *vmBlock;
// TODO - we need to (somehow) make sure that the same vnodes here are shared with mmap.
// Maybe a vnode manager...
// Make the cache lockable.
// Builds on area (the cache is itself an area) and sets up the
// vnode-keyed hash of cache members plus the semaphore guarding it.
cacheManager::cacheManager(void) : area (),cacheMembers(30) {
myLock=create_sem(1,"Cache Manager Semaphore");
cacheMembers.setHash(vnodeHash);
cacheMembers.setIsEqual(vnodeisEqual);
}
// Given a vnode and protection level, see if we have it in cache already
void *cacheManager::findBlock(vnode *target,bool readOnly) {
cacheMember *candidate=reinterpret_cast<cacheMember *>(cacheMembers.find(target));
if (!candidate || readOnly || candidate->vp->getProtection()>=writable)
return candidate;
// At this point, we have the first one in the hahs bucket. Loop over the hash bucket from now on,
// At this point, we have the first one in the hash bucket. Loop over the hash bucket from now on,
// looking for an equality and writability match...
for (struct cacheMember *cur=candidate;cur;cur=reinterpret_cast<cacheMember *>(cur->next)) {
if ((target==cur->vn) && (readOnly || (cur->vp->getProtection()>=writable)))
@ -36,6 +39,7 @@ void *cacheManager::findBlock(vnode *target,bool readOnly) {
return createBlock(target,false);
}
// No cache hit found; have to make a new one. Find a virtual page, create a vnode, and map.
void *cacheManager::createBlock(vnode *target,bool readOnly, cacheMember *candidate) {
bool foundSpot=false;
vpage *prev=NULL,*cur=NULL;

View File

@ -4,6 +4,7 @@
static sem_id errorPrinting=0;
// Create a function for standardized formats. Wish I could get rid of the warning associated with this...
void error(char *fmt, ...)
{
if (errorPrinting==0)

View File

@ -3,6 +3,7 @@
#include <stdlib.h>
#include <string.h>
// Pointer-arithmetic helper: advance a void * by `offset` bytes.
// (Handy mostly for hiding the casts at the call sites.)
void *addOffset(void *base,unsigned long offset) {
	return static_cast<void *>(static_cast<char *>(base) + offset);
}
@ -10,6 +11,7 @@ void *addOffset(void *base,unsigned long offset) {
// Intentionally empty: pageManager lives in shared memory and is built with
// placement new; all real initialization happens in setup().
pageManager::pageManager(void) {
}
// Since all of the physical pages will need page structures, allocate memory off of the top for them. Set up the lists and semaphores.
void pageManager::setup(void *area,int pages) {
// Calculate the number of pages that we will need to hold the page structures
int pageOverhead=((pages*sizeof(page))+(PAGE_SIZE-1))/PAGE_SIZE;
@ -26,6 +28,7 @@ void pageManager::setup(void *area,int pages) {
error ("pageManager::setup - %d pages ready to rock and roll\n",unused.count());
}
// Try to get a clean page first. If that fails, get a dirty one and clean it. Loop on this.
page *pageManager::getPage(void) {
page *ret=NULL;
while (!ret)
@ -53,35 +56,7 @@ page *pageManager::getPage(void) {
return ret;
}
// Try to collect `pages` physically contiguous pages into `location`.
// Strategy: grab pages one at a time and keep them only while they extend the
// current run (either immediately after it, or immediately before it);
// otherwise release the whole run and start over.
// NOTE(review): there is no failure bound - if the allocator never yields a
// contiguous run this loops forever; it also assumes physical address 0 is
// never handed out (0 is used as the "no run yet" sentinel) - confirm.
bool pageManager::getContiguousPages(int pages,page **location) {
	unsigned long current, start=0;
	page *curPage;
	int count=0;
	while (count<pages) {
		curPage=getPage();
		current=curPage->getAddress();
		if (start==0) { // First page of a fresh run
			start=current;
			location[count++]=curPage;
		}
		else if (current==start+PAGE_SIZE*count) // This is the next one in line
			location[count++]=curPage;
		else if (current==start-PAGE_SIZE) { // Found the one directly previous
			// Shift the run right one slot to make room at the front.
			// BUG FIX: memmove takes the slot addresses; the old code passed
			// the page pointers *stored* in the slots and scribbled on a page.
			memmove(&location[1],&location[0],count*sizeof(page *));
			start=current;
			location[0]=curPage;
			count++;
		}
		else { // Forget this series - it doesn't seem to be going anywhere...
			while (--count>=0) {
				freePage(location[count]);
				location[count]=NULL;
			}
			// BUG FIX: restart cleanly. The old code left count at -1
			// (next store hit location[-1]) and kept the stale `start`,
			// so a new run could never begin.
			count=0;
			start=0;
		}
	}
	return true;
}
// Take page from in use list and put it on the unused list
void pageManager::freePage(page *toFree) {
error ("pageManager::freePage; count = %d, address = %p\n",toFree->count,toFree);
if (atomic_add(&(toFree->count),-1)==1) { // atomic_add returns the *PREVIOUS* value. So we need to check to see if the one we are wasting was the last one.
@ -97,6 +72,7 @@ void pageManager::freePage(page *toFree) {
}
}
// Loop forever cleaning any necessary pages
void pageManager::cleaner(void) {
while (1) {
snooze(250000);
@ -104,6 +80,7 @@ void pageManager::cleaner(void) {
}
}
// Find a page that needs cleaning. Take it from the "unused" list, clean it and put it on the clean list.
void pageManager::cleanOnePage(void) {
if (unused.count()) {
acquire_sem(unusedLock);
@ -118,6 +95,7 @@ void pageManager::cleanOnePage(void) {
}
}
// Calculate how desperate we are for physical pages; 1 is not desperate at all, 5 is critical.
int pageManager::desperation(void) { // Formula to determine how desperate system is to get pages back...
int percentClean=(unused.count()+clean.count())*100/totalPages;
if (percentClean>30) return 1;

View File

@ -13,7 +13,6 @@ class pageManager {
// Mutators
page *getPage(void);
bool getContiguousPages(int pages,page **location);
// Accessors
int desperation(void);

View File

@ -8,6 +8,7 @@
extern vmHeaderBlock *vmBlock;
// Set up the swap file and make a semaphore to lock it
swapFileManager::swapFileManager(void)
{
swapFile = open("/boot/var/tmp/OBOS_swap",O_RDWR|O_CREAT,0x777 );
@ -16,6 +17,7 @@ swapFileManager::swapFileManager(void)
lockFreeList=create_sem(1,"SwapFile Free List Semaphore"); // Should have team name in it.
}
// Try to get a page from the free list. If not, make a new page
vnode &swapFileManager::findNode(void)
{
//error ("swapFileManager::findNode: Entering findNode \n");
@ -35,15 +37,15 @@ vnode &swapFileManager::findNode(void)
//error (" New One: %d\n",newNode->offset);
}
newNode->valid=false;
newNode->count=1;
//error ("swapFileManager::findNode: swapFileFreeList is now: ");
//swapFileFreeList.dump();
return *newNode;
}
// Add this page to the free list.
void swapFileManager::freeVNode(vnode &v)
{
if ( atomic_add(&v.count,-1)==1)
if (!v.vpages.count())
{
//error ("locking in sfm\n");
lock();

View File

@ -10,12 +10,11 @@ struct vnode : public node
int fd;
unsigned long offset;
bool valid;
long count;
list vpages;
vnode (void)
{
valid=false;
count=0;
}
};
#define B_OS_NAME_LENGTH 32

View File

@ -9,6 +9,7 @@ class poolvnode;
class pageManager;
class swapFileManager;
class cacheManager;
class vnodeManager;
#endif
struct vmHeaderBlock
@ -19,6 +20,7 @@ struct vmHeaderBlock
pageManager *pageMan;
swapFileManager *swapMan;
cacheManager *cacheMan;
vnodeManager *vnodeMan;
};
#endif

View File

@ -7,6 +7,7 @@
#include "pageManager.h"
#include "swapFileManager.h"
#include "cacheManager.h"
#include "vnodeManager.h"
#define I_AM_VM_INTERFACE
#include "vmHeaderBlock.h"
#include "vmInterface.h"
@ -63,6 +64,7 @@ int32 pagerThread(void *areaMan)
vmInterface::vmInterface(int pages)
{
// Make the area for testing
char temp[1000];
sprintf (temp,"vm_test_clone_%ld",getpid());
if (clone_area(temp,(void **)(&vmBlock),B_ANY_ADDRESS,B_WRITE_AREA,find_area("vm_test"))<0)
@ -82,6 +84,7 @@ vmInterface::vmInterface(int pages)
exit(1);
}
error ("Need %d pages, creation calls for %d\n",pageCount,pages);
// Make all of the managers and the vmBlock to hold them
void *currentAddress = addToPointer(vmBlock,sizeof(struct vmHeaderBlock));
vmBlock->pageMan = new (currentAddress) pageManager;
currentAddress=addToPointer(currentAddress,sizeof(pageManager));
@ -97,6 +100,8 @@ vmInterface::vmInterface(int pages)
currentAddress=addToPointer(currentAddress,sizeof(swapFileManager));
vmBlock->cacheMan = new (currentAddress) cacheManager;
currentAddress=addToPointer(currentAddress,sizeof(cacheManager));
vmBlock->vnodeMan = new (currentAddress) vnodeManager;
currentAddress=addToPointer(currentAddress,sizeof(vnodeManager));
error ("Need %d pages, creation calls for %d\n",pageCount,pages);
error ("vmBlock is at %x, end of structures is at %x, pageMan called with address %x, pages = %d\n",vmBlock,currentAddress,addToPointer(vmBlock,PAGE_SIZE*pageCount),pages-pageCount);
}
@ -105,11 +110,13 @@ vmInterface::vmInterface(int pages)
error ("Area found!\n");
}
// Start the kernel daemons
resume_thread(tid_cleaner=spawn_thread(cleanerThread,"cleanerThread",0,(vmBlock->pageMan)));
resume_thread(tid_saver=spawn_thread(saverThread,"saverThread",0,getAM()));
resume_thread(tid_pager=spawn_thread(pagerThread,"pagerThread",0,getAM()));
}
// Find an area from an address that it contains
int vmInterface::getAreaByAddress(void *address)
{
int retVal;

View File

@ -1,18 +1,75 @@
#include <vnodeManager.h>
#include <vnodePool.h>
vpage *vnodeManager::findVnode(vnode &target)
{
managedVnode *found=vnodes.find(&target);
// Functions for hash and isEqual
ulong mnHash (node &vnodule) { return ((vnode &)vnodule).fd; }
bool mnIsEqual (node &vnodule1,node &vnodule2) {
vnode &v1= ((vnode &)vnodule1);
vnode &v2= ((vnode &)vnodule2);
return v1.fd==v2.fd && v1.offset==v2.offset;
}
// Set the hash and isEqual functions
vnodeManager::vnodeManager(void) : vnodes(20) {
vnodes.setHash(mnHash);
vnodes.setIsEqual(mnIsEqual);
}
// Find the vnode and return the first page that points to it.
vpage *vnodeManager::findVnode(vnode &target) {
vnode *found=reinterpret_cast<vnode *>(vnodes.find(&target));
if (found==NULL)
return NULL;
else
return found->pages.peek();
return reinterpret_cast<vpage *>(found->vpages.top());
}
void vnodeManager::addVnode(vnode &target,vpage &vp)
{
// Allocate space for a managed vnode
managedVnode *mv = NULL; // Fill this in later - maybe another pool?
mv->node=&target;
mv->pages.add(&vp);
// If this vnode is already in use, add this vpage to it and return one to clone. If not, add this vnode, with this vpage, and return null
// This method will make a new vnode object
vpage *vnodeManager::addVnode(vnode &target,vpage &vp) {
vpage *retVal;
error ("vnodeManager::addVnode : Adding by reference node %x, fd = %d, offset = %d\n",&target,target.fd,target.offset);
vnode *found=reinterpret_cast<vnode *>(vnodes.find(&target));
if (!found) {
// Unknown (fd,offset) pair: allocate a managed copy from the pool
// (placement new) so the manager owns a vnode independent of the caller's.
found=new (vmBlock->vnodePool->get()) vnode;
found->fd=target.fd;
found->offset=target.offset;
vnodes.add(found);
retVal=NULL;
}
else
// Already managed: hand back an existing vpage for the caller to clone.
// NOTE(review): assumes vpages.top() returns a live member - confirm list semantics.
retVal=reinterpret_cast<vpage *>(found->vpages.top());
found->vpages.add(&vp);
return retVal;
}
// If this vnode is already in use, add this vpage to it and return one to clone. If not, add this vnode, with this vpage, and return null
// This method will NOT make a new vnode object - it adopts the caller's.
vpage *vnodeManager::addVnode(vnode *target,vpage &vp) {
	vpage *retVal;
	error ("vnodeManager::addVnode : Adding by pointer node %x, fd = %d, offset = %d\n",target,target->fd,target->offset);
	vnode *found=reinterpret_cast<vnode *>(vnodes.find(target));
	if (!found) { // Not tracked yet - adopt the caller's vnode as the managed one
		found=target;
		vnodes.add(found);
		retVal=NULL; // nothing to clone from
	}
	else // Already managed - give the caller an existing vpage to clone
		retVal=reinterpret_cast<vpage *>(found->vpages.top());
	found->vpages.add(&vp);
	found->vpages.dump();
	vnodes.dump();
	// BUG FIX: the function previously fell off the end without returning a
	// value - undefined behavior for a value-returning function.
	return retVal;
}
// Remove a vpage from the manager; return "is this the last one" so the
// caller knows whether the vnode itself may now be reclaimed.
bool vnodeManager::remove(vnode &target,vpage &vp) {
	error ("vnodeManager::remove : Removing by reference node %x, fd = %d, offset = %d\n",&target,target.fd,target.offset);
	vnode *found=reinterpret_cast<vnode *>(vnodes.find(&target));
	if (!found) { // Removing from a vnode we never saw is a caller bug
		vnodes.dump();
		// BUG FIX: corrected misspelled error message ("occured")
		throw ("An attempt to remove from an unknown vnode occurred!\n");
	}
	found->vpages.remove(&vp);
	return (found->vpages.count()==0);
}

View File

@ -1,33 +1,21 @@
#ifndef VNODE_MANAGER
#define VNODE_MANAGER
#include <vpage.h>
#include <hashTable.h>
// vnode manager tracks which vnodes are already mapped to pages
// and facilitates sharing of memory containing the same disk space.
struct managedVnode
{
vnode *node;
list pages; // Hold the list of vpages that this vnode points to
};
ulong mnHash (node &vnodule) { return ((managedNode &)vnodule).node->fd; }
bool mnIsEqual (node &vnodule1,&vnodule2) {
managedNode &v1= ((managedNode &)vnodule1);
managedNode &v2= ((managedNode &)vnodule2);
return v1.node->fd==v2.node->fd && v1.node->offset==v2.node->offset;
}
class vnodeManager
{
public:
vnodeManager(void) : vnodes(20)
{
vnodes.setHash(mnHash);
vnodes.setIsEqual(mnIsEqual);
}
vnodeManager(void);
vpage *findVnode(vnode &target); // pass in a vnode, get back the "master" vpage
void addVnode (vnode &target, vpage &vp);
vpage *addVnode (vnode &target, vpage &vp);
vpage *addVnode(vnode *target,vpage &vp);
bool remove(vnode &target,vpage &vp);
private:
hashTable vnodes;
}
};
#endif

View File

@ -2,12 +2,14 @@
#include "vnodePool.h"
#include "vmHeaderBlock.h"
#include "areaManager.h"
#include "vnodeManager.h"
#include <stdio.h>
#include <string.h>
#include <errno.h>
extern vmHeaderBlock *vmBlock;
// Write this vpage out if necessary
void vpage::flush(void) {
if (protection==writable && dirty) {
//error (vpage::write_block: writing, backingNode->fd = %d, backingNode->offset = %d, address = %x\n",backingNode->fd, backingNode->offset,loc);
@ -20,6 +22,7 @@ void vpage::flush(void) {
}
}
// Load this vpage in if necessary
void vpage::refresh(void) {
if (backingNode->valid==false)
return; // Do nothing. This prevents "garbage" data on disk from being read in...
@ -30,52 +33,68 @@ void vpage::refresh(void) {
error ("vpage::refresh: failed, fd = %d, errno = %d, %s\n",backingNode->fd,errno,strerror(errno));
}
// Simple, empty constructor: zero/null every field so a pooled vpage starts
// in a known state; setup() does the real work.
vpage::vpage(void) : physPage(NULL),backingNode(NULL),protection(none),dirty(false),swappable(false),start_address(0),end_address(0)
{
}
// Does the real setup work for making a vpage.
// backing and/or physMem can be NULL/0.
void vpage::setup(unsigned long start,vnode *backing, page *physMem,protectType prot,pageState state) {
// Basic setup from parameters
vpage *clonedPage; // This is the page that this page is to be the clone of...
error ("vpage::setup: start = %x, vnode.fd=%d, vnode.offset=%d, physMem = %x\n",start,((backing)?backing->fd:0),((backing)?backing->offset:0), ((physMem)?(physMem->getAddress()):0));
physPage=physMem;
backingNode=backing;
protection=prot;
dirty=false;
swappable=(state==NO_LOCK);
start_address=start;
end_address=start+PAGE_SIZE-1;
protection=prot;
swappable=(state==NO_LOCK);
if (backing) {
backingNode=backing;
atomic_add(&(backing->count),1);
// Set up the backing store. If one is specified, use it; if not, get a swap file page.
if (backingNode) {
clonedPage=vmBlock->vnodeMan->addVnode(*backingNode,*this); // Use the reference version which will make a new one if this one is not found
if (clonedPage) {
physPage=clonedPage->physPage;
protection=(protection<=readable)?protection: copyOnWrite;
if (clonedPage->getProtection()<=readable)
clonedPage->setProtection(copyOnWrite);
}
else
}
else { // Going to swap file.
backingNode=&(vmBlock->swapMan->findNode());
if (!physMem && (state!=LAZY) && (state!=NO_LOCK)) {
clonedPage=vmBlock->vnodeMan->addVnode(backingNode,*this); // Use the pointer version which will use this one. Should always return NULL
}
// If there is no physical page already and we can't wait to get one, then get one now
if (!physPage && (state!=LAZY) && (state!=NO_LOCK)) {
physPage=vmBlock->pageMan->getPage();
error ("vpage::setup, state = %d, allocated page %x\n",state,physPage);
}
else {
if (physMem)
atomic_add(&(physMem->count),1);
physPage=physMem;
else { // We either don't need it or we already have it.
if (physPage)
atomic_add(&(physPage->count),1);
}
dirty=false;
error ("vpage::setup: ended : start = %x, vnode.fd=%d, vnode.offset=%d, physMem = %x\n",start,((backing)?backing->fd:0),((backing)?backing->offset:0), ((physMem)?(physMem->getAddress()):0));
}
// Destruction.
void vpage::cleanup(void) {
if (physPage) { // Note that free means release one reference
error ("vpage::cleanup, freeing physcal page %x\n",physPage);
vmBlock->pageMan->freePage(physPage);
vmBlock->pageMan->freePage(physPage); // This does nothing if someone else is using the physical page
}
if (backingNode) {
if (backingNode->fd)
if (backingNode->fd==vmBlock->swapMan->getFD())
if (backingNode) { // If no one else is using this vnode, wipe it out
if (vmBlock->vnodeMan->remove(*backingNode,*this))
if (backingNode->fd && (backingNode->fd==vmBlock->swapMan->getFD()))
vmBlock->swapMan->freeVNode(*backingNode);
else
if ( atomic_add(&(backingNode->count),-1)==1)
vmBlock->vnodePool->put(backingNode);
}
}
// Change this pages protection
void vpage::setProtection(protectType prot) {
protection=prot;
// Change the hardware
@ -85,7 +104,7 @@ void vpage::setProtection(protectType prot) {
// true = OK, false = panic.
bool vpage::fault(void *fault_address, bool writeError) {
error ("vpage::fault: virtual address = %lx, write = %s\n",(unsigned long) fault_address,((writeError)?"true":"false"));
if (writeError && physPage) {
if (writeError && physPage) { // If we already have a page and this is a write, it is either a copy on write or a "dirty" notice
dirty=true;
if (protection==copyOnWrite) { // Else, this was just a "let me know when I am dirty"...
page *newPhysPage=vmBlock->pageMan->getPage();
@ -98,6 +117,7 @@ bool vpage::fault(void *fault_address, bool writeError) {
}
return true;
}
// Guess this is the real deal. Get a physical page.
physPage=vmBlock->pageMan->getPage();
error ("vpage::fault - regular - allocated page %x\n",physPage);
if (!physPage) // No room at the inn
@ -149,6 +169,7 @@ void vpage::setInt(unsigned long address,int value,areaManager *manager) {
*((int *)(address-start_address+physPage->getAddress()))=value;
}
// Swaps pages out where necessary.
void vpage::pager(int desperation) {
//error ("vpage::pager start desperation = %d\n",desperation);
if (!swappable)
@ -170,6 +191,7 @@ void vpage::pager(int desperation) {
physPage=NULL;
}
// Saves dirty pages
void vpage::saver(void) {
if (dirty)
flush();