* Added vm_page_write_modified_page_range(), which is similar to
  vm_page_write_modified_pages(), save that it only writes pages in the
  given range.
* Added vm_page_schedule_write_page_range() which schedules all modified
  pages in the given cache's range for writing by the page writer.
* Added _kern_sync_memory() syscall and the msync() POSIX function.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25620 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2008-05-22 21:51:12 +00:00
parent 05ffb70e8f
commit 0f448d21e5
7 changed files with 146 additions and 3 deletions
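
For context, a minimal userland sketch of how the new msync() interface is used (not part of this change; the file name, size, and error handling are only illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	// illustrative file; it should already exist, be writable, and be at
	// least one page long
	int fd = open("/tmp/msync-example.dat", O_RDWR);
	if (fd < 0)
		return 1;

	size_t length = 4096;
	void* address = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
		fd, 0);
	if (address == MAP_FAILED) {
		close(fd);
		return 1;
	}

	// dirty the mapping
	memcpy(address, "hello", 5);

	// MS_SYNC blocks until the modified pages have been written back;
	// MS_ASYNC would only schedule them for the page writer
	if (msync(address, length, MS_SYNC) != 0)
		perror("msync");

	munmap(address, length);
	close(fd);
	return 0;
}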


@@ -25,6 +25,11 @@
/* mmap() error return code */
#define MAP_FAILED ((void*)-1)
/* msync() flags */
#define MS_ASYNC 0x01
#define MS_SYNC 0x02
#define MS_INVALIDATE 0x04
__BEGIN_DECLS
@@ -32,6 +37,8 @@ void* mmap(void* address, size_t length, int protection, int flags,
	int fd, off_t offset);
int munmap(void* address, size_t length);
int msync(void* address, size_t length, int flags);
int shm_open(const char* name, int openMode, mode_t permissions);
int shm_unlink(const char* name);


@@ -96,6 +96,7 @@ status_t _user_delete_area(area_id area);
area_id _user_map_file(const char *uname, void **uaddress, int addressSpec,
	addr_t size, int protection, int mapping, int fd, off_t offset);
status_t _user_unmap_memory(void *address, addr_t size);
status_t _user_sync_memory(void *address, addr_t size, int flags);
area_id _user_area_for(void *address);
area_id _user_find_area(const char *name);
status_t _user_get_area_info(area_id area, area_info *info);


@@ -35,8 +35,12 @@ size_t vm_page_num_pages(void);
size_t vm_page_num_free_pages(void);
size_t vm_page_num_available_pages(void);
status_t vm_page_write_modified_page_range(struct vm_cache *cache,
	uint32 firstPage, uint32 endPage, bool fsReenter);
status_t vm_page_write_modified_pages(struct vm_cache *cache, bool fsReenter);
void vm_page_schedule_write_page(struct vm_page *page);
void vm_page_schedule_write_page_range(struct vm_cache *cache,
	uint32 firstPage, uint32 endPage);
void vm_page_unreserve_pages(uint32 count);
void vm_page_reserve_pages(uint32 count);
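
For illustration only (not part of this change): a sketch of how a kernel-side caller could use the two new range functions; flush_cache_range() is a hypothetical helper name, and the caller is assumed to already hold the vm_cache lock as the functions' documentation requires.

// Hypothetical kernel-side helper; assumes the kernel-internal headers
// (e.g. <vm_page.h>) are available and that the caller already holds the
// vm_cache lock.
static status_t
flush_cache_range(struct vm_cache *cache, uint32 firstPage, uint32 endPage,
	bool synchronous)
{
	if (synchronous) {
		// blocks until the modified pages in [firstPage, endPage) have been
		// written back (per its documentation, the call releases the cache
		// lock while doing so)
		return vm_page_write_modified_page_range(cache, firstPage, endPage,
			false);
	}

	// only queues the modified pages for the page writer and returns at once
	vm_page_schedule_write_page_range(cache, firstPage, endPage);
	return B_OK;
}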


@@ -319,6 +319,7 @@ extern area_id _kern_map_file(const char *name, void **address,
	int addressSpec, addr_t size, int protection,
	int mapping, int fd, off_t offset);
extern status_t _kern_unmap_memory(void *address, addr_t size);
extern status_t _kern_sync_memory(void *address, addr_t size, int flags);
/* kernel port functions */
extern port_id _kern_create_port(int32 queue_length, const char *name);


@@ -13,6 +13,7 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <OS.h>
#include <KernelExport.h>
@@ -5523,3 +5524,83 @@ _user_unmap_memory(void *_address, addr_t size)
	// unmap
	return unmap_address_range(locker.AddressSpace(), address, size, false);
}


status_t
_user_sync_memory(void *_address, addr_t size, int flags)
{
	addr_t address = (addr_t)_address;
	size = PAGE_ALIGN(size);

	// check params
	if ((address % B_PAGE_SIZE) != 0)
		return B_BAD_VALUE;
	if ((addr_t)address + size < (addr_t)address || !IS_USER_ADDRESS(address)
		|| !IS_USER_ADDRESS((addr_t)address + size)) {
		// weird error code required by POSIX
		return ENOMEM;
	}

	bool writeSync = (flags & MS_SYNC) != 0;
	bool writeAsync = (flags & MS_ASYNC) != 0;
	if (writeSync && writeAsync)
		return B_BAD_VALUE;

	if (size == 0 || !writeSync && !writeAsync)
		return B_OK;

	// iterate through the range and sync all concerned areas
	while (size > 0) {
		// read lock the address space
		AddressSpaceReadLocker locker;
		status_t error = locker.SetTo(team_get_current_team_id());
		if (error != B_OK)
			return error;

		// get the first area
		vm_area* area = vm_area_lookup(locker.AddressSpace(), address);
		if (area == NULL)
			return B_NO_MEMORY;

		uint32 offset = address - area->base;
		size_t rangeSize = min_c(area->size - offset, size);
		offset += area->cache_offset;

		// lock the cache
		AreaCacheLocker cacheLocker(area);
		if (!cacheLocker)
			return B_BAD_VALUE;
		vm_cache* cache = area->cache;

		locker.Unlock();

		uint32 firstPage = offset >> PAGE_SHIFT;
		uint32 endPage = firstPage + (rangeSize >> PAGE_SHIFT);

		// write the pages
		if (cache->type == CACHE_TYPE_VNODE) {
			if (writeSync) {
				// synchronous
				error = vm_page_write_modified_page_range(cache, firstPage,
					endPage, false);
				if (error != B_OK)
					return error;
			} else {
				// asynchronous
				vm_page_schedule_write_page_range(cache, firstPage, endPage);
				// TODO: This is probably not quite what is supposed to happen.
				// Especially when a lot has to be written, it might take ages
				// until it really hits the disk.
			}
		}

		address += rangeSize;
		size -= rangeSize;
	}

	// NOTE: If I understand it correctly the purpose of MS_INVALIDATE is to
	// synchronize multiple mappings of the same file. In our VM they never get
	// out of sync, though, so we don't have to do anything.
	return B_OK;
}
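
A worked example of the page index arithmetic used above (not part of this change), assuming a page size of 4096 and a page shift of 12; the addresses and offsets are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096
#define EXAMPLE_PAGE_SHIFT	12

int
main(void)
{
	uintptr_t areaBase = 0x10000000;			// area->base
	uintptr_t cacheOffset = 0x2000;				// area->cache_offset: area maps the cache at 8 KB
	uintptr_t address = 0x10003000;				// sync starts 12 KB into the area
	size_t rangeSize = 2 * EXAMPLE_PAGE_SIZE;	// sync two pages

	uintptr_t offset = address - areaBase + cacheOffset;
	uint32_t firstPage = offset >> EXAMPLE_PAGE_SHIFT;
	uint32_t endPage = firstPage + (rangeSize >> EXAMPLE_PAGE_SHIFT);

	// prints "pages [5, 7)": cache pages 5 and 6 are written back (MS_SYNC)
	// or scheduled for the page writer (MS_ASYNC)
	printf("pages [%u, %u)\n", (unsigned)firstPage, (unsigned)endPage);
	return 0;
}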


@@ -1306,17 +1306,26 @@ steal_pages(vm_page **pages, size_t count, bool reserve)

// #pragma mark - private kernel API


-/*!	You need to hold the vm_cache lock when calling this function.
/*!	Writes a range of modified pages of a cache to disk.
	You need to hold the vm_cache lock when calling this function.
	Note that the cache lock is released in this function.
	\param cache The cache.
	\param firstPage Offset (in page size units) of the first page in the range.
	\param endPage End offset (in page size units) of the page range. The page
		at this offset is not included.
*/
status_t
-vm_page_write_modified_pages(vm_cache *cache, bool fsReenter)
vm_page_write_modified_page_range(struct vm_cache *cache, uint32 firstPage,
	uint32 endPage, bool fsReenter)
{
-	// ToDo: join adjacent pages into one vec list
	// TODO: join adjacent pages into one vec list
	for (vm_page *page = cache->page_list; page; page = page->cache_next) {
		bool dequeuedPage = false;

		if (page->cache_offset < firstPage || page->cache_offset >= endPage)
			continue;

		if (page->state == PAGE_STATE_MODIFIED) {
			InterruptsSpinLocker locker(&sPageLock);
			remove_page_from_queue(&sModifiedPageQueue, page);
@@ -1382,6 +1391,17 @@ vm_page_write_modified_pages(vm_cache *cache, bool fsReenter)
}


/*!	You need to hold the vm_cache lock when calling this function.
	Note that the cache lock is released in this function.
*/
status_t
vm_page_write_modified_pages(vm_cache *cache, bool fsReenter)
{
	return vm_page_write_modified_page_range(cache, 0,
		cache->virtual_size >> PAGE_SHIFT, fsReenter);
}


/*!	Schedules the page writer to write back the specified \a page.
	Note, however, that it might not do this immediately, and it can well
	take several seconds until the page is actually written out.
@@ -1397,6 +1417,28 @@ vm_page_schedule_write_page(vm_page *page)
}


/*!	Cache must be locked.
*/
void
vm_page_schedule_write_page_range(struct vm_cache *cache, uint32 firstPage,
	uint32 endPage)
{
	uint32 modified = 0;
	for (vm_page *page = cache->page_list; page; page = page->cache_next) {
		bool dequeuedPage = false;

		if (page->cache_offset >= firstPage && page->cache_offset < endPage
			&& page->state == PAGE_STATE_MODIFIED) {
			vm_page_requeue(page, false);
			modified++;
		}
	}

	if (modified > 0)
		release_sem_etc(sWriterWaitSem, 1, B_DO_NOT_RESCHEDULE);
}


void
vm_page_init_num_pages(kernel_args *args)
{


@@ -142,6 +142,13 @@ munmap(void* address, size_t length)
}


int
msync(void* address, size_t length, int flags)
{
	RETURN_AND_SET_ERRNO(_kern_sync_memory(address, length, flags));
}


int
shm_open(const char* name, int openMode, mode_t permissions)
{