* VMCache::Resize(): Corrected TODO comment.
* Changed the semantics of VMCache::HasPage(). It was interpreted
  inconsistently by the derived classes. Now it returns whether the backing
  store can provide the page (via Read()). The default implementation returns
  false. VMVnodeCache::HasPage() only returns true if the given offset is
  within the cache (i.e. file) bounds. This prevents vm_soft_fault() from
  adding clean pages to vnode caches on faults beyond the file bounds.
  Probably fixes #5473 -- at least mmap_resize_test behaves correctly now.
* Removed redundant HasPage() and Fault() overrides in VMCache derived
  classes.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@36374 a95241bf-73f2-0310-859d-f6bbb57e9c96
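To make the revised HasPage() contract concrete, here is a minimal, self-contained C++ sketch. It is not Haiku kernel code: ToyCache, ToyVnodeCache and HandleFault are hypothetical stand-ins for VMCache, VMVnodeCache and the fault path, and the bounds check is simplified (the real VMVnodeCache::HasPage() rounds the offset up to a page boundary). It only illustrates why a fault beyond the backing file's bounds is now rejected instead of being satisfied with a clean page.

#include <cstdint>
#include <cstdio>

// Hypothetical model of the revised contract: HasPage() reports whether the
// backing store could deliver (at least part of) the page via Read().
struct ToyCache {
	int64_t virtual_base = 0;
	int64_t virtual_end = 0;

	// Default implementation: no backing store, so no page can be provided.
	virtual bool HasPage(int64_t /*offset*/) const { return false; }
	virtual ~ToyCache() {}
};

// Stand-in for a vnode cache: only offsets within the file bounds are readable.
struct ToyVnodeCache : ToyCache {
	bool HasPage(int64_t offset) const override
	{
		return offset >= virtual_base && offset < virtual_end;
	}
};

// Simplified fault path: consult HasPage() before reading the page in,
// instead of unconditionally inserting a clean page.
const char* HandleFault(const ToyCache& cache, int64_t offset)
{
	if (!cache.HasPage(offset))
		return "rejected: offset beyond the backing store";
	return "read page from the backing store";
}

int main()
{
	ToyVnodeCache cache;
	cache.virtual_end = 4096;	// pretend the backing file is one page long

	printf("offset    0: %s\n", HandleFault(cache, 0));
	printf("offset 8192: %s\n", HandleFault(cache, 8192));
	return 0;
}

With this toy model, the first fault succeeds (offset 0 is inside the one-page file) while the second is rejected (offset 8192 lies beyond virtual_end), mirroring the behaviour change described above.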
parent efeca209a1
commit 6e83a6fac9

src/system/kernel/cache/vnode_store.cpp
@@ -36,10 +36,8 @@ VMVnodeCache::Init(struct vnode *vnode, uint32 allocationFlags)
 bool
 VMVnodeCache::HasPage(off_t offset)
 {
-	// We always pretend to have the page - even if it's beyond the size of
-	// the file. The read function will only cut down the size of the read,
-	// it won't fail because of that.
-	return true;
+	return ROUNDUP(offset, B_PAGE_SIZE) >= virtual_base
+		&& offset < virtual_end;
 }


@@ -103,6 +101,10 @@ VMVnodeCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
 status_t
 VMVnodeCache::Fault(struct VMAddressSpace *aspace, off_t offset)
 {
+	if (!HasPage(offset))
+		return B_BAD_ADDRESS;
+
+	// vm_soft_fault() reads the page in.
 	return B_BAD_HANDLER;
 }

src/system/kernel/vm/VMCache.cpp

@@ -1078,8 +1078,9 @@ VMCache::Resize(off_t newSize, int priority)
 			DEBUG_PAGE_ACCESS_START(page);
 			vm_remove_all_page_mappings(page);
 			ASSERT(page->wired_count == 0);
-				// TODO: Find a real solution! Unmapping is probably fine, but
-				// we have no way of unmapping wired pages here.
+				// TODO: Find a real solution! If the page is wired
+				// temporarily (e.g. by lock_memory()), we actually must not
+				// unmap it!
 			RemovePage(page);
 			vm_page_free(this, page);
 				// Note: When iterating through a IteratableSplayTree
@@ -1145,10 +1146,19 @@ VMCache::Commit(off_t size, int priority)
 }


+/*!	Returns whether the cache's underlying backing store could deliver the
+	page at the given offset.
+
+	Basically it returns whether a Read() at \a offset would at least read a
+	partial page (assuming that no unexpected errors occur or the situation
+	changes in the meantime).
+*/
 bool
 VMCache::HasPage(off_t offset)
 {
-	return offset >= virtual_base && offset <= virtual_end;
+	// In accordance with Fault() the default implementation doesn't have a
+	// backing store and doesn't allow faults.
+	return false;
 }


src/system/kernel/vm/VMDeviceCache.cpp

@@ -6,6 +6,7 @@
  * Distributed under the terms of the NewOS License.
  */

+
 #include "VMDeviceCache.h"


@@ -17,14 +18,6 @@ VMDeviceCache::Init(addr_t baseAddress, uint32 allocationFlags)
 }


-bool
-VMDeviceCache::HasPage(off_t offset)
-{
-	// this should never be called
-	return false;
-}
-
-
 status_t
 VMDeviceCache::Read(off_t offset, const iovec *vecs, size_t count,
 	uint32 flags, size_t *_numBytes)
@@ -41,10 +34,3 @@ VMDeviceCache::Write(off_t offset, const iovec *vecs, size_t count,
 	// no place to write, this will cause the page daemon to skip this store
 	return B_OK;
 }
-
-
-status_t
-VMDeviceCache::Fault(struct VMAddressSpace* addressSpace, off_t offset)
-{
-	return B_BAD_ADDRESS;
-}
src/system/kernel/vm/VMDeviceCache.h

@@ -17,16 +17,11 @@ class VMDeviceCache : public VMCache {
 public:
 			status_t	Init(addr_t baseAddress, uint32 allocationFlags);

-	virtual	bool		HasPage(off_t offset);
-
 	virtual	status_t	Read(off_t offset, const iovec *vecs, size_t count,
 							uint32 flags, size_t *_numBytes);
 	virtual	status_t	Write(off_t offset, const iovec *vecs, size_t count,
 							uint32 flags, size_t *_numBytes);

-	virtual	status_t	Fault(struct VMAddressSpace* addressSpace,
-							off_t offset);
-
 private:
 			addr_t		fBaseAddress;
 };

src/system/kernel/vm/VMNullCache.cpp

@@ -3,6 +3,7 @@
  * Distributed under the terms of the MIT License.
  */

+
 #include "VMNullCache.h"


@@ -11,10 +12,3 @@ VMNullCache::Init(uint32 allocationFlags)
 {
 	return VMCache::Init(CACHE_TYPE_NULL, allocationFlags);
 }
-
-
-status_t
-VMNullCache::Fault(struct VMAddressSpace* addressSpace, off_t offset)
-{
-	return B_BAD_ADDRESS;
-}
src/system/kernel/vm/VMNullCache.h

@@ -16,9 +16,6 @@
 class VMNullCache : public VMCache {
 public:
 			status_t	Init(uint32 allocationFlags);
-
-	virtual	status_t	Fault(struct VMAddressSpace* addressSpace,
-							off_t offset);
 };

