* Added FS interface hooks io() and cancel_io(). The former is supposed

to provide asynchronous (or only synchronous, if asynchronous is not
  supported) I/O request support. It will eventually replace
  {read,write}_pages(). None of the FS implementations implement them
  yet.
* Implemented some support functions for request-based I/O. File system
  implementations can use do_fd_io() which passes an I/O request to the
  layer responsible for a given FD, and do_iterative_fd_io(), which
  translates a request for a file to subrequests for the underlying
device and passes them on. Both fall back to synchronous processing
  when the io() hook is not supported.
  Furthermore added vfs_synchronous_io() which should be handy for the
  devfs to perform io_requests synchronously for devices that don't
  support the io() hook.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26655 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2008-07-28 02:07:20 +00:00
parent 09f0e0ec68
commit ec598fe493
15 changed files with 472 additions and 4 deletions

View File

@ -21,6 +21,9 @@ struct stat;
struct fs_info;
struct select_sync;
typedef struct IORequest io_request;
/* additional flags passed to write_stat() (see NodeMonitor.h for the others) */
// NOTE: Changing the constants here or in NodeMonitor.h will break
// src/kits/storage/LibBeAdapter.cpp:_kern_write_stat().
@ -124,6 +127,12 @@ struct fs_vnode_ops {
void *cookie, off_t pos, const iovec *vecs, size_t count,
size_t *_numBytes);
/* asynchronous I/O */
status_t (*io)(fs_volume *volume, fs_vnode *vnode, void *cookie,
io_request *request);
status_t (*cancel_io)(fs_volume *volume, fs_vnode *vnode, void *cookie,
io_request *request);
/* cache file access */
status_t (*get_file_map)(fs_volume *volume, fs_vnode *vnode, off_t offset,
size_t size, struct file_io_vec *vecs, size_t *_count);
@ -284,6 +293,14 @@ typedef struct file_system_module_info {
/* file system add-ons only prototypes */
// callbacks for do_iterative_fd_io()
typedef status_t (*iterative_io_get_vecs)(void *cookie, io_request* request,
off_t offset, size_t size, struct file_io_vec *vecs,
size_t *_count);
typedef status_t (*iterative_io_finished)(void* cookie, io_request* request,
status_t status);
extern status_t new_vnode(fs_volume *volume, ino_t vnodeID, void *privateNode,
fs_vnode_ops *ops);
extern status_t publish_vnode(fs_volume *volume, ino_t vnodeID,
@ -309,6 +326,10 @@ extern status_t write_file_io_vec_pages(int fd,
const struct file_io_vec *fileVecs, size_t fileVecCount,
const struct iovec *vecs, size_t vecCount,
uint32 *_vecIndex, size_t *_vecOffset, size_t *_bytes);
extern status_t do_fd_io(int fd, io_request *request);
extern status_t do_iterative_fd_io(int fd, io_request *request,
iterative_io_get_vecs getVecs,
iterative_io_finished finished, void *cookie);
extern status_t notify_entry_created(dev_t device, ino_t directory,
const char *name, ino_t node);

View File

@ -26,6 +26,8 @@ typedef fssh_ino_t fssh_vnode_id;
/* the file system's private data structures */
typedef void *fssh_fs_cookie;
typedef struct FSSHIORequest fssh_io_request;
/* additional flags passed to write_stat() */
#define FSSH_B_STAT_SIZE_INSECURE 0x2000
@ -142,6 +144,12 @@ struct fssh_fs_vnode_ops {
fssh_fs_cookie cookie, fssh_off_t pos, const fssh_iovec *vecs,
fssh_size_t count, fssh_size_t *_numBytes);
/* asynchronous I/O */
fssh_status_t (*io)(fssh_fs_volume *volume, fssh_fs_vnode *vnode,
void *cookie, fssh_io_request *request);
fssh_status_t (*cancel_io)(fssh_fs_volume *volume, fssh_fs_vnode *vnode,
void *cookie, fssh_io_request *request);
/* cache file access */
fssh_status_t (*get_file_map)(fssh_fs_volume *volume, fssh_fs_vnode *vnode,
fssh_off_t offset, fssh_size_t size,

View File

@ -97,6 +97,10 @@ status_t vfs_read_pages(struct vnode *vnode, void *cookie, off_t pos,
const iovec *vecs, size_t count, size_t *_numBytes);
status_t vfs_write_pages(struct vnode *vnode, void *cookie, off_t pos,
const iovec *vecs, size_t count, size_t *_numBytes);
status_t vfs_synchronous_io(io_request* request,
status_t (*doIO)(void* cookie, off_t offset, void* buffer,
size_t length),
void* cookie);
status_t vfs_get_vnode_cache(struct vnode *vnode, struct VMCache **_cache,
bool allocate);
status_t vfs_get_file_map(struct vnode *vnode, off_t offset, size_t size,

View File

@ -2165,6 +2165,9 @@ fs_vnode_ops gBFSVnodeOps = {
&bfs_read_pages,
&bfs_write_pages,
NULL, // io()
NULL, // cancel_io()
&bfs_get_file_map,
&bfs_ioctl,

View File

@ -2014,6 +2014,9 @@ fs_vnode_ops gCDDAVnodeOps = {
cdda_read_pages,
cdda_write_pages,
NULL, // io()
NULL, // cancel_io()
NULL, // get_file_map()
// common

View File

@ -509,6 +509,9 @@ fs_vnode_ops gExt2VnodeOps = {
&ext2_read_pages,
NULL,
NULL, // io()
NULL, // cancel_io()
&ext2_get_file_map,
NULL,

View File

@ -1268,6 +1268,9 @@ fs_vnode_ops gFATVnodeOps = {
&dosfs_read_pages,
&dosfs_write_pages,
NULL, // io()
NULL, // cancel_io()
&dosfs_get_file_map,
&dosfs_ioctl,

View File

@ -679,6 +679,9 @@ fs_vnode_ops gISO9660VnodeOps = {
&fs_read_pages,
NULL,
NULL, // io()
NULL, // cancel_io()
/* cache file access */
NULL,

View File

@ -754,6 +754,9 @@ fs_vnode_ops gReiserFSVnodeOps = {
NULL, // &reiserfs_read_pages,
NULL, // &reiserfs_write_pages,
NULL, // io()
NULL, // cancel_io()
NULL, // &reiserfs_get_file_map,
NULL, // &reiserfs_ioctl,

View File

@ -1836,6 +1836,9 @@ fs_vnode_ops kVnodeOps = {
&devfs_read_pages,
&devfs_write_pages,
NULL, // io()
NULL, // cancel_io()
NULL, // get_file_map
/* common */

View File

@ -5,6 +5,8 @@ UsePrivateHeaders [ FDirName kernel fs ] ;
UsePrivateHeaders [ FDirName kernel util ] ;
UsePrivateHeaders net shared storage ;
UseHeaders [ FDirName $(SUBDIR) $(DOTDOT) device_manager ] ;
KernelMergeObject kernel_fs.o :
fd.cpp
fifo.cpp

View File

@ -980,6 +980,9 @@ static fs_vnode_ops sFIFOVnodeOps = {
&fifo_read_pages,
&fifo_write_pages,
NULL, // io()
NULL, // cancel_io()
NULL, // get_file_map
/* common */

View File

@ -1146,6 +1146,9 @@ fs_vnode_ops sVnodeOps = {
&rootfs_read_pages,
&rootfs_write_pages,
NULL, // io()
NULL, // cancel_io()
NULL, // get_file_map()
/* common */

View File

@ -8705,3 +8705,6 @@ _user_open_query(dev_t device, const char *userQuery, size_t queryLength,
free(query);
return fd;
}
#include "vfs_request_io.cpp"

View File

@ -0,0 +1,403 @@
/*
* Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
// included by vfs.cpp
#include "io_requests.h"
// Per-request state for do_iterative_fd_io(). Allocated there and freed in
// do_iterative_fd_io_finish().
struct iterative_io_cookie {
	struct vnode* vnode;			// node the subrequests are passed to
	file_descriptor* descriptor;	// FD reference; put in the finish hook
	iterative_io_get_vecs get_vecs;	// FS callback: file range -> device vecs
	iterative_io_finished finished;	// FS callback: whole request done
	void* cookie;					// FS cookie for both callbacks
	off_t request_offset;			// offset up to which subrequests have
									// been created so far
	io_request_finished_callback next_finished_callback;
									// finished callback that was installed on
									// the request before ours; chained to in
									// do_iterative_fd_io_finish()
	void* next_finished_cookie;
};
/*!	Abstract helper performing chunks of I/O on a request buffer.
	IO() transparently handles buffers referring to physical memory by
	mapping them one page at a time; subclasses implement InternalIO()
	to do the actual transfer on a virtual address.
*/
class DoIO {
public:
	DoIO(bool write, bool isPhysical)
		:
		fWrite(write),
		fIsPhysical(isPhysical)
	{
	}

	virtual ~DoIO()
	{
	}

	/*!	Transfers \a length bytes between file position \a offset and
		\a _buffer. If the buffer address is physical, it is mapped
		page-wise and each piece is forwarded to InternalIO().
	*/
	status_t IO(off_t offset, void* _buffer, size_t length)
	{
		if (!fIsPhysical)
			return InternalIO(offset, _buffer, length);

		// buffer points to physical address -- map pages
		addr_t buffer = (addr_t)_buffer;

		while (length > 0) {
			addr_t pageOffset = buffer % B_PAGE_SIZE;
			addr_t virtualAddress;
			status_t error = vm_get_physical_page(buffer - pageOffset,
				&virtualAddress, 0);
			if (error != B_OK)
				return error;

			size_t toTransfer = min_c(length, B_PAGE_SIZE - pageOffset);
			error = InternalIO(offset, (void*)(virtualAddress + pageOffset),
				toTransfer);
			vm_put_physical_page(virtualAddress);

			if (error != B_OK)
				return error;

			// Fixed: advance the I/O offset along with the buffer address.
			// Previously every page after the first was transferred at the
			// initial offset.
			offset += toTransfer;
			buffer += toTransfer;
			length -= toTransfer;
		}

		return B_OK;
	}

protected:
	virtual status_t InternalIO(off_t offset, void* buffer, size_t length) = 0;

protected:
	bool fWrite;		// true for write requests (read by subclasses)
	bool fIsPhysical;	// whether buffer addresses are physical
};
class CallbackIO : public DoIO {
public:
CallbackIO(bool write, bool isPhysical,
status_t (*doIO)(void* cookie, off_t offset, void* buffer,
size_t length),
void* cookie)
:
DoIO(write, isPhysical),
fDoIO(doIO),
fCookie(cookie)
{
}
protected:
virtual status_t InternalIO(off_t offset, void* buffer, size_t length)
{
return fDoIO(fCookie, offset, buffer, length);
}
private:
status_t (*fDoIO)(void*, off_t, void*, size_t);
void* fCookie;
};
/*!	DoIO implementation that performs the transfer through a vnode's
	read()/write() FS hooks. Serves as the synchronous fallback when a
	file system doesn't implement io().
*/
class VnodeIO : public DoIO {
public:
	VnodeIO(bool write, bool isPhysical, struct vnode* vnode, void* cookie)
		:
		DoIO(write, isPhysical),
		fVnode(vnode),
		fCookie(cookie)
	{
	}

protected:
	virtual status_t InternalIO(off_t offset, void* buffer, size_t length)
	{
		size_t transferred = length;
		status_t status;
		if (fWrite) {
			status = FS_CALL(fVnode, write, fCookie, offset, buffer,
				&transferred);
		} else {
			status = FS_CALL(fVnode, read, fCookie, offset, buffer,
				&transferred);
		}

		// a short transfer counts as an error
		if (status == B_OK && transferred != length)
			status = B_FILE_ERROR;
		return status;
	}

private:
	struct vnode* fVnode;
	void* fCookie;
};
/*!	Iteration callback installed by do_iterative_fd_io(): asks the FS for
	the next batch of file vecs (via get_vecs), creates up to
	kMaxSubRequests subrequests for them, advances the request, and
	schedules the subrequests via the FS's io() hook.
	\return B_OK if any subrequest could be created, or the error that
		prevented creating the first one.
*/
static status_t
do_iterative_fd_io_iterate(void* _cookie, io_request* request)
{
	static const int32 kMaxSubRequests = 8;

	iterative_io_cookie* cookie = (iterative_io_cookie*)_cookie;

	// drop the subrequests of the previous iteration
	request->DeleteSubRequests();

	// compute where this iteration resumes within the request
	off_t requestOffset = cookie->request_offset;
	size_t requestLength = request->Length()
		- (requestOffset - request->Offset());

	// get the next file vecs
	file_io_vec vecs[kMaxSubRequests];
	uint32 vecCount = kMaxSubRequests;
	status_t error = cookie->get_vecs(cookie->cookie, request, requestOffset,
		requestLength, vecs, &vecCount);
	if (error != B_OK)
		return error;
	if (vecCount == 0)
		return B_FILE_ERROR;

	// create subrequests for the file vecs we've got
	int32 subRequestCount = 0;
	for (uint32 i = 0; i < vecCount && subRequestCount < kMaxSubRequests; i++) {
		off_t vecOffset = vecs[i].offset;
		off_t vecLength = vecs[i].length;

		while (vecLength > 0 && subRequestCount < kMaxSubRequests) {
			IORequest* subRequest;
			error = request->CreateSubRequest(requestOffset, vecOffset,
				vecLength, subRequest);
			if (error != B_OK)
				break;

			subRequestCount++;

			// a subrequest may cover less than we asked for
			size_t lengthProcessed = subRequest->Length();
			vecOffset += lengthProcessed;
			vecLength -= lengthProcessed;
			requestOffset += lengthProcessed;
			requestLength -= lengthProcessed;
		}
	}

	// Only if we couldn't create any subrequests, we fail.
	if (error != B_OK && subRequestCount == 0)
		return error;

	request->Advance(requestOffset - cookie->request_offset);
	cookie->request_offset = requestOffset;

	// Schedule the subrequests.
	// NOTE(review): the loop condition calls NextSubRequest() before the
	// first body execution, so the subrequest returned by FirstSubRequest()
	// appears never to be scheduled -- confirm NextSubRequest() semantics
	// against IORequest.
	for (IORequest* subRequest = request->FirstSubRequest();
		(subRequest = request->NextSubRequest(subRequest)) != NULL;) {
		if (error == B_OK) {
			error = FS_CALL(cookie->vnode, io, cookie->descriptor->cookie,
				subRequest);
		} else {
			// Once scheduling a subrequest failed, we cancel all subsequent
			// subrequests.
			subRequest->SetStatusAndNotify(B_CANCELED);
		}
	}

	// TODO: Cancel the subrequests that were scheduled successfully.

	return B_OK;
}
/*!	Finished callback installed by do_iterative_fd_io(): notifies the FS,
	releases the FD reference, chains to the previously installed finished
	callback, and frees the iteration state.
*/
static status_t
do_iterative_fd_io_finish(void* _cookie, io_request* request, status_t status)
{
	iterative_io_cookie* iterationCookie = (iterative_io_cookie*)_cookie;

	// let the FS know the request is done
	if (iterationCookie->finished != NULL)
		iterationCookie->finished(iterationCookie->cookie, request, status);

	// release the reference do_iterative_fd_io() acquired
	put_fd(iterationCookie->descriptor);

	// chain to the finished callback that was installed before ours
	if (iterationCookie->next_finished_callback != NULL) {
		iterationCookie->next_finished_callback(
			iterationCookie->next_finished_cookie, request, status);
	}

	delete iterationCookie;
	return B_OK;
}
/*!	Synchronous fallback for do_iterative_fd_io(): processes \a request by
	repeatedly asking \a getVecs for file vecs and performing the resulting
	transfers directly through \a vnode's read()/write() hooks.
	Always invokes \a finished and notifies the request before returning.
	\return the final status of the request.
*/
static status_t
do_synchronous_iterative_vnode_io(struct vnode* vnode, void* openCookie,
	io_request* request, iterative_io_get_vecs getVecs,
	iterative_io_finished finished, void* cookie)
{
	IOBuffer* buffer = request->Buffer();
	VnodeIO io(request->IsWrite(), buffer->IsPhysical(), vnode, openCookie);

	iovec* vecs = buffer->Vecs();
	int32 vecCount = buffer->VecCount();
	off_t offset = request->Offset();
	size_t length = request->Length();

	status_t error = B_OK;

	for (int32 i = 0; error == B_OK && length > 0 && i < vecCount; i++) {
		uint8* vecBase = (uint8*)vecs[i].iov_base;
		size_t vecLength = vecs[i].iov_len;
		if (vecLength > length)
			vecLength = length;

		while (error == B_OK && vecLength > 0) {
			file_io_vec fileVecs[8];
			uint32 fileVecCount = 8;
			error = getVecs(cookie, request, offset, vecLength, fileVecs,
				&fileVecCount);
			if (error == B_OK && fileVecCount == 0)
				error = B_FILE_ERROR;
			if (error != B_OK)
				break;

			for (uint32 k = 0; k < fileVecCount; k++) {
				// Fixed: index the file vecs with the inner loop variable k;
				// previously this used the outer iovec index i, reading the
				// wrong (possibly uninitialized) vec.
				const file_io_vec& fileVec = fileVecs[k];
				// NOTE(review): assumes getVecs() returns at most vecLength
				// bytes in total -- otherwise vecLength/length would
				// underflow below; verify against FS implementations.
				error = io.IO(fileVec.offset, vecBase, fileVec.length);
				if (error != B_OK)
					break;

				offset += fileVec.length;
				length -= fileVec.length;
				vecBase += fileVec.length;
				vecLength -= fileVec.length;
			}
		}
	}

	finished(cookie, request, error);
	request->SetStatusAndNotify(error);
	return error;
}
/*!	Executes \a request synchronously by feeding each of its buffer vecs
	to \a io, and notifies the request with the resulting status.
*/
static status_t
synchronous_io(io_request* request, DoIO& io)
{
	IOBuffer* buffer = request->Buffer();
	iovec* vecList = buffer->Vecs();
	int32 count = buffer->VecCount();

	off_t pos = request->Offset();
	size_t bytesLeft = request->Length();
	status_t status = B_OK;

	for (int32 index = 0; status == B_OK && bytesLeft > 0 && index < count;
			index++) {
		// never transfer more than the request has left
		size_t toTransfer = min_c(bytesLeft, vecList[index].iov_len);
		status = io.IO(pos, vecList[index].iov_base, toTransfer);
		if (status == B_OK) {
			pos += toTransfer;
			bytesLeft -= toTransfer;
		}
	}

	request->SetStatusAndNotify(status);
	return status;
}
// #pragma mark - kernel private API
/*!	Kernel-private helper: performs \a request synchronously by invoking
	\a doIO for each chunk of the request's buffer -- handy e.g. for the
	devfs to serve devices that don't support the io() hook.
*/
extern "C" status_t
vfs_synchronous_io(io_request* request,
	status_t (*doIO)(void* cookie, off_t offset, void* buffer, size_t length),
	void* cookie)
{
	CallbackIO callbackIO(request->IsWrite(),
		request->Buffer()->IsPhysical(), doIO, cookie);
	return synchronous_io(request, callbackIO);
}
// #pragma mark - public API
/*!	Passes \a request to the layer responsible for the given FD. Falls
	back to synchronous processing through the vnode's read()/write()
	hooks when the FS doesn't implement io().
*/
extern "C" status_t
do_fd_io(int fd, io_request* request)
{
	struct vnode* vnode;
	file_descriptor* descriptor = get_fd_and_vnode(fd, &vnode, true);
	if (descriptor == NULL) {
		request->SetStatusAndNotify(B_FILE_ERROR);
		return B_FILE_ERROR;
	}

	// make sure the descriptor is put when we leave
	CObjectDeleter<file_descriptor> descriptorPutter(descriptor, put_fd);

	if (HAS_FS_CALL(vnode, io))
		return FS_CALL(vnode, io, descriptor->cookie, request);

	// no io() call -- fall back to synchronous I/O
	VnodeIO vnodeIO(request->IsWrite(), request->Buffer()->IsPhysical(),
		vnode, descriptor->cookie);
	return synchronous_io(request, vnodeIO);
}
/*!	Translates \a request for a file into subrequests for the underlying
	device and passes them on, using \a getVecs to map file ranges to
	device vecs. Falls back to synchronous processing when the FS doesn't
	support the io() hook (or on memory shortage). \a finished is always
	invoked eventually, with the request's final status.
*/
extern "C" status_t
do_iterative_fd_io(int fd, io_request* request, iterative_io_get_vecs getVecs,
	iterative_io_finished finished, void* cookie)
{
	struct vnode* vnode;
	file_descriptor* descriptor = get_fd_and_vnode(fd, &vnode, true);
	if (descriptor == NULL) {
		finished(cookie, request, B_FILE_ERROR);
		request->SetStatusAndNotify(B_FILE_ERROR);
		return B_FILE_ERROR;
	}

	// put the descriptor on all early-exit paths
	CObjectDeleter<file_descriptor> descriptorPutter(descriptor, put_fd);

	if (!HAS_FS_CALL(vnode, io)) {
		// no io() call -- fall back to synchronous I/O
		return do_synchronous_iterative_vnode_io(vnode, descriptor->cookie,
			request, getVecs, finished, cookie);
	}

	iterative_io_cookie* iterationCookie
		= new(std::nothrow) iterative_io_cookie;
		// TODO: Heed B_VIP_IO_REQUEST!
	if (iterationCookie == NULL) {
		// no memory -- fall back to synchronous I/O
		return do_synchronous_iterative_vnode_io(vnode, descriptor->cookie,
			request, getVecs, finished, cookie);
	}

	iterationCookie->vnode = vnode;
	iterationCookie->descriptor = descriptor;
	iterationCookie->get_vecs = getVecs;
	iterationCookie->finished = finished;
	iterationCookie->cookie = cookie;
	iterationCookie->request_offset = request->Offset();
	// remember the previously installed finished callback, so
	// do_iterative_fd_io_finish() can chain to it
	iterationCookie->next_finished_callback = request->FinishedCallback(
		&iterationCookie->next_finished_cookie);

	request->SetFinishedCallback(&do_iterative_fd_io_finish, iterationCookie);
	request->SetIterationCallback(&do_iterative_fd_io_iterate, iterationCookie);

	// start the first iteration
	status_t error = do_iterative_fd_io_iterate(iterationCookie, request);
	if (error != B_OK) {
		// NOTE(review): our finished callback is already installed at this
		// point; if SetStatusAndNotify() invokes it, finished() may run
		// twice and the FD be put twice (there and via descriptorPutter) --
		// confirm IORequest notification semantics.
		finished(cookie, request, error);
		request->SetStatusAndNotify(error);
		return error;
	}

	// from now on do_iterative_fd_io_finish() owns the FD reference
	descriptorPutter.Detach();
	return B_OK;
}