Implemented force unmounting - by passing the "-f" option to "unmount" you
can now safely unmount volumes that are still in use by some applications.
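
For illustration, a hedged userland sketch of what the new flag enables. It
assumes the fs_unmount_volume() API from <fs_volume.h> (the "unmount" command
is presumably a thin wrapper around a call like this); "/DataDisk" is a
made-up mount point, while B_FORCE_UNMOUNT is the flag handled in fs_unmount()
below:

#include <fs_volume.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	// ask the kernel to unmount even though files may still be open;
	// open descriptors on the volume get disconnected instead of
	// keeping the unmount from ever succeeding
	status_t status = fs_unmount_volume("/DataDisk", B_FORCE_UNMOUNT);
	if (status != B_OK) {
		fprintf(stderr, "unmount failed: %s\n", strerror(status));
		return 1;
	}
	return 0;
}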
Minor fixes to the FD disconnection implementation:
* put_fd() checked the condition for being able to disconnect a descriptor
  incorrectly (causing the FD to never be disconnected).
* remove_fd() would hand out disconnected descriptors (but should have
  returned NULL for them).


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15952 a95241bf-73f2-0310-859d-f6bbb57e9c96
Author: Axel Dörfler
Date:   2006-01-15 19:26:42 +00:00
Commit: ddb7d0b6a6 (parent: 60e5ea42e0)

3 changed files with 142 additions and 17 deletions

File 1 of 3: FD function declarations (kernel header; file names are not
preserved in this view)

@@ -73,6 +73,7 @@ extern struct file_descriptor *get_fd(struct io_context *, int);
 extern void close_fd(struct file_descriptor *descriptor);
 extern void put_fd(struct file_descriptor *descriptor);
 extern void disconnect_fd(struct file_descriptor *descriptor);
+extern void inc_fd_ref_count(struct file_descriptor *descriptor);
 extern status_t select_fd(int fd, uint8 event, uint32 ref,
 	struct select_sync *sync, bool kernel);
 extern status_t deselect_fd(int fd, uint8 event, struct select_sync *sync,

File 2 of 3: FD implementation

@@ -139,7 +139,7 @@ put_fd(struct file_descriptor *descriptor)
 
 		free(descriptor);
 	} else if ((descriptor->open_mode & O_DISCONNECTED) != 0
-		&& previous == descriptor->open_count) {
+		&& previous - 1 == descriptor->open_count) {
 		// the descriptor has been disconnected - it cannot
 		// be accessed anymore, let's close it (no one is
 		// currently accessing this descriptor)
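
Why "previous - 1": atomic_add() in this kernel returns the value the variable
held before the addition, so after the decrement the live count is
previous - 1. A minimal single-threaded model of the corrected check; the
struct mirrors the kernel's fields, but O_DISCONNECTED's value and the
atomic_add_model() helper are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define O_DISCONNECTED 0x40000000	/* illustrative value, not the real one */

struct file_descriptor {
	int32_t ref_count;	/* all references, including the open ones */
	int32_t open_count;	/* references held by open()-style owners */
	int open_mode;
};

/* models atomic_add(): returns the value *before* the addition */
static int32_t
atomic_add_model(int32_t *value, int32_t diff)
{
	int32_t previous = *value;
	*value += diff;
	return previous;
}

static void
put_fd_model(struct file_descriptor *descriptor)
{
	int32_t previous = atomic_add_model(&descriptor->ref_count, -1);

	if (previous == 1) {
		puts("last reference gone: close and free");
	} else if ((descriptor->open_mode & O_DISCONNECTED) != 0
		&& previous - 1 == descriptor->open_count) {
		// ref_count just dropped to open_count: only the open references
		// remain and no one is actively using the descriptor. The old
		// check compared the pre-decrement value "previous" against
		// open_count, which is off by one, so disconnected descriptors
		// were never closed here.
		puts("disconnected and idle: close now");
	}
}

int
main(void)
{
	struct file_descriptor fd = { 2, 1, O_DISCONNECTED };
	put_fd_model(&fd);	/* prints "disconnected and idle: close now" */
	return 0;
}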
@@ -188,6 +188,13 @@ disconnect_fd(struct file_descriptor *descriptor)
 }
 
 
+void
+inc_fd_ref_count(struct file_descriptor *descriptor)
+{
+	atomic_add(&descriptor->ref_count, 1);
+}
+
+
 struct file_descriptor *
 get_fd(struct io_context *context, int fd)
 {
@@ -206,7 +213,7 @@ get_fd(struct io_context *context, int fd)
 		if (descriptor->open_mode & O_DISCONNECTED)
 			descriptor = NULL;
 		else
-			atomic_add(&descriptor->ref_count, 1);
+			inc_fd_ref_count(descriptor);
 	}
 
 	mutex_unlock(&context->io_mutex);
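
Note the contract this preserves: get_fd() hands out NULL both for invalid
slots and for disconnected descriptors, and a successful lookup takes a
reference that the caller must return with put_fd(). A sketch of the caller
pattern; do_fd_operation() is a hypothetical name:

static status_t
do_fd_operation(struct io_context *context, int fd)
{
	struct file_descriptor *descriptor = get_fd(context, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;	// invalid slot or disconnected descriptor

	// ... operate on the descriptor while holding the reference ...

	put_fd(descriptor);	// drops the reference get_fd() took
	return B_OK;
}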
@@ -231,10 +238,14 @@ remove_fd(struct io_context *context, int fd)
 	if ((uint32)fd < context->table_size)
 		descriptor = context->fds[fd];
 
-	if (descriptor) { // fd is valid
+	if (descriptor) {
+		// fd is valid
 		context->fds[fd] = NULL;
 		fd_set_close_on_exec(context, fd, false);
 		context->num_used_fds--;
+
+		if (descriptor->open_mode & O_DISCONNECTED)
+			descriptor = NULL;
 	}
 
 	mutex_unlock(&context->io_mutex);
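
This is the second fix from the commit message: remove_fd() still clears the
slot and the close-on-exec bit, but it now reports NULL for a descriptor that
was disconnected, so callers no longer operate on it. A hypothetical close
path showing the contract (common_close_model() is a made-up name; the real
close path is similar in spirit):

static status_t
common_close_model(struct io_context *context, int fd)
{
	struct file_descriptor *descriptor = remove_fd(context, fd);
	if (descriptor == NULL)
		return B_FILE_ERROR;	// empty slot, or already disconnected

	close_fd(descriptor);
	put_fd(descriptor);	// releases the reference the slot held
	return B_OK;
}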

File 3 of 3: VFS implementation

@@ -1249,7 +1249,6 @@ resolve_mount_point_to_volume_root(mount_id mountID, vnode_id nodeID,
 	if (error != B_OK)
 		return error;
 
-
 	// resolve the node
 	struct vnode *resolvedNode = resolve_mount_point_to_volume_root(node);
 	if (resolvedNode) {
@@ -1538,7 +1537,7 @@ static status_t
 path_to_vnode(char *path, bool traverseLink, struct vnode **_vnode,
 	vnode_id *_parentID, bool kernel)
 {
-	struct vnode *start;
+	struct vnode *start = NULL;
 
 	FUNCTION(("path_to_vnode(path = \"%s\")\n", path));
 
@@ -1561,8 +1560,12 @@ path_to_vnode(char *path, bool traverseLink, struct vnode **_vnode,
 		mutex_lock(&context->io_mutex);
 		start = context->cwd;
-		inc_vnode_ref_count(start);
+		if (start != NULL)
+			inc_vnode_ref_count(start);
 
 		mutex_unlock(&context->io_mutex);
+
+		if (start == NULL)
+			return B_ERROR;
 	}
 
 	return vnode_path_to_vnode(start, path, traverseLink, 0, _vnode, _parentID, NULL);
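
The NULL guard is needed because of the forced-unmount path further down:
fs_unmount() may set a team's context->cwd to NULL when the working directory
lived on the unmounted volume (and was not the mount's root), so a later
relative path lookup has to fail with B_ERROR instead of dereferencing a NULL
cwd.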
@@ -3015,8 +3018,6 @@ vfs_free_io_context(void *_ioContext)
 		}
 	}
 	mutex_unlock(&context->io_mutex);
-
-
 	mutex_destroy(&context->io_mutex);
 
 	remove_node_monitors(context);
@@ -3826,10 +3827,12 @@ common_fcntl(int fd, int op, uint32 argument, bool kernel)
 			// we only accept changes to O_APPEND and O_NONBLOCK
 			argument &= O_APPEND | O_NONBLOCK;
 
-			status = FS_CALL(vnode, set_flags)(vnode->mount->cookie, vnode->private_node, descriptor->cookie, (int)argument);
+			status = FS_CALL(vnode, set_flags)(vnode->mount->cookie,
+				vnode->private_node, descriptor->cookie, (int)argument);
 			if (status == B_OK) {
 				// update this descriptor's open_mode field
-				descriptor->open_mode = (descriptor->open_mode & ~(O_APPEND | O_NONBLOCK)) | argument;
+				descriptor->open_mode = (descriptor->open_mode & ~(O_APPEND | O_NONBLOCK))
+					| argument;
 			}
 		} else
 			status = EOPNOTSUPP;
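
From userland this is the standard F_SETFL path: only O_APPEND and O_NONBLOCK
can be toggled, anything else is silently masked away before the file system
sees it. A plain POSIX example of the usual read-modify-write idiom:

#include <fcntl.h>

/* Toggle non-blocking mode on an open descriptor; returns 0 on success,
 * -1 on error. Only O_APPEND and O_NONBLOCK survive the kernel-side mask. */
static int
set_nonblocking(int fd, int on)
{
	int flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0)
		return -1;
	if (on)
		flags |= O_NONBLOCK;
	else
		flags &= ~O_NONBLOCK;
	return fcntl(fd, F_SETFL, flags) < 0 ? -1 : 0;
}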
@@ -5138,19 +5141,129 @@ fs_unmount(char *path, uint32 flags, bool kernel)
 	// from the path_to_vnode() call above
 	mount->root_vnode->ref_count -= 2;
 
-	// cycle through the list of vnodes associated with this mount and
-	// make sure all of them are not busy or have refs on them
-	vnode = NULL;
-	while ((vnode = (struct vnode *)list_get_next_item(&mount->vnodes, vnode)) != NULL) {
-		if (vnode->busy || vnode->ref_count != 0) {
-			// there are still vnodes in use on this mount, so we cannot unmount yet
-			// ToDo: cut read/write access file descriptors, depending on the B_FORCE_UNMOUNT flag
-			mount->root_vnode->ref_count += 2;
-			mutex_unlock(&sVnodeMutex);
-			put_vnode(mount->root_vnode);
-			return B_BUSY;
-		}
-	}
+	bool disconnectedDescriptors = false;
+
+	while (true) {
+		bool busy = false;
+
+		// cycle through the list of vnodes associated with this mount and
+		// make sure all of them are not busy or have refs on them
+		vnode = NULL;
+		while ((vnode = (struct vnode *)list_get_next_item(&mount->vnodes, vnode)) != NULL) {
+			if (vnode->busy || vnode->ref_count != 0) {
+				// there are still vnodes in use on this mount, so we cannot
+				// unmount yet
+				busy = true;
+				break;
+			}
+		}
+
+		if (!busy)
+			break;
+
+		if ((flags & B_FORCE_UNMOUNT) == 0) {
+			mount->root_vnode->ref_count += 2;
+			mutex_unlock(&sVnodeMutex);
+			put_vnode(mount->root_vnode);
+
+			return B_BUSY;
+		}
+
+		if (disconnectedDescriptors) {
+			// wait a bit until the last access is finished, and then try again
+			mutex_unlock(&sVnodeMutex);
+			snooze(100000);
+			mutex_lock(&sVnodeMutex);
+			continue;
+		}
+
+		// the file system is still busy - but we're forced to unmount it,
+		// so let's disconnect all open file descriptors
+
+		mount->unmounting = true;
+			// prevent new vnodes from being created
+
+		mutex_unlock(&sVnodeMutex);
+
+		// iterate over all teams and peek into their file descriptors
+
+		int32 nextTeamID = 0;
+
+		while (true) {
+			struct io_context *context = NULL;
+			sem_id contextMutex = -1;
+			struct team *team = NULL;
+			team_id lastTeamID;
+
+			cpu_status state = disable_interrupts();
+			GRAB_TEAM_LOCK();
+
+			lastTeamID = peek_next_thread_id();
+			if (nextTeamID < lastTeamID) {
+				// get next valid team
+				while (nextTeamID < lastTeamID
+					&& !(team = team_get_team_struct_locked(nextTeamID))) {
+					nextTeamID++;
+				}
+
+				if (team) {
+					context = (io_context *)team->io_context;
+					contextMutex = context->io_mutex.sem;
+					nextTeamID++;
+				}
+			}
+
+			RELEASE_TEAM_LOCK();
+			restore_interrupts(state);
+
+			if (context == NULL)
+				break;
+
+			// we now have a context - since we couldn't lock it while having
+			// safe access to the team structure, we now need to lock the mutex
+			// manually
+
+			if (acquire_sem(contextMutex) != B_OK) {
+				// team seems to be gone, go over to the next team
+				continue;
+			}
+
+			// the team cannot be deleted completely while we're owning its
+			// io_context mutex, so we can safely play with it now
+
+			context->io_mutex.holder = thread_get_current_thread_id();
+
+			if (context->cwd != NULL && context->cwd->mount == mount) {
+				put_vnode(context->cwd);
+
+				if (context->cwd == mount->root_vnode) {
+					// redirect the current working directory to the covered vnode
+					context->cwd = mount->covers_vnode;
+					inc_vnode_ref_count(context->cwd);
+				} else
+					context->cwd = NULL;
+			}
+
+			for (uint32 i = 0; i < context->table_size; i++) {
+				if (struct file_descriptor *descriptor = context->fds[i]) {
+					inc_fd_ref_count(descriptor);
+
+					// if this descriptor points at this mount, we
+					// need to disconnect it to be able to unmount
+					vnode = fd_vnode(descriptor);
+					if (vnode != NULL && vnode->mount == mount
+						|| vnode == NULL && descriptor->u.mount == mount)
+						disconnect_fd(descriptor);
+
+					put_fd(descriptor);
+				}
+			}
+
+			mutex_unlock(&context->io_mutex);
+		}
+
+		disconnectedDescriptors = true;
+		mutex_lock(&sVnodeMutex);
+	}
 
 	// we can safely continue, mark all of the vnodes busy and this mount
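
Two design points worth noting: descriptors are disconnected only once (the
disconnectedDescriptors flag guards that); on later iterations the loop merely
snoozes and re-checks until in-flight accesses drain, since disconnect_fd()
cannot interrupt an operation that is already running. From a victim
application's point of view, I/O on a disconnected descriptor simply starts
failing instead of taking the whole system down with the unmount; a hedged
sketch of what such an application observes:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* fd was opened on the volume before "unmount -f"; afterwards the kernel
 * has disconnected it, so further reads fail with an error (which error
 * code exactly is an assumption here). */
static void
poll_unmounted_file(int fd)
{
	char buffer[256];
	ssize_t bytesRead = read(fd, buffer, sizeof(buffer));
	if (bytesRead < 0)
		fprintf(stderr, "read after forced unmount: %s\n", strerror(errno));
}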