/*	$NetBSD: puffs_vnops.c,v 1.169 2012/07/23 12:15:30 manu Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007  Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Google Summer of Code program and the Ulla Tuominen Foundation.
 * The Google SoC project was mentored by Bill Studenmund.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: puffs_vnops.c,v 1.169 2012/07/23 12:15:30 manu Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/kernel.h> /* For hz, hardclock_ticks */

#include <uvm/uvm.h>

#include <fs/puffs/puffs_msgif.h>
#include <fs/puffs/puffs_sys.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

int	puffs_vnop_lookup(void *);
int	puffs_vnop_create(void *);
int	puffs_vnop_access(void *);
int	puffs_vnop_mknod(void *);
int	puffs_vnop_open(void *);
int	puffs_vnop_close(void *);
int	puffs_vnop_getattr(void *);
int	puffs_vnop_setattr(void *);
int	puffs_vnop_reclaim(void *);
int	puffs_vnop_readdir(void *);
int	puffs_vnop_poll(void *);
int	puffs_vnop_fsync(void *);
int	puffs_vnop_seek(void *);
int	puffs_vnop_remove(void *);
int	puffs_vnop_mkdir(void *);
int	puffs_vnop_rmdir(void *);
int	puffs_vnop_link(void *);
int	puffs_vnop_readlink(void *);
int	puffs_vnop_symlink(void *);
int	puffs_vnop_rename(void *);
int	puffs_vnop_read(void *);
int	puffs_vnop_write(void *);
int	puffs_vnop_fcntl(void *);
int	puffs_vnop_ioctl(void *);
int	puffs_vnop_inactive(void *);
int	puffs_vnop_print(void *);
int	puffs_vnop_pathconf(void *);
int	puffs_vnop_advlock(void *);
int	puffs_vnop_strategy(void *);
int	puffs_vnop_bmap(void *);
int	puffs_vnop_mmap(void *);
int	puffs_vnop_getpages(void *);
int	puffs_vnop_abortop(void *);
int	puffs_vnop_getextattr(void *);
int	puffs_vnop_setextattr(void *);
int	puffs_vnop_listextattr(void *);
int	puffs_vnop_deleteextattr(void *);

int	puffs_vnop_spec_read(void *);
int	puffs_vnop_spec_write(void *);
int	puffs_vnop_fifo_read(void *);
int	puffs_vnop_fifo_write(void *);

int	puffs_vnop_checkop(void *);

#define puffs_vnop_lock		genfs_lock
#define puffs_vnop_unlock	genfs_unlock
#define puffs_vnop_islocked	genfs_islocked

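/*
 * Operation vectors.  Entries routed through puffs_vnop_checkop (see
 * below) are first screened against the operation mask advertised by
 * the file server; entries marked "REAL" in the comments dispatch
 * directly to a kernel-side implementation.
 */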
int (**puffs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc puffs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, puffs_vnop_lookup },	/* REAL lookup */
	{ &vop_create_desc, puffs_vnop_checkop },	/* create */
	{ &vop_mknod_desc, puffs_vnop_checkop },	/* mknod */
	{ &vop_open_desc, puffs_vnop_open },		/* REAL open */
	{ &vop_close_desc, puffs_vnop_checkop },	/* close */
	{ &vop_access_desc, puffs_vnop_access },	/* REAL access */
	{ &vop_getattr_desc, puffs_vnop_checkop },	/* getattr */
	{ &vop_setattr_desc, puffs_vnop_checkop },	/* setattr */
	{ &vop_read_desc, puffs_vnop_checkop },		/* read */
	{ &vop_write_desc, puffs_vnop_checkop },	/* write */
	{ &vop_fsync_desc, puffs_vnop_fsync },		/* REAL fsync */
	{ &vop_seek_desc, puffs_vnop_checkop },		/* seek */
	{ &vop_remove_desc, puffs_vnop_checkop },	/* remove */
	{ &vop_link_desc, puffs_vnop_checkop },		/* link */
	{ &vop_rename_desc, puffs_vnop_checkop },	/* rename */
	{ &vop_mkdir_desc, puffs_vnop_checkop },	/* mkdir */
	{ &vop_rmdir_desc, puffs_vnop_checkop },	/* rmdir */
	{ &vop_symlink_desc, puffs_vnop_checkop },	/* symlink */
	{ &vop_readdir_desc, puffs_vnop_checkop },	/* readdir */
	{ &vop_readlink_desc, puffs_vnop_checkop },	/* readlink */
	{ &vop_getpages_desc, puffs_vnop_checkop },	/* getpages */
	{ &vop_putpages_desc, genfs_putpages },		/* REAL putpages */
	{ &vop_pathconf_desc, puffs_vnop_checkop },	/* pathconf */
	{ &vop_advlock_desc, puffs_vnop_advlock },	/* advlock */
	{ &vop_strategy_desc, puffs_vnop_strategy },	/* REAL strategy */
	{ &vop_revoke_desc, genfs_revoke },		/* REAL revoke */
	{ &vop_abortop_desc, puffs_vnop_abortop },	/* REAL abortop */
	{ &vop_inactive_desc, puffs_vnop_inactive },	/* REAL inactive */
	{ &vop_reclaim_desc, puffs_vnop_reclaim },	/* REAL reclaim */
	{ &vop_lock_desc, puffs_vnop_lock },		/* REAL lock */
	{ &vop_unlock_desc, puffs_vnop_unlock },	/* REAL unlock */
	{ &vop_bmap_desc, puffs_vnop_bmap },		/* REAL bmap */
	{ &vop_print_desc, puffs_vnop_print },		/* REAL print */
	{ &vop_islocked_desc, puffs_vnop_islocked },	/* REAL islocked */
	{ &vop_bwrite_desc, genfs_nullop },		/* REAL bwrite */
	{ &vop_mmap_desc, puffs_vnop_mmap },		/* REAL mmap */
	{ &vop_poll_desc, puffs_vnop_poll },		/* REAL poll */
	{ &vop_getextattr_desc, puffs_vnop_getextattr },	/* getextattr */
	{ &vop_setextattr_desc, puffs_vnop_setextattr },	/* setextattr */
	{ &vop_listextattr_desc, puffs_vnop_listextattr },	/* listextattr */
	{ &vop_deleteextattr_desc, puffs_vnop_deleteextattr },	/* deleteextattr */
#if 0
	{ &vop_openextattr_desc, puffs_vnop_checkop },	/* openextattr */
	{ &vop_closeextattr_desc, puffs_vnop_checkop },	/* closeextattr */
#endif
	{ &vop_kqfilter_desc, genfs_eopnotsupp },	/* kqfilter XXX */
	{ NULL, NULL }
};
const struct vnodeopv_desc puffs_vnodeop_opv_desc =
	{ &puffs_vnodeop_p, puffs_vnodeop_entries };

int (**puffs_specop_p)(void *);
const struct vnodeopv_entry_desc puffs_specop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup, ENOTDIR */
	{ &vop_create_desc, spec_create },		/* genfs_badop */
	{ &vop_mknod_desc, spec_mknod },		/* genfs_badop */
	{ &vop_open_desc, spec_open },			/* spec_open */
	{ &vop_close_desc, spec_close },		/* spec_close */
	{ &vop_access_desc, puffs_vnop_checkop },	/* access */
	{ &vop_getattr_desc, puffs_vnop_checkop },	/* getattr */
	{ &vop_setattr_desc, puffs_vnop_checkop },	/* setattr */
	{ &vop_read_desc, puffs_vnop_spec_read },	/* update, read */
	{ &vop_write_desc, puffs_vnop_spec_write },	/* update, write */
	{ &vop_ioctl_desc, spec_ioctl },		/* spec_ioctl */
	{ &vop_fcntl_desc, genfs_fcntl },		/* dummy */
	{ &vop_poll_desc, spec_poll },			/* spec_poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* spec_kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* genfs_revoke */
	{ &vop_mmap_desc, spec_mmap },			/* spec_mmap */
	{ &vop_fsync_desc, spec_fsync },		/* vflushbuf */
	{ &vop_seek_desc, spec_seek },			/* genfs_nullop */
	{ &vop_remove_desc, spec_remove },		/* genfs_badop */
	{ &vop_link_desc, spec_link },			/* genfs_badop */
	{ &vop_rename_desc, spec_rename },		/* genfs_badop */
	{ &vop_mkdir_desc, spec_mkdir },		/* genfs_badop */
	{ &vop_rmdir_desc, spec_rmdir },		/* genfs_badop */
	{ &vop_symlink_desc, spec_symlink },		/* genfs_badop */
	{ &vop_readdir_desc, spec_readdir },		/* genfs_badop */
	{ &vop_readlink_desc, spec_readlink },		/* genfs_badop */
	{ &vop_abortop_desc, spec_abortop },		/* genfs_badop */
	{ &vop_inactive_desc, puffs_vnop_inactive },	/* REAL inactive */
	{ &vop_reclaim_desc, puffs_vnop_reclaim },	/* REAL reclaim */
	{ &vop_lock_desc, puffs_vnop_lock },		/* REAL lock */
	{ &vop_unlock_desc, puffs_vnop_unlock },	/* REAL unlock */
	{ &vop_bmap_desc, spec_bmap },			/* dummy */
	{ &vop_strategy_desc, spec_strategy },		/* dev strategy */
	{ &vop_print_desc, puffs_vnop_print },		/* REAL print */
	{ &vop_islocked_desc, puffs_vnop_islocked },	/* REAL islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* lf_advlock */
	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* genfs_getpages */
	{ &vop_putpages_desc, spec_putpages },		/* genfs_putpages */
	{ &vop_getextattr_desc, puffs_vnop_checkop },	/* getextattr */
	{ &vop_setextattr_desc, puffs_vnop_checkop },	/* setextattr */
	{ &vop_listextattr_desc, puffs_vnop_checkop },	/* listextattr */
	{ &vop_deleteextattr_desc, puffs_vnop_checkop },	/* deleteextattr */
#if 0
	{ &vop_openextattr_desc, _openextattr },	/* openextattr */
	{ &vop_closeextattr_desc, _closeextattr },	/* closeextattr */
#endif
	{ NULL, NULL }
};
const struct vnodeopv_desc puffs_specop_opv_desc =
	{ &puffs_specop_p, puffs_specop_entries };

int (**puffs_fifoop_p)(void *);
const struct vnodeopv_entry_desc puffs_fifoop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, vn_fifo_bypass },		/* lookup, ENOTDIR */
	{ &vop_create_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_mknod_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_open_desc, vn_fifo_bypass },		/* open */
	{ &vop_close_desc, vn_fifo_bypass },		/* close */
	{ &vop_access_desc, puffs_vnop_checkop },	/* access */
	{ &vop_getattr_desc, puffs_vnop_checkop },	/* getattr */
	{ &vop_setattr_desc, puffs_vnop_checkop },	/* setattr */
	{ &vop_read_desc, puffs_vnop_fifo_read },	/* read, update */
	{ &vop_write_desc, puffs_vnop_fifo_write },	/* write, update */
	{ &vop_ioctl_desc, vn_fifo_bypass },		/* ioctl */
	{ &vop_fcntl_desc, genfs_fcntl },		/* dummy */
	{ &vop_poll_desc, vn_fifo_bypass },		/* poll */
	{ &vop_kqfilter_desc, vn_fifo_bypass },		/* kqfilter */
	{ &vop_revoke_desc, vn_fifo_bypass },		/* genfs_revoke */
	{ &vop_mmap_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_fsync_desc, vn_fifo_bypass },		/* genfs_nullop */
	{ &vop_seek_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_remove_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_link_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_rename_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_mkdir_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_rmdir_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_symlink_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_readdir_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_readlink_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_abortop_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_inactive_desc, puffs_vnop_inactive },	/* REAL inactive */
	{ &vop_reclaim_desc, puffs_vnop_reclaim },	/* REAL reclaim */
	{ &vop_lock_desc, puffs_vnop_lock },		/* REAL lock */
	{ &vop_unlock_desc, puffs_vnop_unlock },	/* REAL unlock */
	{ &vop_bmap_desc, vn_fifo_bypass },		/* dummy */
	{ &vop_strategy_desc, vn_fifo_bypass },		/* genfs_badop */
	{ &vop_print_desc, puffs_vnop_print },		/* REAL print */
	{ &vop_islocked_desc, puffs_vnop_islocked },	/* REAL islocked */
	{ &vop_pathconf_desc, vn_fifo_bypass },		/* pathconf */
	{ &vop_advlock_desc, vn_fifo_bypass },		/* genfs_einval */
	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
	{ &vop_putpages_desc, vn_fifo_bypass },		/* genfs_null_putpages */
#if 0
	{ &vop_openextattr_desc, _openextattr },	/* openextattr */
	{ &vop_closeextattr_desc, _closeextattr },	/* closeextattr */
#endif
	{ &vop_getextattr_desc, puffs_vnop_checkop },	/* getextattr */
	{ &vop_setextattr_desc, puffs_vnop_checkop },	/* setextattr */
	{ &vop_listextattr_desc, puffs_vnop_checkop },	/* listextattr */
	{ &vop_deleteextattr_desc, puffs_vnop_checkop },	/* deleteextattr */
	{ NULL, NULL }
};
const struct vnodeopv_desc puffs_fifoop_opv_desc =
	{ &puffs_fifoop_p, puffs_fifoop_entries };

/* "real" vnode operations */
int (**puffs_msgop_p)(void *);
const struct vnodeopv_entry_desc puffs_msgop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_create_desc, puffs_vnop_create },	/* create */
	{ &vop_mknod_desc, puffs_vnop_mknod },		/* mknod */
	{ &vop_open_desc, puffs_vnop_open },		/* open */
	{ &vop_close_desc, puffs_vnop_close },		/* close */
	{ &vop_access_desc, puffs_vnop_access },	/* access */
	{ &vop_getattr_desc, puffs_vnop_getattr },	/* getattr */
	{ &vop_setattr_desc, puffs_vnop_setattr },	/* setattr */
	{ &vop_read_desc, puffs_vnop_read },		/* read */
	{ &vop_write_desc, puffs_vnop_write },		/* write */
	{ &vop_seek_desc, puffs_vnop_seek },		/* seek */
	{ &vop_remove_desc, puffs_vnop_remove },	/* remove */
	{ &vop_link_desc, puffs_vnop_link },		/* link */
	{ &vop_rename_desc, puffs_vnop_rename },	/* rename */
	{ &vop_mkdir_desc, puffs_vnop_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, puffs_vnop_rmdir },		/* rmdir */
	{ &vop_symlink_desc, puffs_vnop_symlink },	/* symlink */
	{ &vop_readdir_desc, puffs_vnop_readdir },	/* readdir */
	{ &vop_readlink_desc, puffs_vnop_readlink },	/* readlink */
	{ &vop_print_desc, puffs_vnop_print },		/* print */
	{ &vop_islocked_desc, puffs_vnop_islocked },	/* islocked */
	{ &vop_pathconf_desc, puffs_vnop_pathconf },	/* pathconf */
	{ &vop_getpages_desc, puffs_vnop_getpages },	/* getpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc puffs_msgop_opv_desc =
	{ &puffs_msgop_p, puffs_msgop_entries };

/*
 * for dosetattr / update_va
 */
#define SETATTR_CHSIZE	0x01
#define SETATTR_ASYNC	0x02

#define ERROUT(err) \
do { \
	error = err; \
	goto out; \
} while (/*CONSTCOND*/0)

/*
 * This is a generic vnode operation handler.  It checks if the necessary
 * operations for the called vnode operation are implemented by userspace
 * and either returns a dummy return value or proceeds to call the real
 * vnode operation from puffs_msgop_v.
 *
 * XXX: this should be described elsewhere and autogenerated, the complexity
 * of the vnode operations vectors and their interrelationships is also
 * getting a bit out of hand.  Another problem is that we need this same
 * information in the fs server code, so keeping the two in sync manually
 * is not a viable (long term) plan.
 */

/* not supported, handle locking protocol */
#define CHECKOP_NOTSUPP(op) \
case VOP_##op##_DESCOFFSET: \
	if (pmp->pmp_vnopmask[PUFFS_VN_##op] == 0) \
		return genfs_eopnotsupp(v); \
	break

/* always succeed, no locking */
#define CHECKOP_SUCCESS(op) \
case VOP_##op##_DESCOFFSET: \
	if (pmp->pmp_vnopmask[PUFFS_VN_##op] == 0) \
		return 0; \
	break

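/*
 * For illustration, CHECKOP_NOTSUPP(CREATE) expands to:
 *
 *	case VOP_CREATE_DESCOFFSET:
 *		if (pmp->pmp_vnopmask[PUFFS_VN_CREATE] == 0)
 *			return genfs_eopnotsupp(v);
 *		break
 *
 * i.e. an operation the file server did not register is answered with
 * EOPNOTSUPP right here, while a registered one breaks out of the switch
 * and is forwarded through the VOCALL below.
 */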
int
puffs_vnop_checkop(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		spooky mystery contents;
	} */ *ap = v;
	struct vnodeop_desc *desc = ap->a_desc;
	struct puffs_mount *pmp;
	struct vnode *vp;
	int offset, rv;

	offset = ap->a_desc->vdesc_vp_offsets[0];
#ifdef DIAGNOSTIC
	if (offset == VDESC_NO_OFFSET)
		panic("puffs_checkop: no vnode, why did you call me?");
#endif
	vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
	pmp = MPTOPUFFSMP(vp->v_mount);

	DPRINTF_VERBOSE(("checkop call %s (%d), vp %p\n",
	    ap->a_desc->vdesc_name, ap->a_desc->vdesc_offset, vp));

	if (!ALLOPS(pmp)) {
		switch (desc->vdesc_offset) {
			CHECKOP_NOTSUPP(CREATE);
			CHECKOP_NOTSUPP(MKNOD);
			CHECKOP_NOTSUPP(GETATTR);
			CHECKOP_NOTSUPP(SETATTR);
			CHECKOP_NOTSUPP(READ);
			CHECKOP_NOTSUPP(WRITE);
			CHECKOP_NOTSUPP(FCNTL);
			CHECKOP_NOTSUPP(IOCTL);
			CHECKOP_NOTSUPP(REMOVE);
			CHECKOP_NOTSUPP(LINK);
			CHECKOP_NOTSUPP(RENAME);
			CHECKOP_NOTSUPP(MKDIR);
			CHECKOP_NOTSUPP(RMDIR);
			CHECKOP_NOTSUPP(SYMLINK);
			CHECKOP_NOTSUPP(READDIR);
			CHECKOP_NOTSUPP(READLINK);
			CHECKOP_NOTSUPP(PRINT);
			CHECKOP_NOTSUPP(PATHCONF);
			CHECKOP_NOTSUPP(GETEXTATTR);
			CHECKOP_NOTSUPP(SETEXTATTR);
			CHECKOP_NOTSUPP(LISTEXTATTR);
			CHECKOP_NOTSUPP(DELETEEXTATTR);

			CHECKOP_SUCCESS(ACCESS);
			CHECKOP_SUCCESS(CLOSE);
			CHECKOP_SUCCESS(SEEK);

		case VOP_GETPAGES_DESCOFFSET:
			if (!EXISTSOP(pmp, READ))
				return genfs_eopnotsupp(v);
			break;

		default:
			panic("puffs_checkop: unhandled vnop %d",
			    desc->vdesc_offset);
		}
	}

	rv = VOCALL(puffs_msgop_p, ap->a_desc->vdesc_offset, v);

	DPRINTF_VERBOSE(("checkop return %s (%d), vp %p: %d\n",
	    ap->a_desc->vdesc_name, ap->a_desc->vdesc_offset, vp, rv));

	return rv;
}

static int callremove(struct puffs_mount *, puffs_cookie_t, puffs_cookie_t,
	    struct componentname *);
static int callrmdir(struct puffs_mount *, puffs_cookie_t, puffs_cookie_t,
	    struct componentname *);
static void callinactive(struct puffs_mount *, puffs_cookie_t, int);
static void callreclaim(struct puffs_mount *, puffs_cookie_t, int);
static int  flushvncache(struct vnode *, off_t, off_t, bool);
static void update_va(struct vnode *, struct vattr *, struct vattr *,
	    struct timespec *, struct timespec *, int);

#define PUFFS_ABORT_LOOKUP	1
#define PUFFS_ABORT_CREATE	2
#define PUFFS_ABORT_MKNOD	3
#define PUFFS_ABORT_MKDIR	4
#define PUFFS_ABORT_SYMLINK	5

/*
 * Press the pani^Wabort button!  Kernel resource allocation failed.
 */
static void
puffs_abortbutton(struct puffs_mount *pmp, int what,
	puffs_cookie_t dck, puffs_cookie_t ck, struct componentname *cnp)
{

	switch (what) {
	case PUFFS_ABORT_CREATE:
	case PUFFS_ABORT_MKNOD:
	case PUFFS_ABORT_SYMLINK:
		callremove(pmp, dck, ck, cnp);
		break;
	case PUFFS_ABORT_MKDIR:
		callrmdir(pmp, dck, ck, cnp);
		break;
	}

	callinactive(pmp, ck, 0);
	callreclaim(pmp, ck, 0);
}

/*
 * Begin vnode operations.
 *
 * A word from the keymaster about locks: generally we don't want
 * to use the vnode locks at all: it creates an ugly dependency between
 * the userlandia file server and the kernel.  But we'll play along with
 * the kernel vnode locks for now.  However, even currently we attempt
 * to release locks as early as possible.  This is possible for some
 * operations which a) don't need a locked vnode after the userspace op
 * and b) return with the vnode unlocked.  Theoretically we could
 * unlock-do op-lock for others and order the graph in userspace, but I
 * don't want to think of the consequences for the time being.
 */

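/*
 * TTL bookkeeping for mounts where PUFFS_USE_FS_TTL() is true:
 * TTL_TO_TIMEOUT() turns a server-supplied timespec into an absolute
 * deadline in hardclock ticks, TTL_VALID() rejects a NULL or all-zero
 * timespec, and TIMED_OUT() compares against hardclock_ticks via a
 * signed difference of unsigned values so the test survives tick
 * counter wraparound.
 */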
#define TTL_TO_TIMEOUT(ts) \
    (hardclock_ticks + (ts->tv_sec * hz) + (ts->tv_nsec * hz / 1000000000))
#define TTL_VALID(ts) \
    ((ts != NULL) && !((ts->tv_sec == 0) && (ts->tv_nsec == 0)))
#define TIMED_OUT(expire) \
    ((int)((unsigned int)hardclock_ticks - (unsigned int)expire) > 0)

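/*
 * The vnode operations below share a common userspace round-trip pattern:
 * PUFFS_MSG_ALLOC() builds the request, puffs_msg_setinfo() tags it with
 * the opcode and node cookie, PUFFS_MSG_ENQUEUEWAIT2() ships it to the
 * file server and waits for the reply, checkerr() sanitizes the returned
 * error, and PUFFS_MSG_RELEASE() frees the message.
 */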
int
puffs_vnop_lookup(void *v)
{
	struct vop_lookup_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	PUFFS_MSG_VARS(vn, lookup);
	struct puffs_mount *pmp;
	struct componentname *cnp;
	struct vnode *vp, *dvp, *cvp;
	struct puffs_node *dpn, *cpn;
	int isdot;
	int error;

	pmp = MPTOPUFFSMP(ap->a_dvp->v_mount);
	cnp = ap->a_cnp;
	dvp = ap->a_dvp;
	cvp = NULL;
	cpn = NULL;
	*ap->a_vpp = NULL;

	/* r/o fs?  we check create later to handle EEXIST */
	if ((cnp->cn_flags & ISLASTCN)
	    && (dvp->v_mount->mnt_flag & MNT_RDONLY)
	    && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return EROFS;

	isdot = cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.';

	DPRINTF(("puffs_lookup: \"%s\", parent vnode %p, op: %x\n",
	    cnp->cn_nameptr, dvp, cnp->cn_nameiop));

	/*
	 * Check if someone fed it into the cache
	 */
	if (!isdot && PUFFS_USE_NAMECACHE(pmp)) {
		error = cache_lookup(dvp, ap->a_vpp, cnp);

		if ((error == 0) && PUFFS_USE_FS_TTL(pmp)) {
			cvp = *ap->a_vpp;
			cpn = VPTOPP(cvp);

			if (TIMED_OUT(cpn->pn_cn_timeout)) {
				cache_purge(cvp);
				/*
				 * cached vnode (cvp) is still locked
				 * so that we can reuse it upon a new
				 * successful lookup.
				 */
				*ap->a_vpp = NULL;
				error = -1;
			}
		}

		/*
		 * Do not use negative caching, since the filesystem
		 * provides no TTL for it.
		 */
		if ((error == ENOENT) && PUFFS_USE_FS_TTL(pmp))
			error = -1;

		if (error >= 0)
			return error;
	}

	if (isdot) {
		/* deal with rename lookup semantics */
		if (cnp->cn_nameiop == RENAME && (cnp->cn_flags & ISLASTCN))
			return EISDIR;

		vp = ap->a_dvp;
		vref(vp);
		*ap->a_vpp = vp;
		return 0;
	}

	if (cvp != NULL)
		mutex_enter(&cpn->pn_sizemtx);

	PUFFS_MSG_ALLOC(vn, lookup);
	puffs_makecn(&lookup_msg->pvnr_cn, &lookup_msg->pvnr_cn_cred,
	    cnp, PUFFS_USE_FULLPNBUF(pmp));

	if (cnp->cn_flags & ISDOTDOT)
		VOP_UNLOCK(dvp);

	puffs_msg_setinfo(park_lookup, PUFFSOP_VN,
	    PUFFS_VN_LOOKUP, VPTOPNC(dvp));
	PUFFS_MSG_ENQUEUEWAIT2(pmp, park_lookup, dvp->v_data, NULL, error);
	DPRINTF(("puffs_lookup: return of the userspace, part %d\n", error));

	/*
	 * In case of error, there is no new vnode to play with, so be
	 * happy with the NULL value given to vpp in the beginning.
	 * Also, check if this really was an error or the target was not
	 * present.  Either treat it as a non-error for CREATE/RENAME or
	 * enter the component into the negative name cache (if desired).
	 */
	if (error) {
		error = checkerr(pmp, error, __func__);
		if (error == ENOENT) {
			/* don't allow creating files on r/o fs */
			if ((dvp->v_mount->mnt_flag & MNT_RDONLY)
			    && cnp->cn_nameiop == CREATE) {
				error = EROFS;

			/* adjust values if we are creating */
			} else if ((cnp->cn_flags & ISLASTCN)
			    && (cnp->cn_nameiop == CREATE
			      || cnp->cn_nameiop == RENAME)) {
				error = EJUSTRETURN;

			/* save negative cache entry */
			} else {
				if (PUFFS_USE_NAMECACHE(pmp) &&
				    !PUFFS_USE_FS_TTL(pmp) &&
				    (cnp->cn_namelen <= NCHNAMLEN))
					cache_enter(dvp, NULL, cnp);
			}
		}
		goto out;
	}

	/*
	 * Check that we don't get our parent node back, that would cause
	 * a pretty obvious deadlock.
	 */
	dpn = dvp->v_data;
	if (lookup_msg->pvnr_newnode == dpn->pn_cookie) {
		puffs_senderr(pmp, PUFFS_ERR_LOOKUP, EINVAL,
		    "lookup produced parent cookie", lookup_msg->pvnr_newnode);
		error = EPROTO;
		goto out;
	}

	/*
	 * Check if we looked up the cached vnode
	 */
	vp = NULL;
	if (cvp && (VPTOPP(cvp)->pn_cookie == lookup_msg->pvnr_newnode)) {
		int grace;

		/*
		 * Bump grace time of this node so that it does not get
		 * reclaimed too fast.  We try to increase a bit more the
		 * lifetime of busiest nodes - with some limits.
		 */
		grace = 10 * puffs_sopreq_expire_timeout;
		cpn->pn_cn_grace = hardclock_ticks + grace;
		vp = cvp;
	}

	/*
	 * No cached vnode available, or the cached vnode does not
	 * match the userland cookie anymore: is the node known?
	 */
	if (vp == NULL) {
		error = puffs_cookie2vnode(pmp, lookup_msg->pvnr_newnode,
		    1, 1, &vp);
	}

	if (error == PUFFS_NOSUCHCOOKIE) {
		error = puffs_getvnode(dvp->v_mount,
		    lookup_msg->pvnr_newnode, lookup_msg->pvnr_vtype,
		    lookup_msg->pvnr_size, lookup_msg->pvnr_rdev, &vp);
		if (error) {
			puffs_abortbutton(pmp, PUFFS_ABORT_LOOKUP,
			    VPTOPNC(dvp), lookup_msg->pvnr_newnode,
			    ap->a_cnp);
			goto out;
		}

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else if (error) {
		puffs_abortbutton(pmp, PUFFS_ABORT_LOOKUP, VPTOPNC(dvp),
		    lookup_msg->pvnr_newnode, ap->a_cnp);
		goto out;
	}

	/*
	 * Update cache and TTL
	 */
	if (PUFFS_USE_FS_TTL(pmp)) {
		struct timespec *va_ttl = &lookup_msg->pvnr_va_ttl;
		struct timespec *cn_ttl = &lookup_msg->pvnr_cn_ttl;
		update_va(vp, NULL, &lookup_msg->pvnr_va,
		    va_ttl, cn_ttl, SETATTR_CHSIZE);
	}

	KASSERT(lookup_msg->pvnr_newnode == VPTOPP(vp)->pn_cookie);
	*ap->a_vpp = vp;

	if (PUFFS_USE_NAMECACHE(pmp) && (cnp->cn_namelen <= NCHNAMLEN))
		cache_enter(dvp, vp, cnp);

	/* XXX */
	if ((lookup_msg->pvnr_cn.pkcn_flags & REQUIREDIR) == 0)
		cnp->cn_flags &= ~REQUIREDIR;
	if (lookup_msg->pvnr_cn.pkcn_consume)
		cnp->cn_consume = MIN(lookup_msg->pvnr_cn.pkcn_consume,
		    strlen(cnp->cn_nameptr) - cnp->cn_namelen);

	VPTOPP(vp)->pn_nlookup++;

 out:
	if (cvp != NULL) {
		mutex_exit(&cpn->pn_sizemtx);

		if (error || (cvp != vp))
			vput(cvp);
	}

	if (cnp->cn_flags & ISDOTDOT)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);

	DPRINTF(("puffs_lookup: returning %d %p\n", error, *ap->a_vpp));
	PUFFS_MSG_RELEASE(lookup);
	return error;
}

#define REFPN_AND_UNLOCKVP(a, b) \
do { \
	mutex_enter(&b->pn_mtx); \
	puffs_referencenode(b); \
	mutex_exit(&b->pn_mtx); \
	VOP_UNLOCK(a); \
} while (/*CONSTCOND*/0)

#define REFPN(b) \
do { \
	mutex_enter(&b->pn_mtx); \
	puffs_referencenode(b); \
	mutex_exit(&b->pn_mtx); \
} while (/*CONSTCOND*/0)

#define RELEPN_AND_VP(a, b) \
do { \
	puffs_releasenode(b); \
	vrele(a); \
} while (/*CONSTCOND*/0)

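/*
 * Node reference helpers: REFPN() and REFPN_AND_UNLOCKVP() take a
 * reference on the puffs node under pn_mtx (the latter also dropping
 * the vnode lock), and RELEPN_AND_VP() releases the node reference
 * together with the vnode hold via vrele().
 */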
int
puffs_vnop_create(void *v)
{
	struct vop_create_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	PUFFS_MSG_VARS(vn, create);
	struct vnode *dvp = ap->a_dvp;
	struct puffs_node *dpn = VPTOPP(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct mount *mp = dvp->v_mount;
	struct puffs_mount *pmp = MPTOPUFFSMP(mp);
	int error;

	DPRINTF(("puffs_create: dvp %p, cnp: %s\n",
	    dvp, ap->a_cnp->cn_nameptr));

	PUFFS_MSG_ALLOC(vn, create);
	puffs_makecn(&create_msg->pvnr_cn, &create_msg->pvnr_cn_cred,
	    cnp, PUFFS_USE_FULLPNBUF(pmp));
	create_msg->pvnr_va = *ap->a_vap;
	puffs_msg_setinfo(park_create, PUFFSOP_VN,
	    PUFFS_VN_CREATE, VPTOPNC(dvp));
	PUFFS_MSG_ENQUEUEWAIT2(pmp, park_create, dvp->v_data, NULL, error);

	error = checkerr(pmp, error, __func__);
	if (error)
		goto out;

	error = puffs_newnode(mp, dvp, ap->a_vpp,
	    create_msg->pvnr_newnode, cnp, ap->a_vap->va_type, 0);
	if (error) {
		puffs_abortbutton(pmp, PUFFS_ABORT_CREATE, dpn->pn_cookie,
		    create_msg->pvnr_newnode, cnp);
		goto out;
	}

	if (PUFFS_USE_FS_TTL(pmp)) {
		struct timespec *va_ttl = &create_msg->pvnr_va_ttl;
		struct timespec *cn_ttl = &create_msg->pvnr_cn_ttl;
		struct vattr *rvap = &create_msg->pvnr_va;

		update_va(*ap->a_vpp, NULL, rvap,
		    va_ttl, cn_ttl, SETATTR_CHSIZE);
	}

 out:
	vput(dvp);

	DPRINTF(("puffs_create: return %d\n", error));
	PUFFS_MSG_RELEASE(create);
	return error;
}

int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_mknod(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_mknod_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_dvp;
|
|
|
|
struct vnode **a_vpp;
|
|
|
|
struct componentname *a_cnp;
|
|
|
|
struct vattr *a_vap;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, mknod);
|
|
|
|
struct vnode *dvp = ap->a_dvp;
|
2007-11-18 00:30:48 +03:00
|
|
|
struct puffs_node *dpn = VPTOPP(dvp);
|
2007-10-11 23:41:13 +04:00
|
|
|
struct componentname *cnp = ap->a_cnp;
|
2007-11-18 00:30:48 +03:00
|
|
|
struct mount *mp = dvp->v_mount;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(mp);
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, mknod);
|
|
|
|
puffs_makecn(&mknod_msg->pvnr_cn, &mknod_msg->pvnr_cn_cred,
|
2007-12-08 22:57:02 +03:00
|
|
|
cnp, PUFFS_USE_FULLPNBUF(pmp));
|
2007-10-11 23:41:13 +04:00
|
|
|
mknod_msg->pvnr_va = *ap->a_vap;
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_mknod, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_MKNOD, VPTOPNC(dvp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2009-09-30 22:19:17 +04:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_mknod, dvp->v_data, NULL, error);
|
2007-11-18 00:30:48 +03:00
|
|
|
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
if (error)
|
2006-10-26 17:42:21 +04:00
|
|
|
goto out;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-18 00:30:48 +03:00
|
|
|
error = puffs_newnode(mp, dvp, ap->a_vpp,
|
2007-10-11 23:41:13 +04:00
|
|
|
mknod_msg->pvnr_newnode, cnp, ap->a_vap->va_type,
|
2006-10-27 02:52:47 +04:00
|
|
|
ap->a_vap->va_rdev);
|
2012-04-08 19:04:41 +04:00
|
|
|
if (error) {
|
2007-11-18 00:30:48 +03:00
|
|
|
puffs_abortbutton(pmp, PUFFS_ABORT_MKNOD, dpn->pn_cookie,
|
2007-10-11 23:41:13 +04:00
|
|
|
mknod_msg->pvnr_newnode, cnp);
|
2012-04-08 19:04:41 +04:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (PUFFS_USE_FS_TTL(pmp)) {
|
|
|
|
struct timespec *va_ttl = &mknod_msg->pvnr_va_ttl;
|
|
|
|
struct timespec *cn_ttl = &mknod_msg->pvnr_cn_ttl;
|
|
|
|
struct vattr *rvap = &mknod_msg->pvnr_va;
|
|
|
|
|
2012-04-18 04:42:50 +04:00
|
|
|
update_va(*ap->a_vpp, NULL, rvap,
|
|
|
|
va_ttl, cn_ttl, SETATTR_CHSIZE);
|
2012-04-08 19:04:41 +04:00
|
|
|
}
|
2006-10-26 17:42:21 +04:00
|
|
|
|
|
|
|
out:
|
2009-09-30 22:19:17 +04:00
|
|
|
vput(dvp);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(mknod);
|
2006-10-26 17:42:21 +04:00
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_open(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_open_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
int a_mode;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, open);
|
2007-02-09 01:55:06 +03:00
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
|
|
|
int mode = ap->a_mode;
|
2007-06-26 16:50:49 +04:00
|
|
|
int error;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-02-10 16:12:43 +03:00
|
|
|
DPRINTF(("puffs_open: vp %p, mode 0x%x\n", vp, mode));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-06-26 16:50:49 +04:00
|
|
|
if (vp->v_type == VREG && mode & FWRITE && !EXISTSOP(pmp, WRITE))
|
|
|
|
ERROUT(EROFS);
|
2007-02-09 01:55:06 +03:00
|
|
|
|
2007-06-26 16:50:49 +04:00
|
|
|
if (!EXISTSOP(pmp, OPEN))
|
|
|
|
ERROUT(0);
|
2007-02-09 01:55:06 +03:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, open);
|
|
|
|
open_msg->pvnr_mode = mode;
|
|
|
|
puffs_credcvt(&open_msg->pvnr_cred, ap->a_cred);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_open, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_OPEN, VPTOPNC(vp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_open, vp->v_data, NULL, error);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2007-02-10 16:12:43 +03:00
|
|
|
|
|
|
|
out:
|
2007-06-26 16:50:49 +04:00
|
|
|
DPRINTF(("puffs_open: returning %d\n", error));
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(open);
|
2007-06-26 16:50:49 +04:00
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_close(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_close_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
int a_fflag;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, close);
|
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, close);
|
|
|
|
puffs_msg_setfaf(park_close);
|
|
|
|
close_msg->pvnr_fflag = ap->a_fflag;
|
|
|
|
puffs_credcvt(&close_msg->pvnr_cred, ap->a_cred);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_close, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_CLOSE, VPTOPNC(vp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_enqueue(pmp, park_close);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(close);
|
2007-04-22 22:50:28 +04:00
|
|
|
return 0;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_access(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_access_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
int a_mode;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, access);
|
2007-02-09 01:55:06 +03:00
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
|
|
|
int mode = ap->a_mode;
|
2007-10-02 01:09:07 +04:00
|
|
|
int error;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-08-12 23:44:15 +04:00
|
|
|
if (mode & VWRITE) {
|
|
|
|
switch (vp->v_type) {
|
|
|
|
case VDIR:
|
|
|
|
case VLNK:
|
|
|
|
case VREG:
|
|
|
|
if ((vp->v_mount->mnt_flag & MNT_RDONLY)
|
|
|
|
|| !EXISTSOP(pmp, WRITE))
|
|
|
|
return EROFS;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2007-02-09 01:55:06 +03:00
|
|
|
|
|
|
|
if (!EXISTSOP(pmp, ACCESS))
|
|
|
|
return 0;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, access);
|
|
|
|
access_msg->pvnr_mode = ap->a_mode;
|
|
|
|
puffs_credcvt(&access_msg->pvnr_cred, ap->a_cred);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_access, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_ACCESS, VPTOPNC(vp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_access, vp->v_data, NULL, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
|
|
|
PUFFS_MSG_RELEASE(access);
|
|
|
|
|
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2012-04-08 19:04:41 +04:00
|
|
|
static void
|
|
|
|
update_va(struct vnode *vp, struct vattr *vap, struct vattr *rvap,
|
2012-04-18 04:42:50 +04:00
|
|
|
struct timespec *va_ttl, struct timespec *cn_ttl, int flags)
|
2012-04-08 19:04:41 +04:00
|
|
|
{
|
|
|
|
struct puffs_node *pn = VPTOPP(vp);
|
|
|
|
|
2012-07-21 09:17:10 +04:00
|
|
|
if (TTL_VALID(cn_ttl)) {
|
2012-04-08 19:04:41 +04:00
|
|
|
pn->pn_cn_timeout = TTL_TO_TIMEOUT(cn_ttl);
|
2012-07-21 09:17:10 +04:00
|
|
|
pn->pn_cn_grace = MAX(pn->pn_cn_timeout, pn->pn_cn_grace);
|
|
|
|
}
|
2012-04-08 19:04:41 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't listen to the file server regarding special device
|
|
|
|
* size info, the file server doesn't know anything about them.
|
|
|
|
*/
|
|
|
|
if (vp->v_type == VBLK || vp->v_type == VCHR)
|
|
|
|
rvap->va_size = vp->v_size;
|
|
|
|
|
|
|
|
/* Ditto for blocksize (ufs comment: this doesn't belong here) */
|
|
|
|
if (vp->v_type == VBLK)
|
|
|
|
rvap->va_blocksize = BLKDEV_IOSIZE;
|
|
|
|
else if (vp->v_type == VCHR)
|
|
|
|
rvap->va_blocksize = MAXBSIZE;
|
|
|
|
|
|
|
|
if (vap != NULL) {
|
|
|
|
(void) memcpy(vap, rvap, sizeof(struct vattr));
|
|
|
|
vap->va_fsid = vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
|
|
|
|
|
|
|
|
if (pn->pn_stat & PNODE_METACACHE_ATIME)
|
|
|
|
vap->va_atime = pn->pn_mc_atime;
|
|
|
|
if (pn->pn_stat & PNODE_METACACHE_CTIME)
|
|
|
|
vap->va_ctime = pn->pn_mc_ctime;
|
|
|
|
if (pn->pn_stat & PNODE_METACACHE_MTIME)
|
|
|
|
vap->va_mtime = pn->pn_mc_mtime;
|
|
|
|
if (pn->pn_stat & PNODE_METACACHE_SIZE)
|
|
|
|
vap->va_size = pn->pn_mc_size;
|
|
|
|
}
|
|
|
|
|
2012-04-18 04:42:50 +04:00
|
|
|
if (!(pn->pn_stat & PNODE_METACACHE_SIZE) && (flags & SETATTR_CHSIZE)) {
|
2012-04-08 19:04:41 +04:00
|
|
|
if (rvap->va_size != VNOVAL
|
|
|
|
&& vp->v_type != VBLK && vp->v_type != VCHR) {
|
|
|
|
uvm_vnp_setsize(vp, rvap->va_size);
|
|
|
|
pn->pn_serversize = rvap->va_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-04-18 04:42:50 +04:00
|
|
|
if ((va_ttl != NULL) && TTL_VALID(va_ttl)) {
|
2012-04-08 19:04:41 +04:00
|
|
|
if (pn->pn_va_cache == NULL)
|
|
|
|
pn->pn_va_cache = pool_get(&puffs_vapool, PR_WAITOK);
|
|
|
|
|
|
|
|
(void)memcpy(pn->pn_va_cache, rvap, sizeof(*rvap));
|
|
|
|
|
|
|
|
pn->pn_va_timeout = TTL_TO_TIMEOUT(va_ttl);
|
|
|
|
}
|
|
|
|
}
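/*
 * A note on the TTL machinery used above: update_va() turns the
 * server-supplied struct timespec TTLs into absolute tick deadlines
 * with TTL_TO_TIMEOUT() and validates them with TTL_VALID(); callers
 * later test the deadlines with TIMED_OUT().  The authoritative macros
 * live in puffs_sys.h; the block below is only an illustrative sketch
 * of how such macros can be built from hz and hardclock_ticks (see the
 * <sys/kernel.h> include at the top of the file), under assumed names
 * and rounding.  It is not the actual definition.
 */
#if 0	/* sketch only, not compiled */
#define SKETCH_TTL_VALID(ts)						\
	((ts) != NULL && ((ts)->tv_sec != 0 || (ts)->tv_nsec != 0))
#define SKETCH_TTL_TO_TIMEOUT(ts)					\
	(hardclock_ticks + (ts)->tv_sec * hz +				\
	    (ts)->tv_nsec / (1000000000 / hz))
#define SKETCH_TIMED_OUT(deadline)					\
	((int)((unsigned int)hardclock_ticks -				\
	    (unsigned int)(deadline)) > 0)
#endif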
|
|
|
|
|
2006-10-23 02:43:23 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_getattr(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_getattr_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct vattr *a_vap;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, getattr);
|
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct mount *mp = vp->v_mount;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(mp);
|
2007-05-09 01:39:03 +04:00
|
|
|
struct vattr *vap, *rvap;
|
2010-01-14 17:44:13 +03:00
|
|
|
struct puffs_node *pn = VPTOPP(vp);
|
2012-04-08 19:04:41 +04:00
|
|
|
struct timespec *va_ttl = NULL;
|
2007-10-11 23:41:13 +04:00
|
|
|
int error = 0;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2011-08-29 08:12:45 +04:00
|
|
|
/*
|
|
|
|
* A lock is required so that we do not race with
|
|
|
|
* setattr, write and fsync when changing vp->v_size.
|
|
|
|
* This is critical, since setting a stale smaller value
|
|
|
|
* triggers a file truncate in uvm_vnp_setsize(), which
|
|
|
|
* most of the time means data corruption (a chunk of
|
|
|
|
* data is replaced by zeroes). This can be removed if
|
|
|
|
* we decide one day that VOP_GETATTR must operate on
|
|
|
|
* a locked vnode.
|
2011-10-30 17:24:13 +04:00
|
|
|
*
|
|
|
|
* XXX Should be useless now that VOP_GETATTR has been
|
|
|
|
* fixed to always require a shared lock at least.
|
2011-08-29 08:12:45 +04:00
|
|
|
*/
|
|
|
|
mutex_enter(&pn->pn_sizemtx);
|
|
|
|
|
2010-01-14 17:44:13 +03:00
|
|
|
REFPN(pn);
|
2007-03-20 13:21:58 +03:00
|
|
|
vap = ap->a_vap;
|
2006-11-18 15:39:48 +03:00
|
|
|
|
2012-04-08 19:04:41 +04:00
|
|
|
if (PUFFS_USE_FS_TTL(pmp)) {
|
2012-04-18 04:42:50 +04:00
|
|
|
if (!TIMED_OUT(pn->pn_va_timeout)) {
|
|
|
|
update_va(vp, vap, pn->pn_va_cache,
|
|
|
|
NULL, NULL, SETATTR_CHSIZE);
|
2012-04-08 19:04:41 +04:00
|
|
|
goto out2;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, getattr);
|
|
|
|
vattr_null(&getattr_msg->pvnr_va);
|
|
|
|
puffs_credcvt(&getattr_msg->pvnr_cred, ap->a_cred);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_getattr, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_GETATTR, VPTOPNC(vp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_getattr, vp->v_data, NULL, error);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
if (error)
|
2007-10-11 23:41:13 +04:00
|
|
|
goto out;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
rvap = &getattr_msg->pvnr_va;
|
2007-05-09 01:39:03 +04:00
|
|
|
|
2012-04-08 19:04:41 +04:00
|
|
|
if (PUFFS_USE_FS_TTL(pmp))
|
|
|
|
va_ttl = &getattr_msg->pvnr_va_ttl;
|
2007-05-09 01:39:03 +04:00
|
|
|
|
2012-04-18 04:42:50 +04:00
|
|
|
update_va(vp, vap, rvap, va_ttl, NULL, SETATTR_CHSIZE);
|
2006-11-18 15:39:48 +03:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
out:
|
|
|
|
PUFFS_MSG_RELEASE(getattr);
|
2012-04-08 19:04:41 +04:00
|
|
|
|
|
|
|
out2:
|
|
|
|
puffs_releasenode(pn);
|
2011-08-29 08:12:45 +04:00
|
|
|
|
|
|
|
mutex_exit(&pn->pn_sizemtx);
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2007-07-22 22:22:49 +04:00
|
|
|
static int
|
2009-12-04 23:26:35 +03:00
|
|
|
dosetattr(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, int flags)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, setattr);
|
2007-10-11 16:31:45 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2007-10-11 23:41:13 +04:00
|
|
|
struct puffs_node *pn = vp->v_data;
|
2009-12-04 23:26:35 +03:00
|
|
|
int error = 0;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2011-08-29 08:12:45 +04:00
|
|
|
KASSERT(!(flags & SETATTR_CHSIZE) || mutex_owned(&pn->pn_sizemtx));
|
|
|
|
|
2007-08-12 23:44:15 +04:00
|
|
|
if ((vp->v_mount->mnt_flag & MNT_RDONLY) &&
|
|
|
|
(vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL
|
|
|
|
|| vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL
|
|
|
|
|| vap->va_mode != (mode_t)VNOVAL))
|
|
|
|
return EROFS;
|
|
|
|
|
|
|
|
if ((vp->v_mount->mnt_flag & MNT_RDONLY)
|
|
|
|
&& vp->v_type == VREG && vap->va_size != VNOVAL)
|
|
|
|
return EROFS;
|
|
|
|
|
2007-05-06 23:43:14 +04:00
|
|
|
/*
|
|
|
|
* Flush metacache first. If we are called with some explicit
|
|
|
|
* parameters, treat them as information overriding metacache
|
|
|
|
* information.
|
|
|
|
*/
|
|
|
|
if (pn->pn_stat & PNODE_METACACHE_MASK) {
|
|
|
|
if ((pn->pn_stat & PNODE_METACACHE_ATIME)
|
|
|
|
&& vap->va_atime.tv_sec == VNOVAL)
|
|
|
|
vap->va_atime = pn->pn_mc_atime;
|
|
|
|
if ((pn->pn_stat & PNODE_METACACHE_CTIME)
|
|
|
|
&& vap->va_ctime.tv_sec == VNOVAL)
|
|
|
|
vap->va_ctime = pn->pn_mc_ctime;
|
|
|
|
if ((pn->pn_stat & PNODE_METACACHE_MTIME)
|
|
|
|
&& vap->va_mtime.tv_sec == VNOVAL)
|
|
|
|
vap->va_mtime = pn->pn_mc_mtime;
|
|
|
|
if ((pn->pn_stat & PNODE_METACACHE_SIZE)
|
|
|
|
&& vap->va_size == VNOVAL)
|
|
|
|
vap->va_size = pn->pn_mc_size;
|
|
|
|
|
|
|
|
pn->pn_stat &= ~PNODE_METACACHE_MASK;
|
|
|
|
}
|
|
|
|
|
2012-04-08 19:04:41 +04:00
|
|
|
/*
|
|
|
|
* Flush attribute cache so that another thread does
|
|
|
|
* not get a stale value during the operation.
|
|
|
|
*/
|
|
|
|
if (PUFFS_USE_FS_TTL(pmp))
|
|
|
|
pn->pn_va_timeout = 0;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, setattr);
|
|
|
|
(void)memcpy(&setattr_msg->pvnr_va, vap, sizeof(struct vattr));
|
|
|
|
puffs_credcvt(&setattr_msg->pvnr_cred, cred);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_setattr, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_SETATTR, VPTOPNC(vp));
|
2009-12-04 23:26:35 +03:00
|
|
|
if (flags & SETATTR_ASYNC)
|
|
|
|
puffs_msg_setfaf(park_setattr);
|
2007-10-11 23:41:13 +04:00
|
|
|
|
2009-12-04 23:26:35 +03:00
|
|
|
puffs_msg_enqueue(pmp, park_setattr);
|
|
|
|
if ((flags & SETATTR_ASYNC) == 0)
|
|
|
|
error = puffs_msg_wait2(pmp, park_setattr, vp->v_data, NULL);
|
2012-04-08 19:04:41 +04:00
|
|
|
|
|
|
|
if ((error == 0) && PUFFS_USE_FS_TTL(pmp)) {
|
|
|
|
struct timespec *va_ttl = &setattr_msg->pvnr_va_ttl;
|
|
|
|
struct vattr *rvap = &setattr_msg->pvnr_va;
|
|
|
|
|
2012-04-18 04:42:50 +04:00
|
|
|
update_va(vp, NULL, rvap, va_ttl, NULL, flags);
|
2012-04-08 19:04:41 +04:00
|
|
|
}
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(setattr);
|
2009-12-04 23:26:35 +03:00
|
|
|
if ((flags & SETATTR_ASYNC) == 0) {
|
|
|
|
error = checkerr(pmp, error, __func__);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
} else {
|
|
|
|
error = 0;
|
|
|
|
}
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2007-07-22 22:22:49 +04:00
|
|
|
if (vap->va_size != VNOVAL) {
|
|
|
|
pn->pn_serversize = vap->va_size;
|
2009-12-04 23:26:35 +03:00
|
|
|
if (flags & SETATTR_CHSIZE)
|
2007-07-22 22:22:49 +04:00
|
|
|
uvm_vnp_setsize(vp, vap->va_size);
|
|
|
|
}
|
2006-11-08 01:10:18 +03:00
|
|
|
|
|
|
|
return 0;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2007-07-22 22:22:49 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_setattr(void *v)
|
2007-07-22 22:22:49 +04:00
|
|
|
{
|
|
|
|
struct vop_getattr_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct vattr *a_vap;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
} */ *ap = v;
|
2011-08-29 08:12:45 +04:00
|
|
|
struct puffs_node *pn = ap->a_vp->v_data;
|
|
|
|
int error;
|
2007-07-22 22:22:49 +04:00
|
|
|
|
2011-08-29 08:12:45 +04:00
|
|
|
mutex_enter(&pn->pn_sizemtx);
|
|
|
|
error = dosetattr(ap->a_vp, ap->a_vap, ap->a_cred, SETATTR_CHSIZE);
|
|
|
|
mutex_exit(&pn->pn_sizemtx);
|
|
|
|
|
|
|
|
return error;
|
2007-07-22 22:22:49 +04:00
|
|
|
}
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
static __inline int
|
|
|
|
doinact(struct puffs_mount *pmp, int iaflag)
|
2006-10-25 22:15:39 +04:00
|
|
|
{
|
|
|
|
|
2006-12-01 15:37:41 +03:00
|
|
|
if (EXISTSOP(pmp, INACTIVE))
|
2007-05-07 21:14:54 +04:00
|
|
|
if (pmp->pmp_flags & PUFFS_KFLAG_IAONDEMAND)
|
2007-10-02 05:17:17 +04:00
|
|
|
if (iaflag || ALLOPS(pmp))
|
2007-10-11 23:41:13 +04:00
|
|
|
return 1;
|
2007-05-07 21:14:54 +04:00
|
|
|
else
|
2007-10-11 23:41:13 +04:00
|
|
|
return 0;
|
2007-05-07 21:14:54 +04:00
|
|
|
else
|
2007-10-11 23:41:13 +04:00
|
|
|
return 1;
|
2007-05-07 21:14:54 +04:00
|
|
|
else
|
2007-10-11 23:41:13 +04:00
|
|
|
return 0;
|
|
|
|
}
|
2007-05-07 21:14:54 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
static void
|
2008-01-29 00:06:36 +03:00
|
|
|
callinactive(struct puffs_mount *pmp, puffs_cookie_t ck, int iaflag)
|
2007-10-11 23:41:13 +04:00
|
|
|
{
|
2007-11-16 23:32:17 +03:00
|
|
|
int error;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, inactive);
|
|
|
|
|
|
|
|
if (doinact(pmp, iaflag)) {
|
|
|
|
PUFFS_MSG_ALLOC(vn, inactive);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_inactive, PUFFSOP_VN,
|
2008-01-29 00:06:36 +03:00
|
|
|
PUFFS_VN_INACTIVE, ck);
|
2007-10-11 23:41:13 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT(pmp, park_inactive, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(inactive);
|
|
|
|
}
|
2007-10-02 05:17:17 +04:00
|
|
|
}
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
/* XXX: callinactive can't setback */
|
2007-10-02 05:17:17 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_inactive(void *v)
|
2007-10-02 05:17:17 +04:00
|
|
|
{
|
|
|
|
struct vop_inactive_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, inactive);
|
2007-10-02 05:17:17 +04:00
|
|
|
struct vnode *vp = ap->a_vp;
|
2007-10-11 23:41:13 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2007-10-02 05:17:17 +04:00
|
|
|
struct puffs_node *pnode;
|
2012-07-21 09:17:10 +04:00
|
|
|
bool recycle = false;
|
2007-11-16 23:32:17 +03:00
|
|
|
int error;
|
2007-10-02 05:17:17 +04:00
|
|
|
|
|
|
|
pnode = vp->v_data;
|
2011-08-29 08:12:45 +04:00
|
|
|
mutex_enter(&pnode->pn_sizemtx);
|
2007-10-02 05:17:17 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
if (doinact(pmp, pnode->pn_stat & PNODE_DOINACT)) {
|
2009-12-04 23:26:35 +03:00
|
|
|
flushvncache(vp, 0, 0, false);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, inactive);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_inactive, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_INACTIVE, VPTOPNC(vp));
|
|
|
|
|
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_inactive, vp->v_data,
|
|
|
|
NULL, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(inactive);
|
|
|
|
}
|
2007-05-07 21:14:54 +04:00
|
|
|
pnode->pn_stat &= ~PNODE_DOINACT;
|
2006-10-25 22:15:39 +04:00
|
|
|
|
|
|
|
/*
|
2007-07-02 14:24:17 +04:00
|
|
|
* file server thinks it's gone? then don't care,
|
2006-10-25 22:15:39 +04:00
|
|
|
* node's life was already all it would ever be
|
|
|
|
*/
|
2009-11-05 22:22:57 +03:00
|
|
|
if (pnode->pn_stat & PNODE_NOREFS) {
|
|
|
|
pnode->pn_stat |= PNODE_DYING;
|
2012-07-21 09:17:10 +04:00
|
|
|
recycle = true;
|
2009-11-05 22:22:57 +03:00
|
|
|
}
|
2008-01-02 14:48:20 +03:00
|
|
|
|
2012-07-21 09:17:10 +04:00
|
|
|
/*
|
|
|
|
* Handle node TTL.
|
|
|
|
* If the grace time has already expired, have it reclaimed.
|
|
|
|
* Otherwise, we queue its expiration with the sop thread, so
|
|
|
|
* that it does not remain for ages in the freelist,
|
|
|
|
* holding memory in userspace, while we will have
|
|
|
|
* to look it up again anyway.
|
|
|
|
*/
|
|
|
|
if (PUFFS_USE_FS_TTL(pmp) && !(vp->v_vflag & VV_ROOT) && !recycle) {
|
|
|
|
bool incache = !TIMED_OUT(pnode->pn_cn_timeout);
|
|
|
|
bool ingrace = !TIMED_OUT(pnode->pn_cn_grace);
|
|
|
|
bool reclaimqueued = pnode->pn_stat & PNODE_SOPEXP;
|
|
|
|
|
|
|
|
if (!incache && !ingrace && !reclaimqueued) {
|
|
|
|
pnode->pn_stat |= PNODE_DYING;
|
|
|
|
recycle = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!recycle && !reclaimqueued) {
|
|
|
|
struct puffs_sopreq *psopr;
|
|
|
|
int at = MAX(pnode->pn_cn_grace, pnode->pn_cn_timeout);
|
|
|
|
|
|
|
|
KASSERT(curlwp != uvm.pagedaemon_lwp);
|
|
|
|
psopr = kmem_alloc(sizeof(*psopr), KM_SLEEP);
|
|
|
|
psopr->psopr_ck = VPTOPNC(pnode->pn_vp);
|
|
|
|
psopr->psopr_sopreq = PUFFS_SOPREQ_EXPIRE;
|
|
|
|
psopr->psopr_at = at;
|
|
|
|
|
|
|
|
mutex_enter(&pmp->pmp_sopmtx);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the thread has disappeared, just give up. The
|
|
|
|
* fs is being unmounted and the node will be
|
|
|
|
* reclaimed anyway.
|
|
|
|
*
|
|
|
|
* Otherwise, we queue the request but do not
|
|
|
|
* immediately signal the thread, as the node
|
|
|
|
* has not been expired yet.
|
|
|
|
*/
|
|
|
|
if (pmp->pmp_sopthrcount == 0) {
|
|
|
|
kmem_free(psopr, sizeof(*psopr));
|
|
|
|
} else {
|
|
|
|
TAILQ_INSERT_TAIL(&pmp->pmp_sopslowreqs,
|
|
|
|
psopr, psopr_entries);
|
|
|
|
pnode->pn_stat |= PNODE_SOPEXP;
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_exit(&pmp->pmp_sopmtx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
*ap->a_recycle = recycle;
|
|
|
|
|
2011-08-29 08:12:45 +04:00
|
|
|
mutex_exit(&pnode->pn_sizemtx);
|
2010-06-24 16:58:48 +04:00
|
|
|
VOP_UNLOCK(vp);
|
2006-10-25 22:15:39 +04:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
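/*
 * Rough lifecycle of the TTL-based expiration set up above (a summary
 * of the comment and commit annotation earlier, not new behavior): a
 * node that goes inactive while its name/attribute TTLs are still
 * running is queued to the sop worker with psopr_at =
 * MAX(pn_cn_grace, pn_cn_timeout).  Once that tick deadline passes,
 * the sop worker (implemented elsewhere in puffs) pushes the node out
 * of the freelist so the file server can release its memory, instead
 * of the node lingering until the kernel runs out of vnodes.  A node
 * whose grace time has already expired is simply marked for recycling
 * here.
 */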
|
|
|
|
|
2007-10-02 05:17:17 +04:00
|
|
|
static void
|
2012-07-21 09:17:10 +04:00
|
|
|
callreclaim(struct puffs_mount *pmp, puffs_cookie_t ck, int nlookup)
|
2007-10-02 05:17:17 +04:00
|
|
|
{
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, reclaim);
|
2007-10-02 05:17:17 +04:00
|
|
|
|
|
|
|
if (!EXISTSOP(pmp, RECLAIM))
|
|
|
|
return;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, reclaim);
|
2012-07-21 09:17:10 +04:00
|
|
|
reclaim_msg->pvnr_nlookup = nlookup;
|
2007-10-11 23:41:13 +04:00
|
|
|
puffs_msg_setfaf(park_reclaim);
|
2008-01-29 00:06:36 +03:00
|
|
|
puffs_msg_setinfo(park_reclaim, PUFFSOP_VN, PUFFS_VN_RECLAIM, ck);
|
2007-10-11 23:41:13 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_enqueue(pmp, park_reclaim);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(reclaim);
|
2012-07-21 09:17:10 +04:00
|
|
|
return;
|
2007-10-02 05:17:17 +04:00
|
|
|
}
|
|
|
|
|
2007-01-17 00:58:49 +03:00
|
|
|
/*
|
|
|
|
* always FAF, we don't really care if the server wants to fail to
|
|
|
|
* reclaim the node or not
|
|
|
|
*/
|
2006-10-23 02:43:23 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_reclaim(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_reclaim_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
} */ *ap = v;
|
2007-10-02 05:17:17 +04:00
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2009-09-30 22:19:17 +04:00
|
|
|
struct puffs_node *pnode = vp->v_data;
|
|
|
|
bool notifyserver = true;
|
2006-12-01 15:37:41 +03:00
|
|
|
|
2006-10-23 02:43:23 +04:00
|
|
|
/*
|
|
|
|
* first things first: check if someone is trying to reclaim the
|
|
|
|
* root vnode. do not allow that to travel to userspace.
|
|
|
|
* Note that we don't need to take the lock similarly to
|
|
|
|
* puffs_root(), since there is only one of us.
|
|
|
|
*/
|
2007-10-11 00:42:20 +04:00
|
|
|
if (vp->v_vflag & VV_ROOT) {
|
2007-03-29 20:04:26 +04:00
|
|
|
mutex_enter(&pmp->pmp_lock);
|
2007-01-25 20:43:56 +03:00
|
|
|
KASSERT(pmp->pmp_root != NULL);
|
2006-10-23 02:43:23 +04:00
|
|
|
pmp->pmp_root = NULL;
|
2007-03-29 20:04:26 +04:00
|
|
|
mutex_exit(&pmp->pmp_lock);
|
2009-09-30 22:19:17 +04:00
|
|
|
notifyserver = false;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2009-09-30 22:19:17 +04:00
|
|
|
/*
|
|
|
|
* purge info from kernel before issuing FAF, since we
|
|
|
|
* don't really know when we'll get around to it after
|
|
|
|
* that and someone might race us into node creation
|
|
|
|
*/
|
|
|
|
mutex_enter(&pmp->pmp_lock);
|
|
|
|
LIST_REMOVE(pnode, pn_hashent);
|
2012-07-21 09:17:10 +04:00
|
|
|
if (PUFFS_USE_NAMECACHE(pmp))
|
|
|
|
cache_purge(vp);
|
2009-09-30 22:19:17 +04:00
|
|
|
mutex_exit(&pmp->pmp_lock);
|
|
|
|
|
2012-07-21 09:17:10 +04:00
|
|
|
if (notifyserver) {
|
|
|
|
int nlookup = VPTOPP(vp)->pn_nlookup;
|
|
|
|
|
|
|
|
callreclaim(MPTOPUFFSMP(vp->v_mount), VPTOPNC(vp), nlookup);
|
|
|
|
}
|
2009-09-30 22:19:17 +04:00
|
|
|
|
2007-10-02 05:17:17 +04:00
|
|
|
puffs_putvnode(vp);
|
2011-05-19 07:11:55 +04:00
|
|
|
vp->v_data = NULL;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
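/*
 * callreclaim() above ships pn_nlookup to the file server so that
 * userland can detect the lookup/reclaim race described earlier in
 * this listing.  The userland half of the handshake lives in libpuffs,
 * not here; the fragment below only sketches the idea, with made-up
 * names, and is not the library's actual code.
 */
#if 0	/* userland-side sketch, assumed names */
/*
 * Userland bumps its own per-node counter every time it answers a
 * lookup.  If it has answered more lookups than the kernel has
 * completed (the count carried by the reclaim message), a lookup
 * reply is still in flight and the reclaim must be ignored, because
 * the node is about to be looked up again.
 */
static int
sketch_reclaim_is_stale(unsigned userland_nlookup, unsigned kernel_nlookup)
{

	return userland_nlookup > kernel_nlookup;
}
#endif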
|
|
|
|
|
2007-04-20 15:36:25 +04:00
|
|
|
#define CSIZE sizeof(**ap->a_cookies)
|
2006-10-23 02:43:23 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_readdir(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_readdir_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct uio *a_uio;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
int *a_eofflag;
|
|
|
|
off_t **a_cookies;
|
|
|
|
int *a_ncookies;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, readdir);
|
2007-10-02 01:09:07 +04:00
|
|
|
struct vnode *vp = ap->a_vp;
|
2007-10-11 23:41:13 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2007-04-20 15:56:35 +04:00
|
|
|
size_t argsize, tomove, cookiemem, cookiesmax;
|
2006-10-23 02:43:23 +04:00
|
|
|
struct uio *uio = ap->a_uio;
|
2007-07-19 13:38:01 +04:00
|
|
|
size_t howmuch, resid;
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
|
|
|
|
2007-07-19 13:38:01 +04:00
|
|
|
/*
|
|
|
|
* ok, so we need: resid + cookiemem = maxreq
|
|
|
|
* => resid + cookiesize * (resid/minsize) = maxreq
|
|
|
|
* => resid + cookiesize/minsize * resid = maxreq
|
|
|
|
* => (cookiesize/minsize + 1) * resid = maxreq
|
|
|
|
* => resid = maxreq / (cookiesize/minsize + 1)
|
|
|
|
*
|
|
|
|
* Since cookiesize <= minsize and we're not very big on floats,
|
|
|
|
* we approximate that to be 1. Therefore:
|
|
|
|
*
|
|
|
|
* resid = maxreq / 2;
|
|
|
|
*
|
|
|
|
* Well, at least we didn't have to use differential equations
|
|
|
|
* or the Gram-Schmidt process.
|
|
|
|
*
|
|
|
|
* (yes, I'm very afraid of this)
|
|
|
|
*/
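/*
 * Illustrative numbers only: with a 64 kB transfer budget and cookies
 * requested, resid becomes 32 kB and the other half of the budget is
 * reserved for the cookie array, which always fits since each cookie
 * is no larger than the smallest possible dirent (asserted below).
 */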
|
|
|
|
KASSERT(CSIZE <= _DIRENT_MINSIZE((struct dirent *)0));
|
|
|
|
|
2007-04-12 01:03:05 +04:00
|
|
|
if (ap->a_cookies) {
|
|
|
|
KASSERT(ap->a_ncookies != NULL);
|
2007-04-16 17:03:26 +04:00
|
|
|
if (pmp->pmp_args.pa_fhsize == 0)
|
2007-04-12 01:03:05 +04:00
|
|
|
return EOPNOTSUPP;
|
2007-07-19 13:38:01 +04:00
|
|
|
resid = PUFFS_TOMOVE(uio->uio_resid, pmp) / 2;
|
|
|
|
cookiesmax = resid/_DIRENT_MINSIZE((struct dirent *)0);
|
2007-04-12 01:03:05 +04:00
|
|
|
cookiemem = ALIGN(cookiesmax*CSIZE); /* play safe */
|
|
|
|
} else {
|
2007-07-19 13:38:01 +04:00
|
|
|
resid = PUFFS_TOMOVE(uio->uio_resid, pmp);
|
2007-04-12 01:03:05 +04:00
|
|
|
cookiesmax = 0;
|
|
|
|
cookiemem = 0;
|
|
|
|
}
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
argsize = sizeof(struct puffs_vnmsg_readdir);
|
2007-07-19 13:38:01 +04:00
|
|
|
tomove = resid + cookiemem;
|
2007-10-11 23:41:13 +04:00
|
|
|
puffs_msgmem_alloc(argsize + tomove, &park_readdir,
|
2008-01-03 01:37:19 +03:00
|
|
|
(void *)&readdir_msg, 1);
|
2007-10-11 23:41:13 +04:00
|
|
|
|
|
|
|
puffs_credcvt(&readdir_msg->pvnr_cred, ap->a_cred);
|
|
|
|
readdir_msg->pvnr_offset = uio->uio_offset;
|
|
|
|
readdir_msg->pvnr_resid = resid;
|
|
|
|
readdir_msg->pvnr_ncookies = cookiesmax;
|
|
|
|
readdir_msg->pvnr_eofflag = 0;
|
|
|
|
readdir_msg->pvnr_dentoff = cookiemem;
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_readdir, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_READDIR, VPTOPNC(vp));
|
|
|
|
puffs_msg_setdelta(park_readdir, tomove);
|
2007-10-11 23:41:13 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_readdir, vp->v_data, NULL, error);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* userspace is cheating? */
|
2007-10-11 23:41:13 +04:00
|
|
|
if (readdir_msg->pvnr_resid > resid) {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_senderr(pmp, PUFFS_ERR_READDIR, E2BIG,
|
2007-10-02 01:09:07 +04:00
|
|
|
"resid grew", VPTOPNC(vp));
|
|
|
|
ERROUT(EPROTO);
|
|
|
|
}
|
2007-10-11 23:41:13 +04:00
|
|
|
if (readdir_msg->pvnr_ncookies > cookiesmax) {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_senderr(pmp, PUFFS_ERR_READDIR, E2BIG,
|
2007-10-02 01:09:07 +04:00
|
|
|
"too many cookies", VPTOPNC(vp));
|
2007-09-28 01:14:49 +04:00
|
|
|
ERROUT(EPROTO);
|
|
|
|
}
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-04-12 01:03:05 +04:00
|
|
|
/* check eof */
|
2007-10-11 23:41:13 +04:00
|
|
|
if (readdir_msg->pvnr_eofflag)
|
2007-04-12 01:03:05 +04:00
|
|
|
*ap->a_eofflag = 1;
|
|
|
|
|
2006-10-23 02:43:23 +04:00
|
|
|
/* bouncy-wouncy with the directory data */
|
2007-10-11 23:41:13 +04:00
|
|
|
howmuch = resid - readdir_msg->pvnr_resid;
|
2007-04-12 01:03:05 +04:00
|
|
|
|
|
|
|
/* force eof if no data was returned (getcwd() needs this) */
|
2007-01-07 03:53:13 +03:00
|
|
|
if (howmuch == 0) {
|
|
|
|
*ap->a_eofflag = 1;
|
|
|
|
goto out;
|
|
|
|
}
|
2007-04-12 01:03:05 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
error = uiomove(readdir_msg->pvnr_data + cookiemem, howmuch, uio);
|
2006-10-23 02:43:23 +04:00
|
|
|
if (error)
|
|
|
|
goto out;
|
2007-04-12 01:03:05 +04:00
|
|
|
|
|
|
|
/* provide cookies to caller if so desired */
|
|
|
|
if (ap->a_cookies) {
|
2011-10-18 19:39:09 +04:00
|
|
|
KASSERT(curlwp != uvm.pagedaemon_lwp);
|
2007-10-11 23:41:13 +04:00
|
|
|
*ap->a_cookies = malloc(readdir_msg->pvnr_ncookies*CSIZE,
|
2007-04-12 01:03:05 +04:00
|
|
|
M_TEMP, M_WAITOK);
|
2007-10-11 23:41:13 +04:00
|
|
|
*ap->a_ncookies = readdir_msg->pvnr_ncookies;
|
|
|
|
memcpy(*ap->a_cookies, readdir_msg->pvnr_data,
|
2007-04-12 01:03:05 +04:00
|
|
|
*ap->a_ncookies*CSIZE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* next readdir starts here */
|
2007-10-11 23:41:13 +04:00
|
|
|
uio->uio_offset = readdir_msg->pvnr_offset;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
|
|
|
out:
|
2007-10-11 23:41:13 +04:00
|
|
|
puffs_msgmem_release(park_readdir);
|
2006-10-23 02:43:23 +04:00
|
|
|
return error;
|
|
|
|
}
|
2007-04-12 01:03:05 +04:00
|
|
|
#undef CSIZE
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-05-18 17:53:08 +04:00
|
|
|
/*
|
|
|
|
* poll works by consuming the bitmask in pn_revents. If there are
|
|
|
|
* events available, poll returns immediately. If not, it issues a
|
|
|
|
* poll to userspace, selrecords itself and returns with no available
|
|
|
|
* events. When the file server returns, it executes puffs_parkdone_poll(),
|
|
|
|
* where available events are added to the bitmask. selnotify() is
|
|
|
|
* then also executed by that function causing us to enter here again
|
|
|
|
* and hopefully find the missing bits (unless someone got them first,
|
|
|
|
* in which case it starts all over again).
|
|
|
|
*/
|
2006-10-23 02:43:23 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_poll(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_poll_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
int a_events;
|
2008-09-10 23:25:33 +04:00
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, poll);
|
2007-05-18 17:53:08 +04:00
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
|
|
|
struct puffs_node *pn = vp->v_data;
|
2007-11-16 23:32:17 +03:00
|
|
|
int events, error;
|
2007-05-18 17:53:08 +04:00
|
|
|
|
|
|
|
if (EXISTSOP(pmp, POLL)) {
|
|
|
|
mutex_enter(&pn->pn_mtx);
|
|
|
|
events = pn->pn_revents & ap->a_events;
|
|
|
|
if (events & ap->a_events) {
|
|
|
|
pn->pn_revents &= ~ap->a_events;
|
|
|
|
mutex_exit(&pn->pn_mtx);
|
|
|
|
|
|
|
|
return events;
|
|
|
|
} else {
|
|
|
|
puffs_referencenode(pn);
|
|
|
|
mutex_exit(&pn->pn_mtx);
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, poll);
|
|
|
|
poll_msg->pvnr_events = ap->a_events;
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_poll, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_POLL, VPTOPNC(vp));
|
|
|
|
puffs_msg_setcall(park_poll, puffs_parkdone_poll, pn);
|
2007-11-26 22:01:26 +03:00
|
|
|
selrecord(curlwp, &pn->pn_sel);
|
2007-11-16 23:32:17 +03:00
|
|
|
|
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_poll, vp->v_data,
|
|
|
|
NULL, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(poll);
|
2007-05-18 17:53:08 +04:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return genfs_poll(v);
|
|
|
|
}
|
2006-10-23 02:43:23 +04:00
|
|
|
}
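/*
 * The comment above puffs_vnop_poll describes the other half of the
 * round trip: the request carries a callback (puffs_parkdone_poll,
 * defined elsewhere in this file) that deposits the events returned by
 * the file server into pn_revents and wakes the selector.  The
 * fragment below only sketches the shape of such a completion handler
 * under assumed parameter types; it is not the actual callback.
 */
#if 0	/* sketch of a poll completion handler */
static void
sketch_parkdone_poll(struct puffs_node *pn, int returned_events, int rv)
{

	/* on error, report POLLERR rather than leaving the waiter hanging */
	if (rv != 0)
		returned_events = POLLERR;

	mutex_enter(&pn->pn_mtx);
	pn->pn_revents |= returned_events;	/* consumed by the next poll */
	mutex_exit(&pn->pn_mtx);

	/* wake whoever selrecord()ed itself in puffs_vnop_poll */
	selnotify(&pn->pn_sel, 0, 0);

	/* drop the reference taken before the message was enqueued */
	puffs_releasenode(pn);
}
#endif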
|
|
|
|
|
2009-12-04 23:26:35 +03:00
|
|
|
static int
|
|
|
|
flushvncache(struct vnode *vp, off_t offlo, off_t offhi, bool wait)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
2009-12-04 23:26:35 +03:00
|
|
|
struct puffs_node *pn = VPTOPP(vp);
|
2007-10-11 23:41:13 +04:00
|
|
|
struct vattr va;
|
2009-12-04 23:26:35 +03:00
|
|
|
int pflags, error;
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2007-05-06 23:43:14 +04:00
|
|
|
/* flush out information from our metacache, see vop_setattr */
|
2009-11-05 22:22:57 +03:00
|
|
|
if (pn->pn_stat & PNODE_METACACHE_MASK
|
|
|
|
&& (pn->pn_stat & PNODE_DYING) == 0) {
|
2007-03-20 13:21:58 +03:00
|
|
|
vattr_null(&va);
|
2009-12-04 23:26:35 +03:00
|
|
|
error = dosetattr(vp, &va, FSCRED,
|
|
|
|
SETATTR_CHSIZE | (wait ? 0 : SETATTR_ASYNC));
|
2007-03-20 13:21:58 +03:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
}
|
2006-11-08 01:10:18 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* flush pages to avoid being overly dirty
|
|
|
|
*/
|
2007-03-20 13:21:58 +03:00
|
|
|
pflags = PGO_CLEANIT;
|
2009-12-04 23:26:35 +03:00
|
|
|
if (wait)
|
2007-03-20 13:21:58 +03:00
|
|
|
pflags |= PGO_SYNCIO;
|
2012-04-08 19:04:41 +04:00
|
|
|
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_enter(vp->v_interlock);
|
2009-12-04 23:26:35 +03:00
|
|
|
return VOP_PUTPAGES(vp, trunc_page(offlo), round_page(offhi), pflags);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
puffs_vnop_fsync(void *v)
|
|
|
|
{
|
|
|
|
struct vop_fsync_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
int a_flags;
|
|
|
|
off_t a_offlo;
|
|
|
|
off_t a_offhi;
|
|
|
|
} */ *ap = v;
|
|
|
|
PUFFS_MSG_VARS(vn, fsync);
|
2012-01-17 13:30:16 +04:00
|
|
|
struct vnode *vp;
|
|
|
|
struct puffs_node *pn;
|
|
|
|
struct puffs_mount *pmp;
|
2009-12-04 23:26:35 +03:00
|
|
|
int error, dofaf;
|
|
|
|
|
2012-01-17 13:30:16 +04:00
|
|
|
vp = ap->a_vp;
|
|
|
|
KASSERT(vp != NULL);
|
|
|
|
pn = VPTOPP(vp);
|
|
|
|
KASSERT(pn != NULL);
|
|
|
|
pmp = MPTOPUFFSMP(vp->v_mount);
|
2011-09-21 19:36:33 +04:00
|
|
|
if (ap->a_flags & FSYNC_WAIT) {
|
|
|
|
mutex_enter(&pn->pn_sizemtx);
|
|
|
|
} else {
|
|
|
|
if (mutex_tryenter(&pn->pn_sizemtx) == 0)
|
|
|
|
return EDEADLK;
|
|
|
|
}
|
|
|
|
|
2009-12-04 23:26:35 +03:00
|
|
|
error = flushvncache(vp, ap->a_offlo, ap->a_offhi,
|
|
|
|
(ap->a_flags & FSYNC_WAIT) == FSYNC_WAIT);
|
2006-11-08 01:10:18 +03:00
|
|
|
if (error)
|
2011-08-29 08:12:45 +04:00
|
|
|
goto out;
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2007-01-11 19:08:58 +03:00
|
|
|
/*
|
|
|
|
* HELLO! We exit already here if the user server does not
|
2007-01-21 19:29:31 +03:00
|
|
|
* support fsync OR if we should call fsync for a node which
|
|
|
|
* has references neither in the kernel nor in the fs server.
|
|
|
|
* Otherwise we continue to issue fsync() forward.
|
2007-01-11 19:08:58 +03:00
|
|
|
*/
|
2011-08-29 08:12:45 +04:00
|
|
|
error = 0;
|
2009-11-05 22:22:57 +03:00
|
|
|
if (!EXISTSOP(pmp, FSYNC) || (pn->pn_stat & PNODE_DYING))
|
2011-08-29 08:12:45 +04:00
|
|
|
goto out;
|
2007-01-11 19:08:58 +03:00
|
|
|
|
2007-01-27 01:59:49 +03:00
|
|
|
dofaf = (ap->a_flags & FSYNC_WAIT) == 0 || ap->a_flags == FSYNC_LAZY;
|
2007-01-19 17:59:50 +03:00
|
|
|
/*
|
|
|
|
* We abuse VXLOCK to mean "vnode is going to die", so we issue
|
|
|
|
* only FAFs for those. Otherwise there's a danger of deadlock,
|
|
|
|
* since the execution context here might be the user server
|
|
|
|
* doing some operation on another fs, which in turn caused a
|
|
|
|
* vnode to be reclaimed from the freelist for this fs.
|
|
|
|
*/
|
|
|
|
if (dofaf == 0) {
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_enter(vp->v_interlock);
|
2007-10-11 00:42:20 +04:00
|
|
|
if (vp->v_iflag & VI_XLOCK)
|
2007-01-19 17:59:50 +03:00
|
|
|
dofaf = 1;
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_exit(vp->v_interlock);
|
2007-01-19 17:59:50 +03:00
|
|
|
}
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, fsync);
|
|
|
|
if (dofaf)
|
|
|
|
puffs_msg_setfaf(park_fsync);
|
2007-01-11 19:08:58 +03:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
puffs_credcvt(&fsync_msg->pvnr_cred, ap->a_cred);
|
|
|
|
fsync_msg->pvnr_flags = ap->a_flags;
|
|
|
|
fsync_msg->pvnr_offlo = ap->a_offlo;
|
|
|
|
fsync_msg->pvnr_offhi = ap->a_offhi;
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_fsync, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_FSYNC, VPTOPNC(vp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_fsync, vp->v_data, NULL, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(fsync);
|
|
|
|
|
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2011-08-29 08:12:45 +04:00
|
|
|
out:
|
|
|
|
mutex_exit(&pn->pn_sizemtx);
|
2006-11-08 01:10:18 +03:00
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_seek(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_seek_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
off_t a_oldoff;
|
|
|
|
off_t a_newoff;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, seek);
|
2007-10-02 01:09:07 +04:00
|
|
|
struct vnode *vp = ap->a_vp;
|
2007-10-11 16:31:45 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2007-10-02 01:09:07 +04:00
|
|
|
int error;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, seek);
|
|
|
|
seek_msg->pvnr_oldoff = ap->a_oldoff;
|
|
|
|
seek_msg->pvnr_newoff = ap->a_newoff;
|
|
|
|
puffs_credcvt(&seek_msg->pvnr_cred, ap->a_cred);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_seek, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_SEEK, VPTOPNC(vp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_seek, vp->v_data, NULL, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(seek);
|
2007-10-11 16:31:45 +04:00
|
|
|
return checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2007-10-02 05:17:17 +04:00
|
|
|
static int
|
2008-01-29 00:06:36 +03:00
|
|
|
callremove(struct puffs_mount *pmp, puffs_cookie_t dck, puffs_cookie_t ck,
|
2007-10-02 05:17:17 +04:00
|
|
|
struct componentname *cnp)
|
|
|
|
{
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, remove);
|
2007-10-02 05:17:17 +04:00
|
|
|
int error;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, remove);
|
2008-01-29 00:06:36 +03:00
|
|
|
remove_msg->pvnr_cookie_targ = ck;
|
2007-10-11 23:41:13 +04:00
|
|
|
puffs_makecn(&remove_msg->pvnr_cn, &remove_msg->pvnr_cn_cred,
|
2007-12-08 22:57:02 +03:00
|
|
|
cnp, PUFFS_USE_FULLPNBUF(pmp));
|
2008-01-29 00:06:36 +03:00
|
|
|
puffs_msg_setinfo(park_remove, PUFFSOP_VN, PUFFS_VN_REMOVE, dck);
|
2007-10-11 23:41:13 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT(pmp, park_remove, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(remove);
|
2007-10-02 05:17:17 +04:00
|
|
|
|
|
|
|
return checkerr(pmp, error, __func__);
|
|
|
|
}
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
/*
|
|
|
|
* XXX: can't use callremove now because can't catch setbacks with
|
2007-11-18 00:30:48 +03:00
|
|
|
* it due to lack of a pnode argument.
|
2007-10-11 23:41:13 +04:00
|
|
|
*/
|
2006-10-23 02:43:23 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_remove(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_remove_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_dvp;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct componentname *a_cnp;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, remove);
|
2007-10-02 05:17:17 +04:00
|
|
|
struct vnode *dvp = ap->a_dvp;
|
|
|
|
struct vnode *vp = ap->a_vp;
|
2007-11-18 00:30:48 +03:00
|
|
|
struct puffs_node *dpn = VPTOPP(dvp);
|
|
|
|
struct puffs_node *pn = VPTOPP(vp);
|
2007-10-11 23:41:13 +04:00
|
|
|
struct componentname *cnp = ap->a_cnp;
|
2007-11-18 00:30:48 +03:00
|
|
|
struct mount *mp = dvp->v_mount;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(mp);
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, remove);
|
|
|
|
remove_msg->pvnr_cookie_targ = VPTOPNC(vp);
|
|
|
|
puffs_makecn(&remove_msg->pvnr_cn, &remove_msg->pvnr_cn_cred,
|
2007-12-08 22:57:02 +03:00
|
|
|
cnp, PUFFS_USE_FULLPNBUF(pmp));
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_remove, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_REMOVE, VPTOPNC(dvp));
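/*
 * The vnodes are unlocked (while references on the puffs nodes are
 * held) before waiting, so the file server can issue vnode operations
 * of its own without deadlocking against us; both nodes are passed to
 * puffs_msg_wait2() so setbacks reported by the server can still be
 * applied to them.
 */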
|
2007-10-11 23:41:13 +04:00
|
|
|
|
2007-11-18 00:30:48 +03:00
|
|
|
puffs_msg_enqueue(pmp, park_remove);
|
|
|
|
REFPN_AND_UNLOCKVP(dvp, dpn);
|
2007-10-02 05:17:17 +04:00
|
|
|
if (dvp == vp)
|
2007-11-18 00:30:48 +03:00
|
|
|
REFPN(pn);
|
2007-01-01 23:14:36 +03:00
|
|
|
else
|
2007-11-18 00:30:48 +03:00
|
|
|
REFPN_AND_UNLOCKVP(vp, pn);
|
|
|
|
error = puffs_msg_wait2(pmp, park_remove, dpn, pn);
|
|
|
|
|
|
|
|
PUFFS_MSG_RELEASE(remove);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-18 00:30:48 +03:00
|
|
|
RELEPN_AND_VP(dvp, dpn);
|
|
|
|
RELEPN_AND_VP(vp, pn);
|
|
|
|
|
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_mkdir(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_mkdir_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_dvp;
|
|
|
|
struct vnode **a_vpp;
|
|
|
|
struct componentname *a_cnp;
|
|
|
|
struct vattr *a_vap;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, mkdir);
|
|
|
|
struct vnode *dvp = ap->a_dvp;
|
2007-11-18 00:30:48 +03:00
|
|
|
struct puffs_node *dpn = VPTOPP(dvp);
|
2007-10-21 23:43:52 +04:00
|
|
|
struct componentname *cnp = ap->a_cnp;
|
2007-11-18 00:30:48 +03:00
|
|
|
struct mount *mp = dvp->v_mount;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(mp);
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, mkdir);
|
|
|
|
puffs_makecn(&mkdir_msg->pvnr_cn, &mkdir_msg->pvnr_cn_cred,
|
2007-12-08 22:57:02 +03:00
|
|
|
cnp, PUFFS_USE_FULLPNBUF(pmp));
|
2007-10-11 23:41:13 +04:00
|
|
|
mkdir_msg->pvnr_va = *ap->a_vap;
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_mkdir, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_MKDIR, VPTOPNC(dvp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2009-09-30 22:19:17 +04:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_mkdir, dvp->v_data, NULL, error);
|
2007-11-18 00:30:48 +03:00
|
|
|
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
if (error)
|
2006-10-26 17:42:21 +04:00
|
|
|
goto out;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-18 00:30:48 +03:00
|
|
|
error = puffs_newnode(mp, dvp, ap->a_vpp,
|
2007-10-21 23:43:52 +04:00
|
|
|
mkdir_msg->pvnr_newnode, cnp, VDIR, 0);
|
2012-04-08 19:04:41 +04:00
|
|
|
if (error) {
|
2007-11-18 00:30:48 +03:00
|
|
|
puffs_abortbutton(pmp, PUFFS_ABORT_MKDIR, dpn->pn_cookie,
|
2007-10-21 23:43:52 +04:00
|
|
|
mkdir_msg->pvnr_newnode, cnp);
|
2012-04-08 19:04:41 +04:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (PUFFS_USE_FS_TTL(pmp)) {
|
|
|
|
struct timespec *va_ttl = &mkdir_msg->pvnr_va_ttl;
|
|
|
|
struct timespec *cn_ttl = &mkdir_msg->pvnr_cn_ttl;
|
|
|
|
struct vattr *rvap = &mkdir_msg->pvnr_va;
|
|
|
|
|
2012-04-18 04:42:50 +04:00
|
|
|
update_va(*ap->a_vpp, NULL, rvap,
|
|
|
|
va_ttl, cn_ttl, SETATTR_CHSIZE);
|
2012-04-08 19:04:41 +04:00
|
|
|
}
|
2006-10-26 17:42:21 +04:00
|
|
|
|
|
|
|
out:
|
2009-09-30 22:19:17 +04:00
|
|
|
vput(dvp);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(mkdir);
|
2006-10-26 17:42:21 +04:00
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2007-10-02 05:17:17 +04:00
|
|
|
static int
|
2008-01-29 00:06:36 +03:00
|
|
|
callrmdir(struct puffs_mount *pmp, puffs_cookie_t dck, puffs_cookie_t ck,
|
2007-10-02 05:17:17 +04:00
|
|
|
struct componentname *cnp)
|
|
|
|
{
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, rmdir);
|
2007-10-02 05:17:17 +04:00
|
|
|
int error;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, rmdir);
|
2008-01-29 00:06:36 +03:00
|
|
|
rmdir_msg->pvnr_cookie_targ = ck;
|
2007-10-11 23:41:13 +04:00
|
|
|
puffs_makecn(&rmdir_msg->pvnr_cn, &rmdir_msg->pvnr_cn_cred,
|
2007-12-08 22:57:02 +03:00
|
|
|
cnp, PUFFS_USE_FULLPNBUF(pmp));
|
2008-01-29 00:06:36 +03:00
|
|
|
puffs_msg_setinfo(park_rmdir, PUFFSOP_VN, PUFFS_VN_RMDIR, dck);
|
2007-10-11 23:41:13 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT(pmp, park_rmdir, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(rmdir);
|
2007-10-02 05:17:17 +04:00
|
|
|
|
|
|
|
return checkerr(pmp, error, __func__);
|
|
|
|
}
|
|
|
|
|
2006-10-23 02:43:23 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_rmdir(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_rmdir_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_dvp;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct componentname *a_cnp;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, rmdir);
|
2007-10-02 05:17:17 +04:00
|
|
|
struct vnode *dvp = ap->a_dvp;
|
|
|
|
struct vnode *vp = ap->a_vp;
|
2007-11-18 00:30:48 +03:00
|
|
|
struct puffs_node *dpn = VPTOPP(dvp);
|
|
|
|
struct puffs_node *pn = VPTOPP(vp);
|
2007-10-02 05:17:17 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(dvp->v_mount);
|
2007-10-11 23:41:13 +04:00
|
|
|
struct componentname *cnp = ap->a_cnp;
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, rmdir);
|
|
|
|
rmdir_msg->pvnr_cookie_targ = VPTOPNC(vp);
|
|
|
|
puffs_makecn(&rmdir_msg->pvnr_cn, &rmdir_msg->pvnr_cn_cred,
|
2007-12-08 22:57:02 +03:00
|
|
|
cnp, PUFFS_USE_FULLPNBUF(pmp));
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_rmdir, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_RMDIR, VPTOPNC(dvp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-18 00:30:48 +03:00
|
|
|
puffs_msg_enqueue(pmp, park_rmdir);
|
|
|
|
REFPN_AND_UNLOCKVP(dvp, dpn);
|
|
|
|
REFPN_AND_UNLOCKVP(vp, pn);
|
|
|
|
error = puffs_msg_wait2(pmp, park_rmdir, dpn, pn);
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(rmdir);
|
2006-12-30 04:29:03 +03:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
/* XXX: some call cache_purge() *for both vnodes* here, investigate */
|
2007-11-18 00:30:48 +03:00
|
|
|
RELEPN_AND_VP(dvp, dpn);
|
|
|
|
RELEPN_AND_VP(vp, pn);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_link(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_link_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_dvp;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct componentname *a_cnp;
|
2008-09-10 23:25:33 +04:00
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, link);
|
|
|
|
struct vnode *dvp = ap->a_dvp;
|
|
|
|
struct vnode *vp = ap->a_vp;
|
2007-11-18 00:30:48 +03:00
|
|
|
struct puffs_node *dpn = VPTOPP(dvp);
|
|
|
|
struct puffs_node *pn = VPTOPP(vp);
|
2007-10-11 23:41:13 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(dvp->v_mount);
|
2007-10-21 23:43:52 +04:00
|
|
|
struct componentname *cnp = ap->a_cnp;
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, link);
|
|
|
|
link_msg->pvnr_cookie_targ = VPTOPNC(vp);
|
|
|
|
puffs_makecn(&link_msg->pvnr_cn, &link_msg->pvnr_cn_cred,
|
2007-12-08 22:57:02 +03:00
|
|
|
cnp, PUFFS_USE_FULLPNBUF(pmp));
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_link, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_LINK, VPTOPNC(dvp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-18 00:30:48 +03:00
|
|
|
puffs_msg_enqueue(pmp, park_link);
|
|
|
|
REFPN_AND_UNLOCKVP(dvp, dpn);
|
|
|
|
REFPN(pn);
|
|
|
|
error = puffs_msg_wait2(pmp, park_link, dpn, pn);
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(link);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-03-20 13:21:58 +03:00
|
|
|
/*
|
|
|
|
* XXX: stay in touch with the cache. I don't like this, but
|
|
|
|
* don't have a better solution either. See also puffs_rename().
|
|
|
|
*/
|
|
|
|
if (error == 0)
|
2007-11-18 00:55:29 +03:00
|
|
|
puffs_updatenode(pn, PUFFS_UPDATECTIME, 0);
|
2007-03-20 13:21:58 +03:00
|
|
|
|
2007-11-18 00:30:48 +03:00
|
|
|
RELEPN_AND_VP(dvp, dpn);
|
|
|
|
puffs_releasenode(pn);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_symlink(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_symlink_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_dvp;
|
|
|
|
struct vnode **a_vpp;
|
|
|
|
struct componentname *a_cnp;
|
|
|
|
struct vattr *a_vap;
|
|
|
|
char *a_target;
|
2008-09-10 23:25:33 +04:00
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, symlink);
|
|
|
|
struct vnode *dvp = ap->a_dvp;
|
2007-11-18 00:30:48 +03:00
|
|
|
struct puffs_node *dpn = VPTOPP(dvp);
|
|
|
|
struct mount *mp = dvp->v_mount;
|
2007-10-11 23:41:13 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(dvp->v_mount);
|
2007-11-18 00:30:48 +03:00
|
|
|
struct componentname *cnp = ap->a_cnp;
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
|
|
|
|
|
|
|
*ap->a_vpp = NULL;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, symlink);
|
|
|
|
puffs_makecn(&symlink_msg->pvnr_cn, &symlink_msg->pvnr_cn_cred,
|
2007-12-08 22:57:02 +03:00
|
|
|
cnp, PUFFS_USE_FULLPNBUF(pmp));
|
2007-10-11 23:41:13 +04:00
|
|
|
symlink_msg->pvnr_va = *ap->a_vap;
|
|
|
|
(void)strlcpy(symlink_msg->pvnr_link, ap->a_target,
|
|
|
|
sizeof(symlink_msg->pvnr_link));
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_symlink, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_SYMLINK, VPTOPNC(dvp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2009-09-30 22:19:17 +04:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_symlink, dvp->v_data, NULL, error);
|
2007-11-18 00:30:48 +03:00
|
|
|
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
if (error)
|
2006-10-26 17:42:21 +04:00
|
|
|
goto out;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-18 00:30:48 +03:00
|
|
|
error = puffs_newnode(mp, dvp, ap->a_vpp,
|
|
|
|
symlink_msg->pvnr_newnode, cnp, VLNK, 0);
|
2012-04-08 19:04:41 +04:00
|
|
|
if (error) {
|
2007-11-18 00:30:48 +03:00
|
|
|
puffs_abortbutton(pmp, PUFFS_ABORT_SYMLINK, dpn->pn_cookie,
|
|
|
|
symlink_msg->pvnr_newnode, cnp);
|
2012-04-08 19:04:41 +04:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (PUFFS_USE_FS_TTL(pmp)) {
|
|
|
|
struct timespec *va_ttl = &symlink_msg->pvnr_va_ttl;
|
|
|
|
struct timespec *cn_ttl = &symlink_msg->pvnr_cn_ttl;
|
|
|
|
struct vattr *rvap = &symlink_msg->pvnr_va;
|
|
|
|
|
2012-04-18 04:42:50 +04:00
|
|
|
update_va(*ap->a_vpp, NULL, rvap,
|
|
|
|
va_ttl, cn_ttl, SETATTR_CHSIZE);
|
2012-04-08 19:04:41 +04:00
|
|
|
}
|
2006-10-26 17:42:21 +04:00
|
|
|
|
|
|
|
out:
|
2009-09-30 22:19:17 +04:00
|
|
|
vput(dvp);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(symlink);
|
2007-07-01 19:30:15 +04:00
|
|
|
|
2006-10-26 17:42:21 +04:00
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_readlink(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_readlink_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct uio *a_uio;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, readlink);
|
|
|
|
struct vnode *vp = ap->a_vp;
|
2007-09-28 01:14:49 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(ap->a_vp->v_mount);
|
2007-02-15 22:50:54 +03:00
|
|
|
size_t linklen;
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, readlink);
|
|
|
|
puffs_credcvt(&readlink_msg->pvnr_cred, ap->a_cred);
|
|
|
|
linklen = sizeof(readlink_msg->pvnr_link);
|
|
|
|
readlink_msg->pvnr_linklen = linklen;
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_readlink, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_READLINK, VPTOPNC(vp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_readlink, vp->v_data, NULL, error);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
if (error)
|
2007-10-11 23:41:13 +04:00
|
|
|
goto out;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-02-15 22:50:54 +03:00
|
|
|
/* bad bad user file server */
|
2007-10-11 23:41:13 +04:00
|
|
|
if (readlink_msg->pvnr_linklen > linklen) {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_senderr(pmp, PUFFS_ERR_READLINK, E2BIG,
|
2007-10-02 01:09:07 +04:00
|
|
|
"linklen too big", VPTOPNC(ap->a_vp));
|
2007-10-11 23:41:13 +04:00
|
|
|
error = EPROTO;
|
|
|
|
goto out;
|
2007-09-28 01:14:49 +04:00
|
|
|
}
|
2007-02-15 22:50:54 +03:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
error = uiomove(&readlink_msg->pvnr_link, readlink_msg->pvnr_linklen,
|
2006-10-23 02:43:23 +04:00
|
|
|
ap->a_uio);
|
2007-10-11 23:41:13 +04:00
|
|
|
out:
|
|
|
|
PUFFS_MSG_RELEASE(readlink);
|
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_rename(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_rename_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_fdvp;
|
|
|
|
struct vnode *a_fvp;
|
|
|
|
struct componentname *a_fcnp;
|
|
|
|
struct vnode *a_tdvp;
|
|
|
|
struct vnode *a_tvp;
|
|
|
|
struct componentname *a_tcnp;
|
2008-09-10 23:25:33 +04:00
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, rename);
|
2009-11-19 19:21:04 +03:00
|
|
|
struct vnode *fdvp = ap->a_fdvp, *fvp = ap->a_fvp;
|
|
|
|
struct vnode *tdvp = ap->a_tdvp, *tvp = ap->a_tvp;
|
2007-11-18 00:55:29 +03:00
|
|
|
struct puffs_node *fpn = ap->a_fvp->v_data;
|
2007-11-16 23:32:17 +03:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(fdvp->v_mount);
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
2009-11-19 19:21:04 +03:00
|
|
|
bool doabort = true;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2009-11-19 19:21:04 +03:00
|
|
|
if ((fvp->v_mount != tdvp->v_mount) ||
|
|
|
|
(tvp && (fvp->v_mount != tvp->v_mount))) {
|
2007-06-26 16:50:49 +04:00
|
|
|
ERROUT(EXDEV);
|
2009-11-19 19:21:04 +03:00
|
|
|
}
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, rename);
|
2009-11-19 19:21:04 +03:00
|
|
|
rename_msg->pvnr_cookie_src = VPTOPNC(fvp);
|
|
|
|
rename_msg->pvnr_cookie_targdir = VPTOPNC(tdvp);
|
|
|
|
if (tvp)
|
|
|
|
rename_msg->pvnr_cookie_targ = VPTOPNC(tvp);
|
2006-10-23 02:43:23 +04:00
|
|
|
else
|
2007-10-11 23:41:13 +04:00
|
|
|
rename_msg->pvnr_cookie_targ = NULL;
|
2007-12-08 22:57:02 +03:00
|
|
|
puffs_makecn(&rename_msg->pvnr_cn_src, &rename_msg->pvnr_cn_src_cred,
|
2007-07-02 02:54:16 +04:00
|
|
|
ap->a_fcnp, PUFFS_USE_FULLPNBUF(pmp));
|
2007-12-08 22:57:02 +03:00
|
|
|
puffs_makecn(&rename_msg->pvnr_cn_targ, &rename_msg->pvnr_cn_targ_cred,
|
2007-07-02 02:54:16 +04:00
|
|
|
ap->a_tcnp, PUFFS_USE_FULLPNBUF(pmp));
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_rename, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_RENAME, VPTOPNC(fdvp));
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_rename, fdvp->v_data, NULL, error);
|
2009-11-19 19:21:04 +03:00
|
|
|
doabort = false;
|
|
|
|
PUFFS_MSG_RELEASE(rename);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-03-20 13:21:58 +03:00
|
|
|
/*
|
|
|
|
* XXX: stay in touch with the cache. I don't like this, but
|
|
|
|
* don't have a better solution either. See also puffs_link().
|
|
|
|
*/
|
|
|
|
if (error == 0)
|
2007-11-18 00:55:29 +03:00
|
|
|
puffs_updatenode(fpn, PUFFS_UPDATECTIME, 0);
|
2007-03-20 13:21:58 +03:00
|
|
|
|
2006-10-23 02:43:23 +04:00
|
|
|
out:
|
2009-11-19 19:21:04 +03:00
|
|
|
if (doabort)
|
|
|
|
VOP_ABORTOP(tdvp, ap->a_tcnp);
|
|
|
|
if (tvp != NULL)
|
|
|
|
vput(tvp);
|
|
|
|
if (tdvp == tvp)
|
|
|
|
vrele(tdvp);
|
2007-01-02 03:14:15 +03:00
|
|
|
else
|
2009-11-19 19:21:04 +03:00
|
|
|
vput(tdvp);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2009-11-19 19:21:04 +03:00
|
|
|
if (doabort)
|
|
|
|
VOP_ABORTOP(fdvp, ap->a_fcnp);
|
|
|
|
vrele(fdvp);
|
|
|
|
vrele(fvp);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2007-06-26 16:50:49 +04:00
|
|
|
#define RWARGS(cont, iofl, move, offset, creds) \
|
|
|
|
(cont)->pvnr_ioflag = (iofl); \
|
|
|
|
(cont)->pvnr_resid = (move); \
|
|
|
|
(cont)->pvnr_offset = (offset); \
|
|
|
|
puffs_credcvt(&(cont)->pvnr_cred, creds)
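/*
 * RWARGS fills in the fields common to read/write style requests:
 * ioflag, residual count, file offset and the converted credentials.
 * It is used by the read, write and strategy paths below.
 */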
|
|
|
|
|
2006-10-23 02:43:23 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_read(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_read_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct uio *a_uio;
|
|
|
|
int a_ioflag;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, read);
|
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
|
|
|
struct uio *uio = ap->a_uio;
|
2006-10-23 02:43:23 +04:00
|
|
|
size_t tomove, argsize;
|
2006-11-08 01:10:18 +03:00
|
|
|
vsize_t bytelen;
|
2008-11-26 23:17:33 +03:00
|
|
|
int error;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
read_msg = NULL;
|
2006-10-23 02:43:23 +04:00
|
|
|
error = 0;
|
|
|
|
|
2006-11-08 01:10:18 +03:00
|
|
|
/* std sanity */
|
|
|
|
if (uio->uio_resid == 0)
|
|
|
|
return 0;
|
|
|
|
if (uio->uio_offset < 0)
|
|
|
|
return EINVAL;
|
|
|
|
|
2007-06-25 02:16:03 +04:00
|
|
|
if (vp->v_type == VREG && PUFFS_USE_PAGECACHE(pmp)) {
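/*
 * Cached path: ubc_uiomove() copies through the page cache and
 * missing pages are faulted in via VOP_GETPAGES, which ultimately
 * issues PUFFS_VN_READ through puffs_vnop_strategy().
 */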
|
2006-11-08 01:10:18 +03:00
|
|
|
const int advice = IO_ADV_DECODE(ap->a_ioflag);
|
|
|
|
|
|
|
|
while (uio->uio_resid > 0) {
|
2012-03-17 03:13:48 +04:00
|
|
|
if (vp->v_size <= uio->uio_offset) {
|
|
|
|
break;
|
|
|
|
}
|
2006-11-08 01:10:18 +03:00
|
|
|
bytelen = MIN(uio->uio_resid,
|
|
|
|
vp->v_size - uio->uio_offset);
|
|
|
|
if (bytelen == 0)
|
|
|
|
break;
|
|
|
|
|
2008-11-26 23:17:33 +03:00
|
|
|
error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
|
|
|
|
UBC_READ | UBC_PARTIALOK | UBC_UNMAP_FLAG(vp));
|
2006-11-08 01:10:18 +03:00
|
|
|
if (error)
|
|
|
|
break;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2006-11-08 01:10:18 +03:00
|
|
|
if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
|
2007-11-18 00:55:29 +03:00
|
|
|
puffs_updatenode(VPTOPP(vp), PUFFS_UPDATEATIME, 0);
|
2006-11-08 01:10:18 +03:00
|
|
|
} else {
|
2006-10-23 02:43:23 +04:00
|
|
|
/*
|
2006-11-19 01:45:39 +03:00
|
|
|
* in case it's not a regular file or we're operating
|
|
|
|
* uncached, do read in the old-fashioned style,
|
|
|
|
* i.e. explicit read operations
|
2006-10-23 02:43:23 +04:00
|
|
|
*/
|
|
|
|
|
|
|
|
tomove = PUFFS_TOMOVE(uio->uio_resid, pmp);
|
2007-10-11 23:41:13 +04:00
|
|
|
argsize = sizeof(struct puffs_vnmsg_read);
|
|
|
|
puffs_msgmem_alloc(argsize + tomove, &park_read,
|
2008-01-03 01:37:19 +03:00
|
|
|
(void *)&read_msg, 1);
|
2006-11-08 01:10:18 +03:00
|
|
|
|
|
|
|
error = 0;
|
|
|
|
while (uio->uio_resid > 0) {
|
2007-06-26 16:50:49 +04:00
|
|
|
tomove = PUFFS_TOMOVE(uio->uio_resid, pmp);
|
2007-10-26 20:54:50 +04:00
|
|
|
memset(read_msg, 0, argsize); /* XXX: touser KASSERT */
|
2007-10-11 23:41:13 +04:00
|
|
|
RWARGS(read_msg, ap->a_ioflag, tomove,
|
2007-06-26 16:50:49 +04:00
|
|
|
uio->uio_offset, ap->a_cred);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_read, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_READ, VPTOPNC(vp));
|
|
|
|
puffs_msg_setdelta(park_read, tomove);
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_read, vp->v_data,
|
|
|
|
NULL, error);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2006-11-08 01:10:18 +03:00
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
if (read_msg->pvnr_resid > tomove) {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_senderr(pmp, PUFFS_ERR_READ,
|
2007-10-02 01:09:07 +04:00
|
|
|
E2BIG, "resid grew", VPTOPNC(ap->a_vp));
|
2007-09-28 01:14:49 +04:00
|
|
|
error = EPROTO;
|
2006-11-08 01:10:18 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
error = uiomove(read_msg->pvnr_data,
|
|
|
|
tomove - read_msg->pvnr_resid, uio);
|
2006-11-08 01:10:18 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* in case the file is out of juice, resid from
|
|
|
|
* userspace is != 0; the error case is
|
|
|
|
* quite obvious
|
|
|
|
*/
|
2007-10-11 23:41:13 +04:00
|
|
|
if (error || read_msg->pvnr_resid)
|
2006-11-08 01:10:18 +03:00
|
|
|
break;
|
|
|
|
}
|
2007-10-11 23:41:13 +04:00
|
|
|
|
|
|
|
puffs_msgmem_release(park_read);
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2007-06-26 16:50:49 +04:00
|
|
|
/*
|
|
|
|
* XXX: in case of a failure, this leaves uio in a bad state.
|
|
|
|
* We could theoretically copy the uio and iovecs and "replay"
|
|
|
|
* them the right amount after the userspace trip, but don't
|
|
|
|
* bother for now.
|
|
|
|
*/
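/*
 * A minimal sketch of the copy-and-replay idea mentioned above, not
 * part of the compiled code; it assumes only that uiomove() consumes
 * the iovec array in place, and the exact placement of such a helper
 * is hypothetical:
 *
 *	size_t iovlen = uio->uio_iovcnt * sizeof(struct iovec);
 *	struct uio uio_save = *uio;
 *	struct iovec *iov_save = kmem_alloc(iovlen, KM_SLEEP);
 *
 *	memcpy(iov_save, uio->uio_iov, iovlen);
 *	... perform the userspace round trip ...
 *	if (error) {
 *		(roll back both the iovecs and the scalar uio fields)
 *		memcpy(uio_save.uio_iov, iov_save, iovlen);
 *		*uio = uio_save;
 *	}
 *	kmem_free(iov_save, iovlen);
 */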
|
2006-10-23 02:43:23 +04:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_write(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_write_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct uio *a_uio;
|
|
|
|
int a_ioflag;
|
|
|
|
kauth_cred_t a_cred;
|
2007-06-01 19:59:37 +04:00
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, write);
|
|
|
|
struct vnode *vp = ap->a_vp;
|
2011-08-29 08:12:45 +04:00
|
|
|
struct puffs_node *pn = VPTOPP(vp);
|
2007-10-11 23:41:13 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
|
|
|
struct uio *uio = ap->a_uio;
|
2006-10-23 02:43:23 +04:00
|
|
|
size_t tomove, argsize;
|
2006-11-08 01:10:18 +03:00
|
|
|
off_t oldoff, newoff, origoff;
|
|
|
|
vsize_t bytelen;
|
2006-11-19 01:45:39 +03:00
|
|
|
int error, uflags;
|
2007-02-10 16:12:43 +03:00
|
|
|
int ubcflags;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2006-11-08 01:10:18 +03:00
|
|
|
error = uflags = 0;
|
2007-10-11 23:41:13 +04:00
|
|
|
write_msg = NULL;
|
2006-11-17 20:48:02 +03:00
|
|
|
|
2011-08-29 08:12:45 +04:00
|
|
|
mutex_enter(&pn->pn_sizemtx);
|
|
|
|
|
2007-06-25 02:16:03 +04:00
|
|
|
if (vp->v_type == VREG && PUFFS_USE_PAGECACHE(pmp)) {
|
2008-11-26 23:17:33 +03:00
|
|
|
ubcflags = UBC_WRITE | UBC_PARTIALOK | UBC_UNMAP_FLAG(vp);
|
2006-11-18 11:18:24 +03:00
|
|
|
|
2006-11-08 01:10:18 +03:00
|
|
|
/*
|
|
|
|
* userspace *should* be allowed to control this,
|
|
|
|
* but with UBC it's a bit unclear how to handle it
|
|
|
|
*/
|
|
|
|
if (ap->a_ioflag & IO_APPEND)
|
|
|
|
uio->uio_offset = vp->v_size;
|
|
|
|
|
|
|
|
origoff = uio->uio_offset;
|
|
|
|
while (uio->uio_resid > 0) {
|
2011-11-19 01:18:50 +04:00
|
|
|
if (vp->v_mount->mnt_flag & MNT_RELATIME)
|
|
|
|
uflags |= PUFFS_UPDATEATIME;
|
2006-11-08 01:10:18 +03:00
|
|
|
uflags |= PUFFS_UPDATECTIME;
|
|
|
|
uflags |= PUFFS_UPDATEMTIME;
|
|
|
|
oldoff = uio->uio_offset;
|
|
|
|
bytelen = uio->uio_resid;
|
|
|
|
|
2007-06-05 16:31:30 +04:00
|
|
|
newoff = oldoff + bytelen;
|
|
|
|
if (vp->v_size < newoff) {
|
|
|
|
uvm_vnp_setwritesize(vp, newoff);
|
|
|
|
}
|
|
|
|
error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
|
2007-07-27 13:50:36 +04:00
|
|
|
UVM_ADV_RANDOM, ubcflags);
|
2006-11-08 01:10:18 +03:00
|
|
|
|
|
|
|
/*
|
2007-06-05 16:31:30 +04:00
|
|
|
* In case of a ubc_uiomove() error,
|
2007-04-24 20:29:29 +04:00
|
|
|
* opt to not extend the file at all and
|
|
|
|
* return an error. Otherwise, if we attempt
|
|
|
|
* to clear the memory we couldn't fault to,
|
|
|
|
* we might generate a kernel page fault.
|
2006-11-08 01:10:18 +03:00
|
|
|
*/
|
2007-06-05 16:31:30 +04:00
|
|
|
if (vp->v_size < newoff) {
|
|
|
|
if (error == 0) {
|
|
|
|
uflags |= PUFFS_UPDATESIZE;
|
|
|
|
uvm_vnp_setsize(vp, newoff);
|
|
|
|
} else {
|
|
|
|
uvm_vnp_setwritesize(vp, vp->v_size);
|
|
|
|
}
|
2006-11-08 01:10:18 +03:00
|
|
|
}
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
|
2007-02-10 16:12:43 +03:00
|
|
|
/*
|
|
|
|
* If we're writing large files, flush to file server
|
|
|
|
* every 64k. Otherwise we can very easily exhaust
|
|
|
|
* kernel and user memory, as the file server cannot
|
|
|
|
* really keep up with our writing speed.
|
|
|
|
*
|
|
|
|
* Note: this does *NOT* honor MNT_ASYNC, because
|
|
|
|
* that gives userland too much say in the kernel.
|
|
|
|
*/
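/*
 * (oldoff >> 16) differs from (uio_offset >> 16) exactly when the
 * preceding ubc_uiomove() crossed a 64k boundary; masking with
 * ~0xffff rounds the flush range down to 64k alignment.
 */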
|
|
|
|
if (oldoff >> 16 != uio->uio_offset >> 16) {
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_enter(vp->v_interlock);
|
2006-11-08 01:10:18 +03:00
|
|
|
error = VOP_PUTPAGES(vp, oldoff & ~0xffff,
|
2007-02-10 16:12:43 +03:00
|
|
|
uio->uio_offset & ~0xffff,
|
|
|
|
PGO_CLEANIT | PGO_SYNCIO);
|
2006-11-08 01:10:18 +03:00
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
}
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2007-04-22 22:02:05 +04:00
|
|
|
/* synchronous I/O? */
|
2006-11-19 01:45:39 +03:00
|
|
|
if (error == 0 && ap->a_ioflag & IO_SYNC) {
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_enter(vp->v_interlock);
|
2006-11-08 01:10:18 +03:00
|
|
|
error = VOP_PUTPAGES(vp, trunc_page(origoff),
|
2006-11-19 01:45:39 +03:00
|
|
|
round_page(uio->uio_offset),
|
|
|
|
PGO_CLEANIT | PGO_SYNCIO);
|
2007-04-22 22:02:05 +04:00
|
|
|
|
2007-08-13 13:48:55 +04:00
|
|
|
/* write through page cache? */
|
2007-04-22 22:02:05 +04:00
|
|
|
} else if (error == 0 && pmp->pmp_flags & PUFFS_KFLAG_WTCACHE) {
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_enter(vp->v_interlock);
|
2007-04-22 22:02:05 +04:00
|
|
|
error = VOP_PUTPAGES(vp, trunc_page(origoff),
|
|
|
|
round_page(uio->uio_offset), PGO_CLEANIT);
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2007-11-18 00:55:29 +03:00
|
|
|
puffs_updatenode(VPTOPP(vp), uflags, vp->v_size);
|
2006-11-08 01:10:18 +03:00
|
|
|
} else {
|
2006-12-06 02:03:28 +03:00
|
|
|
/* tomove is non-increasing */
|
2006-10-23 02:43:23 +04:00
|
|
|
tomove = PUFFS_TOMOVE(uio->uio_resid, pmp);
|
2007-10-11 23:41:13 +04:00
|
|
|
argsize = sizeof(struct puffs_vnmsg_write) + tomove;
|
2008-01-03 01:37:19 +03:00
|
|
|
puffs_msgmem_alloc(argsize, &park_write, (void *)&write_msg, 1);
|
2006-11-08 01:10:18 +03:00
|
|
|
|
|
|
|
while (uio->uio_resid > 0) {
|
2007-06-26 16:50:49 +04:00
|
|
|
/* move data to buffer */
|
|
|
|
tomove = PUFFS_TOMOVE(uio->uio_resid, pmp);
|
2007-10-26 20:54:50 +04:00
|
|
|
memset(write_msg, 0, argsize); /* XXX: touser KASSERT */
|
2007-10-11 23:41:13 +04:00
|
|
|
RWARGS(write_msg, ap->a_ioflag, tomove,
|
2007-06-26 16:50:49 +04:00
|
|
|
uio->uio_offset, ap->a_cred);
|
2007-10-11 23:41:13 +04:00
|
|
|
error = uiomove(write_msg->pvnr_data, tomove, uio);
|
2006-11-08 01:10:18 +03:00
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
|
2007-06-26 16:50:49 +04:00
|
|
|
/* move buffer to userspace */
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_write, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_WRITE, VPTOPNC(vp));
|
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_write, vp->v_data,
|
|
|
|
NULL, error);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2007-06-26 16:50:49 +04:00
|
|
|
if (error)
|
2006-11-08 01:10:18 +03:00
|
|
|
break;
|
2007-06-26 16:50:49 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
if (write_msg->pvnr_resid > tomove) {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_senderr(pmp, PUFFS_ERR_WRITE,
|
2007-10-02 01:09:07 +04:00
|
|
|
E2BIG, "resid grew", VPTOPNC(ap->a_vp));
|
2007-09-28 01:14:49 +04:00
|
|
|
error = EPROTO;
|
2006-11-08 01:10:18 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* adjust file size */
|
2006-12-06 02:07:42 +03:00
|
|
|
if (vp->v_size < uio->uio_offset)
|
|
|
|
uvm_vnp_setsize(vp, uio->uio_offset);
|
2006-11-08 01:10:18 +03:00
|
|
|
|
|
|
|
/* didn't move everything? bad userspace. bail */
|
2007-10-11 23:41:13 +04:00
|
|
|
if (write_msg->pvnr_resid != 0) {
|
2006-11-08 01:10:18 +03:00
|
|
|
error = EIO;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2007-10-11 23:41:13 +04:00
|
|
|
puffs_msgmem_release(park_write);
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
2011-08-29 08:12:45 +04:00
|
|
|
mutex_exit(&pn->pn_sizemtx);
|
2006-10-23 02:43:23 +04:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_print(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_print_args /* {
|
|
|
|
struct vnode *a_vp;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, print);
|
2006-10-23 02:43:23 +04:00
|
|
|
struct vnode *vp = ap->a_vp;
|
2007-10-11 23:41:13 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2006-10-23 16:21:39 +04:00
|
|
|
struct puffs_node *pn = vp->v_data;
|
2007-11-16 23:32:17 +03:00
|
|
|
int error;
|
2006-10-23 02:43:23 +04:00
|
|
|
|
|
|
|
/* kernel portion */
|
2010-03-29 17:11:32 +04:00
|
|
|
printf("tag VT_PUFFS, vnode %p, puffs node: %p,\n"
|
|
|
|
"\tuserspace cookie: %p", vp, pn, pn->pn_cookie);
|
2006-10-27 16:25:16 +04:00
|
|
|
if (vp->v_type == VFIFO)
|
2010-03-29 17:11:32 +04:00
|
|
|
VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
|
2010-03-27 05:37:34 +03:00
|
|
|
printf("\n");
|
2006-10-23 02:43:23 +04:00
|
|
|
|
|
|
|
/* userspace portion */
|
2007-10-11 23:41:13 +04:00
|
|
|
if (EXISTSOP(pmp, PRINT)) {
|
|
|
|
PUFFS_MSG_ALLOC(vn, print);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_print, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_PRINT, VPTOPNC(vp));
|
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_print, vp->v_data,
|
|
|
|
NULL, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(print);
|
|
|
|
}
|
2006-12-01 15:37:41 +03:00
|
|
|
|
|
|
|
return 0;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_pathconf(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_pathconf_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
int a_name;
|
|
|
|
register_t *a_retval;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, pathconf);
|
2007-10-02 01:09:07 +04:00
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2006-10-23 02:43:23 +04:00
|
|
|
int error;
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, pathconf);
|
|
|
|
pathconf_msg->pvnr_name = ap->a_name;
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_pathconf, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_PATHCONF, VPTOPNC(vp));
|
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_pathconf, vp->v_data, NULL, error);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2007-10-11 23:41:13 +04:00
|
|
|
if (!error)
|
|
|
|
*ap->a_retval = pathconf_msg->pvnr_retval;
|
|
|
|
PUFFS_MSG_RELEASE(pathconf);
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_advlock(void *v)
|
2006-10-23 02:43:23 +04:00
|
|
|
{
|
|
|
|
struct vop_advlock_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
void *a_id;
|
|
|
|
int a_op;
|
|
|
|
struct flock *a_fl;
|
|
|
|
int a_flags;
|
|
|
|
} */ *ap = v;
|
2011-05-03 17:16:47 +04:00
|
|
|
PUFFS_MSG_VARS(vn, advlock);
|
2007-10-02 01:09:07 +04:00
|
|
|
struct vnode *vp = ap->a_vp;
|
2011-01-11 17:04:54 +03:00
|
|
|
struct puffs_node *pn = VPTOPP(vp);
|
2011-05-03 17:16:47 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
|
|
|
int error;
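/*
 * If the file server does not implement ADVLOCK, byte range locks
 * are handled entirely within the kernel via lf_advlock().
 */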
|
|
|
|
|
|
|
|
if (!EXISTSOP(pmp, ADVLOCK))
|
|
|
|
return lf_advlock(ap, &pn->pn_lockf, vp->v_size);
|
|
|
|
|
|
|
|
PUFFS_MSG_ALLOC(vn, advlock);
|
|
|
|
(void)memcpy(&advlock_msg->pvnr_fl, ap->a_fl,
|
|
|
|
sizeof(advlock_msg->pvnr_fl));
|
|
|
|
advlock_msg->pvnr_id = ap->a_id;
|
|
|
|
advlock_msg->pvnr_op = ap->a_op;
|
|
|
|
advlock_msg->pvnr_flags = ap->a_flags;
|
|
|
|
puffs_msg_setinfo(park_advlock, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_ADVLOCK, VPTOPNC(vp));
|
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_advlock, vp->v_data, NULL, error);
|
|
|
|
error = checkerr(pmp, error, __func__);
|
|
|
|
PUFFS_MSG_RELEASE(advlock);
|
2007-10-11 23:41:13 +04:00
|
|
|
|
2011-05-03 17:16:47 +04:00
|
|
|
return error;
|
2006-10-23 02:43:23 +04:00
|
|
|
}
|
2007-06-26 16:50:49 +04:00
|
|
|
|
2009-10-18 03:16:05 +04:00
|
|
|
int
|
|
|
|
puffs_vnop_abortop(void *v)
|
|
|
|
{
|
|
|
|
struct vop_abortop_args /* {
|
|
|
|
struct vnode *a_dvp;
|
|
|
|
struct componentname *a_cnp;
|
|
|
|
}; */ *ap = v;
|
|
|
|
PUFFS_MSG_VARS(vn, abortop);
|
|
|
|
struct vnode *dvp = ap->a_dvp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(dvp->v_mount);
|
|
|
|
struct componentname *cnp = ap->a_cnp;
|
|
|
|
|
|
|
|
if (EXISTSOP(pmp, ABORTOP)) {
|
|
|
|
PUFFS_MSG_ALLOC(vn, abortop);
|
|
|
|
puffs_makecn(&abortop_msg->pvnr_cn, &abortop_msg->pvnr_cn_cred,
|
|
|
|
cnp, PUFFS_USE_FULLPNBUF(pmp));
|
2009-11-19 18:50:49 +03:00
|
|
|
puffs_msg_setfaf(park_abortop);
|
2009-10-18 03:16:05 +04:00
|
|
|
puffs_msg_setinfo(park_abortop, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_ABORTOP, VPTOPNC(dvp));
|
|
|
|
|
2009-11-19 18:50:49 +03:00
|
|
|
puffs_msg_enqueue(pmp, park_abortop);
|
2009-10-18 03:16:05 +04:00
|
|
|
PUFFS_MSG_RELEASE(abortop);
|
|
|
|
}
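/*
 * genfs_abortop() below performs the kernel-side cleanup (releasing
 * the pathname buffer) regardless of whether the server was notified.
 */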
|
|
|
|
|
|
|
|
return genfs_abortop(v);
|
|
|
|
}
|
|
|
|
|
2007-06-26 16:50:49 +04:00
|
|
|
#define BIOASYNC(bp) (bp->b_flags & B_ASYNC)
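/*
 * BIOASYNC tells whether the buffer requests asynchronous I/O; for
 * such buffers the reply is handled by a callback
 * (puffs_parkdone_asyncbioread/-write) instead of sleeping here.
 */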
|
|
|
|
|
2006-11-08 01:10:18 +03:00
|
|
|
/*
|
|
|
|
* This maps itself to PUFFS_VN_READ/WRITE for data transfer.
|
|
|
|
*/
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_strategy(void *v)
|
2006-11-08 01:10:18 +03:00
|
|
|
{
|
|
|
|
struct vop_strategy_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
struct buf *a_bp;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, rw);
|
2007-01-19 17:59:50 +03:00
|
|
|
struct vnode *vp = ap->a_vp;
|
2007-10-11 23:41:13 +04:00
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2007-01-21 19:29:31 +03:00
|
|
|
struct puffs_node *pn;
|
2006-11-08 01:10:18 +03:00
|
|
|
struct buf *bp;
|
|
|
|
size_t argsize;
|
2006-11-18 22:33:02 +03:00
|
|
|
size_t tomove, moved;
|
2011-10-18 19:39:09 +04:00
|
|
|
int error, dofaf, cansleep, dobiodone;
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2007-01-19 17:59:50 +03:00
|
|
|
pmp = MPTOPUFFSMP(vp->v_mount);
|
2006-11-08 01:10:18 +03:00
|
|
|
bp = ap->a_bp;
|
2007-01-19 20:52:01 +03:00
|
|
|
error = 0;
|
2007-06-26 16:50:49 +04:00
|
|
|
dofaf = 0;
|
2011-10-18 19:39:09 +04:00
|
|
|
cansleep = 0;
|
2007-01-21 19:29:31 +03:00
|
|
|
pn = VPTOPP(vp);
|
2007-10-11 23:41:13 +04:00
|
|
|
park_rw = NULL; /* explicit */
|
2007-11-17 21:09:04 +03:00
|
|
|
dobiodone = 1;
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2007-11-21 19:30:40 +03:00
|
|
|
if ((BUF_ISREAD(bp) && !EXISTSOP(pmp, READ))
|
|
|
|
|| (BUF_ISWRITE(bp) && !EXISTSOP(pmp, WRITE)))
|
2007-06-26 16:50:49 +04:00
|
|
|
ERROUT(EOPNOTSUPP);
|
2007-01-21 19:29:31 +03:00
|
|
|
|
2009-11-05 22:22:57 +03:00
|
|
|
/*
|
|
|
|
* Short-circuit optimization: don't flush buffer in between
|
|
|
|
* VOP_INACTIVE and VOP_RECLAIM in case the node has no references.
|
|
|
|
*/
|
|
|
|
if (pn->pn_stat & PNODE_DYING) {
|
|
|
|
KASSERT(BUF_ISWRITE(bp));
|
|
|
|
bp->b_resid = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2006-11-08 01:10:18 +03:00
|
|
|
#ifdef DIAGNOSTIC
|
2007-10-11 23:41:13 +04:00
|
|
|
if (bp->b_bcount > pmp->pmp_msg_maxsize - PUFFS_MSGSTRUCT_MAX)
|
2007-02-08 08:09:25 +03:00
|
|
|
panic("puffs_strategy: wildly inappropriate buf bcount %d",
|
|
|
|
bp->b_bcount);
|
2006-11-08 01:10:18 +03:00
|
|
|
#endif
|
|
|
|
|
2007-01-19 17:59:50 +03:00
|
|
|
/*
|
2007-01-27 01:59:49 +03:00
|
|
|
* See explanation for the necessity of a FAF in puffs_fsync.
|
|
|
|
*
|
|
|
|
* Also, do FAF in case we're suspending.
|
|
|
|
* See puffs_vfsops.c:pageflush()
|
2007-01-19 17:59:50 +03:00
|
|
|
*/
|
2007-11-21 19:30:40 +03:00
|
|
|
if (BUF_ISWRITE(bp)) {
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_enter(vp->v_interlock);
|
2007-10-11 00:42:20 +04:00
|
|
|
if (vp->v_iflag & VI_XLOCK)
|
2007-06-26 16:50:49 +04:00
|
|
|
dofaf = 1;
|
2009-11-05 22:42:44 +03:00
|
|
|
if (pn->pn_stat & PNODE_FAF)
|
2007-06-26 16:50:49 +04:00
|
|
|
dofaf = 1;
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_exit(vp->v_interlock);
|
2007-01-19 17:59:50 +03:00
|
|
|
}
|
|
|
|
|
2011-10-18 19:39:09 +04:00
|
|
|
cansleep = (curlwp == uvm.pagedaemon_lwp || dofaf) ? 0 : 1;
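/*
 * The pagedaemon must not sleep waiting for memory and FAF requests
 * should not block either, so in those cases the message memory
 * allocation below is attempted without sleeping.
 */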
|
|
|
|
|
2011-10-19 05:39:29 +04:00
|
|
|
KASSERT(curlwp != uvm.pagedaemon_lwp || dofaf || BIOASYNC(bp));
|
2007-02-08 07:52:23 +03:00
|
|
|
|
2007-06-26 16:50:49 +04:00
|
|
|
/* allocate transport structure */
|
2007-03-14 15:13:58 +03:00
|
|
|
tomove = PUFFS_TOMOVE(bp->b_bcount, pmp);
|
2007-10-11 23:41:13 +04:00
|
|
|
argsize = sizeof(struct puffs_vnmsg_rw);
|
|
|
|
error = puffs_msgmem_alloc(argsize + tomove, &park_rw,
|
2011-10-18 19:39:09 +04:00
|
|
|
(void *)&rw_msg, cansleep);
|
2007-10-11 23:41:13 +04:00
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
RWARGS(rw_msg, 0, tomove, bp->b_blkno << DEV_BSHIFT, FSCRED);
|
2007-06-26 16:50:49 +04:00
|
|
|
|
|
|
|
/* 2x2 cases: read/write, faf/nofaf */
|
2007-11-21 19:30:40 +03:00
|
|
|
if (BUF_ISREAD(bp)) {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_rw, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_READ, VPTOPNC(vp));
|
|
|
|
puffs_msg_setdelta(park_rw, tomove);
|
2007-11-17 21:09:04 +03:00
|
|
|
if (BIOASYNC(bp)) {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setcall(park_rw,
|
|
|
|
puffs_parkdone_asyncbioread, bp);
|
|
|
|
puffs_msg_enqueue(pmp, park_rw);
|
2007-11-17 21:09:04 +03:00
|
|
|
dobiodone = 0;
|
2007-06-26 16:50:49 +04:00
|
|
|
} else {
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_rw, vp->v_data,
|
|
|
|
NULL, error);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2007-06-26 16:50:49 +04:00
|
|
|
if (error)
|
|
|
|
goto out;
|
2007-03-14 15:13:58 +03:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
if (rw_msg->pvnr_resid > tomove) {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_senderr(pmp, PUFFS_ERR_READ,
|
2007-10-02 01:09:07 +04:00
|
|
|
E2BIG, "resid grew", VPTOPNC(vp));
|
2007-09-28 01:14:49 +04:00
|
|
|
ERROUT(EPROTO);
|
|
|
|
}
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
moved = tomove - rw_msg->pvnr_resid;
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
(void)memcpy(bp->b_data, rw_msg->pvnr_data, moved);
|
2007-06-26 16:50:49 +04:00
|
|
|
bp->b_resid = bp->b_bcount - moved;
|
2006-11-18 22:33:02 +03:00
|
|
|
}
|
2006-11-08 01:10:18 +03:00
|
|
|
} else {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_rw, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_WRITE, VPTOPNC(vp));
|
2007-03-20 13:21:58 +03:00
|
|
|
/*
|
|
|
|
* make pages read-only before we write them if we want
|
|
|
|
* write caching info
|
|
|
|
*/
|
|
|
|
if (PUFFS_WCACHEINFO(pmp)) {
|
|
|
|
struct uvm_object *uobj = &vp->v_uobj;
|
|
|
|
int npages = (bp->b_bcount + PAGE_SIZE-1) >> PAGE_SHIFT;
|
|
|
|
struct vm_page *vmp;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
vmp = uvm_pageratop((vaddr_t)bp->b_data
|
|
|
|
+ (i << PAGE_SHIFT));
|
|
|
|
DPRINTF(("puffs_strategy: write-protecting "
|
|
|
|
"vp %p page %p, offset %" PRId64"\n",
|
|
|
|
vp, vmp, vmp->offset));
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_enter(uobj->vmobjlock);
|
2007-03-20 13:21:58 +03:00
|
|
|
vmp->flags |= PG_RDONLY;
|
|
|
|
pmap_page_protect(vmp, VM_PROT_READ);
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_exit(uobj->vmobjlock);
|
2007-03-20 13:21:58 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
(void)memcpy(&rw_msg->pvnr_data, bp->b_data, tomove);
|
2007-11-17 21:09:04 +03:00
|
|
|
if (dofaf) {
|
2007-10-11 23:41:13 +04:00
|
|
|
puffs_msg_setfaf(park_rw);
|
2007-11-17 21:09:04 +03:00
|
|
|
} else if (BIOASYNC(bp)) {
|
|
|
|
puffs_msg_setcall(park_rw,
|
|
|
|
puffs_parkdone_asyncbiowrite, bp);
|
|
|
|
dobiodone = 0;
|
|
|
|
}
|
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_rw, vp->v_data, NULL, error);
|
2007-10-11 23:41:13 +04:00
|
|
|
|
2007-11-17 21:09:04 +03:00
|
|
|
if (dobiodone == 0)
|
|
|
|
goto out;
|
|
|
|
|
2007-10-23 22:27:10 +04:00
|
|
|
/*
|
|
|
|
* XXXXXXXX: wrong, but kernel can't survive strategy
|
|
|
|
* failure currently. Here, have one more X: X.
|
|
|
|
*/
|
|
|
|
if (error != ENOMEM)
|
|
|
|
error = 0;
|
|
|
|
|
|
|
|
error = checkerr(pmp, error, __func__);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (rw_msg->pvnr_resid > tomove) {
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_senderr(pmp, PUFFS_ERR_WRITE,
|
2007-10-23 22:27:10 +04:00
|
|
|
E2BIG, "resid grew", VPTOPNC(vp));
|
|
|
|
ERROUT(EPROTO);
|
|
|
|
}
|
2006-11-08 01:10:18 +03:00
|
|
|
|
2007-10-23 22:27:10 +04:00
|
|
|
/*
|
|
|
|
* FAF moved everything. Frankly, we don't
|
|
|
|
* really have a choice.
|
|
|
|
*/
|
|
|
|
if (dofaf && error == 0)
|
|
|
|
moved = tomove;
|
|
|
|
else
|
2007-10-11 23:41:13 +04:00
|
|
|
moved = tomove - rw_msg->pvnr_resid;
|
2006-11-08 14:49:36 +03:00
|
|
|
|
2007-10-23 22:27:10 +04:00
|
|
|
bp->b_resid = bp->b_bcount - moved;
|
|
|
|
if (bp->b_resid != 0) {
|
|
|
|
ERROUT(EIO);
|
2006-11-18 22:33:02 +03:00
|
|
|
}
|
2006-11-08 01:10:18 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
2007-10-11 23:41:13 +04:00
|
|
|
if (park_rw)
|
|
|
|
puffs_msgmem_release(park_rw);
|
2006-11-18 22:33:02 +03:00
|
|
|
|
2007-07-29 17:31:07 +04:00
|
|
|
if (error)
|
2007-01-26 02:43:57 +03:00
|
|
|
bp->b_error = error;
|
|
|
|
|
2007-11-17 21:09:04 +03:00
|
|
|
if (error || dobiodone)
|
2007-03-14 15:13:58 +03:00
|
|
|
biodone(bp);
|
2007-06-26 16:50:49 +04:00
|
|
|
|
2006-11-08 01:10:18 +03:00
|
|
|
return error;
|
|
|
|
}
|
2006-10-23 02:43:23 +04:00
|
|
|
|
2006-12-07 19:58:39 +03:00
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_mmap(void *v)
|
2006-12-07 19:58:39 +03:00
|
|
|
{
|
|
|
|
struct vop_mmap_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
2007-07-27 12:26:38 +04:00
|
|
|
vm_prot_t a_prot;
|
2006-12-07 19:58:39 +03:00
|
|
|
kauth_cred_t a_cred;
|
|
|
|
} */ *ap = v;
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_VARS(vn, mmap);
|
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
2006-12-07 19:58:39 +03:00
|
|
|
int error;
|
|
|
|
|
2007-06-25 02:16:03 +04:00
|
|
|
if (!PUFFS_USE_PAGECACHE(pmp))
|
2006-12-07 19:58:39 +03:00
|
|
|
return genfs_eopnotsupp(v);
|
|
|
|
|
|
|
|
if (EXISTSOP(pmp, MMAP)) {
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_ALLOC(vn, mmap);
|
|
|
|
mmap_msg->pvnr_prot = ap->a_prot;
|
|
|
|
puffs_credcvt(&mmap_msg->pvnr_cred, ap->a_cred);
|
2007-11-16 23:32:17 +03:00
|
|
|
puffs_msg_setinfo(park_mmap, PUFFSOP_VN,
|
|
|
|
PUFFS_VN_MMAP, VPTOPNC(vp));
|
2006-12-07 19:58:39 +03:00
|
|
|
|
2007-11-16 23:32:17 +03:00
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_mmap, vp->v_data, NULL, error);
|
2007-10-02 01:09:07 +04:00
|
|
|
error = checkerr(pmp, error, __func__);
|
2007-10-11 23:41:13 +04:00
|
|
|
PUFFS_MSG_RELEASE(mmap);
|
2006-12-07 19:58:39 +03:00
|
|
|
} else {
|
|
|
|
error = genfs_mmap(v);
|
|
|
|
}
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-10-23 02:43:23 +04:00
|
|
|
/*
|
|
|
|
* The rest don't get a free trip to userspace and back, they
|
|
|
|
* have to stay within the kernel.
|
|
|
|
*/
|
|
|
|
|
2006-11-08 01:10:18 +03:00
|
|
|
/*
|
|
|
|
* bmap doesn't really make any sense for puffs, so just 1:1 map it.
|
|
|
|
* well, maybe somehow, somewhere, some day ....
|
|
|
|
*/
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_bmap(void *v)
|
2006-11-08 01:10:18 +03:00
|
|
|
{
|
|
|
|
struct vop_bmap_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
daddr_t a_bn;
|
|
|
|
struct vnode **a_vpp;
|
|
|
|
daddr_t *a_bnp;
|
|
|
|
int *a_runp;
|
|
|
|
} */ *ap = v;
|
|
|
|
struct puffs_mount *pmp;
|
|
|
|
|
|
|
|
pmp = MPTOPUFFSMP(ap->a_vp->v_mount);
|
|
|
|
|
|
|
|
if (ap->a_vpp)
|
|
|
|
*ap->a_vpp = ap->a_vp;
|
|
|
|
if (ap->a_bnp)
|
|
|
|
*ap->a_bnp = ap->a_bn;
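/*
 * The read-ahead run is capped by the largest transfer that fits in
 * a single puffs message, expressed in DEV_BSIZE blocks.
 */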
|
|
|
|
if (ap->a_runp)
|
2007-03-29 20:04:26 +04:00
|
|
|
*ap->a_runp
|
2007-10-11 23:41:13 +04:00
|
|
|
= (PUFFS_TOMOVE(pmp->pmp_msg_maxsize, pmp)>>DEV_BSHIFT) - 1;
|
2006-11-08 01:10:18 +03:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-03-20 13:21:58 +03:00
|
|
|
/*
|
|
|
|
* Handle getpages faults in puffs. We let genfs_getpages() do most
|
|
|
|
* of the dirty work, but we come in this route to do accounting tasks.
|
|
|
|
* If the user server has specified functions for cache notifications
|
|
|
|
* about reads and/or writes, we record which type of operation we got,
|
|
|
|
* for which page range, and proceed to issue a FAF notification to the
|
|
|
|
* server about it.
|
|
|
|
*/
|
|
|
|
int
|
2007-12-31 02:04:12 +03:00
|
|
|
puffs_vnop_getpages(void *v)
|
2007-03-20 13:21:58 +03:00
|
|
|
{
|
|
|
|
struct vop_getpages_args /* {
|
|
|
|
const struct vnodeop_desc *a_desc;
|
|
|
|
struct vnode *a_vp;
|
|
|
|
voff_t a_offset;
|
|
|
|
struct vm_page **a_m;
|
|
|
|
int *a_count;
|
|
|
|
int a_centeridx;
|
|
|
|
vm_prot_t a_access_type;
|
|
|
|
int a_advice;
|
|
|
|
int a_flags;
|
|
|
|
} */ *ap = v;
|
|
|
|
struct puffs_mount *pmp;
|
2007-07-22 22:22:49 +04:00
|
|
|
struct puffs_node *pn;
|
2007-03-20 13:21:58 +03:00
|
|
|
struct vnode *vp;
|
|
|
|
struct vm_page **pgs;
|
|
|
|
struct puffs_cacheinfo *pcinfo = NULL;
|
|
|
|
struct puffs_cacherun *pcrun;
|
2007-03-29 20:04:26 +04:00
|
|
|
void *parkmem = NULL;
|
2007-03-20 13:21:58 +03:00
|
|
|
size_t runsizes;
|
|
|
|
int i, npages, si, streakon;
|
|
|
|
int error, locked, write;
|
|
|
|
|
|
|
|
pmp = MPTOPUFFSMP(ap->a_vp->v_mount);
|
|
|
|
npages = *ap->a_count;
|
|
|
|
pgs = ap->a_m;
|
|
|
|
vp = ap->a_vp;
|
2007-07-22 22:22:49 +04:00
|
|
|
pn = vp->v_data;
|
2007-03-20 13:21:58 +03:00
|
|
|
locked = (ap->a_flags & PGO_LOCKED) != 0;
|
|
|
|
write = (ap->a_access_type & VM_PROT_WRITE) != 0;
|
|
|
|
|
|
|
|
/* ccg xnaht - gets Wuninitialized wrong */
|
|
|
|
pcrun = NULL;
|
|
|
|
runsizes = 0;
|
|
|
|
|
2007-07-22 22:22:49 +04:00
|
|
|
/*
|
|
|
|
* Check that we aren't trying to fault in pages which our file
|
|
|
|
* server doesn't know about. This happens if we extend a file by
|
|
|
|
* skipping some pages and later try to fault in pages which
|
|
|
|
* are between pn_serversize and vp_size. This check optimizes
|
|
|
|
* away the common case where a file is being extended.
|
|
|
|
*/
|
|
|
|
if (ap->a_offset >= pn->pn_serversize && ap->a_offset < vp->v_size) {
|
|
|
|
struct vattr va;
|
|
|
|
|
|
|
|
/* try again later when we can block */
|
|
|
|
if (locked)
|
|
|
|
ERROUT(EBUSY);
|
|
|
|
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_exit(vp->v_interlock);
|
2007-07-22 22:22:49 +04:00
|
|
|
vattr_null(&va);
|
|
|
|
va.va_size = vp->v_size;
|
2007-12-31 02:04:12 +03:00
|
|
|
error = dosetattr(vp, &va, FSCRED, 0);
|
2007-07-22 22:22:49 +04:00
|
|
|
if (error)
|
|
|
|
ERROUT(error);
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_enter(vp->v_interlock);
|
2007-07-22 22:22:49 +04:00
|
|
|
}
|
|
|
|
|
2007-03-20 13:21:58 +03:00
|
|
|
if (write && PUFFS_WCACHEINFO(pmp)) {
|
2007-10-11 23:41:13 +04:00
|
|
|
#ifdef notnowjohn
|
2007-03-20 13:21:58 +03:00
|
|
|
/* allocate worst-case memory */
|
|
|
|
runsizes = ((npages / 2) + 1) * sizeof(struct puffs_cacherun);
|
2011-10-19 05:39:29 +04:00
|
|
|
KASSERT(curlwp != uvm.pagedaemon_lwp || locked);
|
2009-09-12 22:17:55 +04:00
|
|
|
pcinfo = kmem_zalloc(sizeof(struct puffs_cacheinfo) + runsizes,
|
2007-11-20 14:51:01 +03:00
|
|
|
locked ? KM_NOSLEEP : KM_SLEEP);
|
2007-03-20 13:21:58 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* can't block if we're locked and can't mess up caching
|
|
|
|
* information for fs server. so come back later, please
|
|
|
|
*/
|
2007-06-26 16:50:49 +04:00
|
|
|
if (pcinfo == NULL)
|
|
|
|
ERROUT(ENOMEM);
|
2007-03-20 13:21:58 +03:00
|
|
|
|
2007-04-04 20:13:51 +04:00
|
|
|
parkmem = puffs_park_alloc(locked == 0);
|
2007-06-26 16:50:49 +04:00
|
|
|
if (parkmem == NULL)
|
|
|
|
ERROUT(ENOMEM);
|
2007-03-20 13:21:58 +03:00
|
|
|
|
|
|
|
pcrun = pcinfo->pcache_runs;
|
2007-10-11 23:41:13 +04:00
|
|
|
#else
|
|
|
|
(void)parkmem;
|
|
|
|
#endif
|
2007-03-20 13:21:58 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
error = genfs_getpages(v);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (PUFFS_WCACHEINFO(pmp) == 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Let's see whose fault it was and inform the user server of
|
|
|
|
* possibly read/written pages. Map pages from read faults
|
|
|
|
* strictly read-only, since otherwise we might miss info on
|
|
|
|
* when the page is actually write-faulted to.
|
|
|
|
*/
|
|
|
|
if (!locked)
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_enter(vp->v_uobj.vmobjlock);
|
2007-03-20 13:21:58 +03:00
|
|
|
for (i = 0, si = 0, streakon = 0; i < npages; i++) {
|
|
|
|
if (pgs[i] == NULL || pgs[i] == PGO_DONTCARE) {
|
|
|
|
if (streakon && write) {
|
|
|
|
streakon = 0;
|
|
|
|
pcrun[si].pcache_runend
|
|
|
|
= trunc_page(pgs[i-1]->offset) + PAGE_MASK;
|
|
|
|
si++;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (streakon == 0 && write) {
|
|
|
|
streakon = 1;
|
|
|
|
pcrun[si].pcache_runstart = pgs[i]->offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!write)
|
|
|
|
pgs[i]->flags |= PG_RDONLY;
|
|
|
|
}
|
|
|
|
/* was the last page part of our streak? */
|
|
|
|
if (streakon) {
|
|
|
|
pcrun[si].pcache_runend
|
|
|
|
= trunc_page(pgs[i-1]->offset) + PAGE_MASK;
|
|
|
|
si++;
|
|
|
|
}
|
|
|
|
if (!locked)
|
2011-06-12 07:35:36 +04:00
|
|
|
mutex_exit(vp->v_uobj.vmobjlock);
|
2007-03-20 13:21:58 +03:00
|
|
|
|
|
|
|
KASSERT(si <= (npages / 2) + 1);
|
|
|
|
|
2007-10-11 23:41:13 +04:00
|
|
|
#ifdef notnowjohn
|
2007-03-20 13:21:58 +03:00
|
|
|
/* send results to userspace */
|
|
|
|
if (write)
|
2007-03-29 20:04:26 +04:00
|
|
|
puffs_cacheop(pmp, parkmem, pcinfo,
|
2007-03-20 13:21:58 +03:00
|
|
|
sizeof(struct puffs_cacheinfo) + runsizes, VPTOPNC(vp));
|
2007-10-11 23:41:13 +04:00
|
|
|
#endif
|
2007-03-20 13:21:58 +03:00
|
|
|
|
|
|
|
out:
|
|
|
|
if (error) {
|
|
|
|
if (pcinfo != NULL)
|
2007-11-20 14:51:01 +03:00
|
|
|
kmem_free(pcinfo,
|
|
|
|
sizeof(struct puffs_cacheinfo) + runsizes);
|
2007-10-11 23:41:13 +04:00
|
|
|
#ifdef notnowjohn
|
2007-03-29 20:04:26 +04:00
|
|
|
if (parkmem != NULL)
|
2007-04-04 20:13:51 +04:00
|
|
|
puffs_park_release(parkmem, 1);
|
2007-10-11 23:41:13 +04:00
|
|
|
#endif
|
2007-03-20 13:21:58 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
2007-01-27 01:59:49 +03:00
|
|
|
|
2010-05-21 14:16:54 +04:00
|
|
|
/*
|
|
|
|
* Extended attribute support.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int
|
|
|
|
puffs_vnop_getextattr(void *v)
|
|
|
|
{
|
|
|
|
struct vop_getextattr_args /* {
|
|
|
|
struct vnode *a_vp;
|
|
|
|
int a_attrnamespace;
|
|
|
|
const char *a_name;
|
|
|
|
struct uio *a_uio;
|
|
|
|
size_t *a_size;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
}; */ *ap = v;
|
|
|
|
PUFFS_MSG_VARS(vn, getextattr);
|
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
|
|
|
int attrnamespace = ap->a_attrnamespace;
|
|
|
|
const char *name = ap->a_name;
|
|
|
|
struct uio *uio = ap->a_uio;
|
|
|
|
size_t *sizep = ap->a_size;
|
|
|
|
size_t tomove, resid;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (uio)
|
|
|
|
resid = uio->uio_resid;
|
|
|
|
else
|
|
|
|
resid = 0;
|
|
|
|
|
|
|
|
tomove = PUFFS_TOMOVE(resid, pmp);
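/*
 * PUFFS_TOMOVE caps the transfer at what fits into one puffs message;
 * since there is no transfer loop here, a larger request is simply
 * rejected with E2BIG.
 */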
|
|
|
|
if (tomove != resid) {
|
|
|
|
error = E2BIG;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
puffs_msgmem_alloc(sizeof(struct puffs_vnmsg_getextattr) + tomove,
|
|
|
|
&park_getextattr, (void *)&getextattr_msg, 1);
|
|
|
|
|
|
|
|
getextattr_msg->pvnr_attrnamespace = attrnamespace;
|
|
|
|
strlcpy(getextattr_msg->pvnr_attrname, name,
|
|
|
|
sizeof(getextattr_msg->pvnr_attrname));
|
|
|
|
puffs_credcvt(&getextattr_msg->pvnr_cred, ap->a_cred);
|
|
|
|
if (sizep)
|
|
|
|
getextattr_msg->pvnr_datasize = 1;
|
|
|
|
getextattr_msg->pvnr_resid = tomove;
|
|
|
|
|
|
|
|
puffs_msg_setinfo(park_getextattr,
|
|
|
|
PUFFSOP_VN, PUFFS_VN_GETEXTATTR, VPTOPNC(vp));
|
|
|
|
puffs_msg_setdelta(park_getextattr, tomove);
|
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_getextattr, vp->v_data, NULL, error);
|
|
|
|
|
|
|
|
error = checkerr(pmp, error, __func__);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
resid = getextattr_msg->pvnr_resid;
|
|
|
|
if (resid > tomove) {
|
|
|
|
puffs_senderr(pmp, PUFFS_ERR_GETEXTATTR, E2BIG,
|
|
|
|
"resid grew", VPTOPNC(vp));
|
|
|
|
error = EPROTO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sizep)
|
|
|
|
*sizep = getextattr_msg->pvnr_datasize;
|
|
|
|
if (uio)
|
|
|
|
error = uiomove(getextattr_msg->pvnr_data, tomove - resid, uio);
|
|
|
|
|
|
|
|
out:
|
|
|
|
PUFFS_MSG_RELEASE(getextattr);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
puffs_vnop_setextattr(void *v)
|
|
|
|
{
|
|
|
|
struct vop_setextattr_args /* {
|
|
|
|
struct vnode *a_vp;
|
|
|
|
int a_attrnamespace;
|
|
|
|
const char *a_name;
|
|
|
|
struct uio *a_uio;
|
|
|
|
kauth_cred_t a_cred;
|
|
|
|
}; */ *ap = v;
|
|
|
|
PUFFS_MSG_VARS(vn, setextattr);
|
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
|
|
|
int attrnamespace = ap->a_attrnamespace;
|
|
|
|
const char *name = ap->a_name;
|
|
|
|
struct uio *uio = ap->a_uio;
|
|
|
|
size_t tomove, resid;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (uio)
|
|
|
|
resid = uio->uio_resid;
|
|
|
|
else
|
|
|
|
resid = 0;
|
|
|
|
|
|
|
|
tomove = PUFFS_TOMOVE(resid, pmp);
|
|
|
|
if (tomove != resid) {
|
|
|
|
error = E2BIG;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
puffs_msgmem_alloc(sizeof(struct puffs_vnmsg_setextattr) + tomove,
|
|
|
|
&park_setextattr, (void *)&setextattr_msg, 1);
|
|
|
|
|
|
|
|
setextattr_msg->pvnr_attrnamespace = attrnamespace;
|
|
|
|
strlcpy(setextattr_msg->pvnr_attrname, name,
|
|
|
|
sizeof(setextattr_msg->pvnr_attrname));
|
|
|
|
puffs_credcvt(&setextattr_msg->pvnr_cred, ap->a_cred);
|
|
|
|
setextattr_msg->pvnr_resid = tomove;
|
|
|
|
|
|
|
|
if (uio) {
|
|
|
|
error = uiomove(setextattr_msg->pvnr_data, tomove, uio);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
puffs_msg_setinfo(park_setextattr,
|
|
|
|
PUFFSOP_VN, PUFFS_VN_SETEXTATTR, VPTOPNC(vp));
|
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_setextattr, vp->v_data, NULL, error);
|
|
|
|
|
|
|
|
error = checkerr(pmp, error, __func__);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (setextattr_msg->pvnr_resid != 0)
|
|
|
|
error = EIO;
|
|
|
|
|
|
|
|
out:
|
|
|
|
PUFFS_MSG_RELEASE(setextattr);
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
puffs_vnop_listextattr(void *v)
|
|
|
|
{
|
|
|
|
struct vop_listextattr_args /* {
|
|
|
|
struct vnode *a_vp;
|
|
|
|
int a_attrnamespace;
|
|
|
|
struct uio *a_uio;
|
|
|
|
size_t *a_size;
|
2011-07-04 12:07:29 +04:00
|
|
|
int a_flag;
|
2010-05-21 14:16:54 +04:00
|
|
|
kauth_cred_t a_cred;
|
|
|
|
}; */ *ap = v;
|
|
|
|
PUFFS_MSG_VARS(vn, listextattr);
|
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
|
|
|
|
int attrnamespace = ap->a_attrnamespace;
|
|
|
|
struct uio *uio = ap->a_uio;
|
|
|
|
size_t *sizep = ap->a_size;
|
2011-07-04 12:07:29 +04:00
|
|
|
int flag = ap->a_flag;
|
2010-05-21 14:16:54 +04:00
|
|
|
size_t tomove, resid;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (uio)
|
|
|
|
resid = uio->uio_resid;
|
|
|
|
else
|
|
|
|
resid = 0;
|
|
|
|
|
|
|
|
tomove = PUFFS_TOMOVE(resid, pmp);
|
|
|
|
if (tomove != resid) {
|
|
|
|
error = E2BIG;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
puffs_msgmem_alloc(sizeof(struct puffs_vnmsg_listextattr) + tomove,
|
|
|
|
&park_listextattr, (void *)&listextattr_msg, 1);
|
|
|
|
|
|
|
|
listextattr_msg->pvnr_attrnamespace = attrnamespace;
|
2011-07-04 12:07:29 +04:00
|
|
|
listextattr_msg->pvnr_flag = flag;
|
2010-05-21 14:16:54 +04:00
|
|
|
puffs_credcvt(&listextattr_msg->pvnr_cred, ap->a_cred);
|
|
|
|
listextattr_msg->pvnr_resid = tomove;
|
|
|
|
if (sizep)
|
|
|
|
listextattr_msg->pvnr_datasize = 1;
|
|
|
|
|
|
|
|
puffs_msg_setinfo(park_listextattr,
|
|
|
|
PUFFSOP_VN, PUFFS_VN_LISTEXTATTR, VPTOPNC(vp));
|
|
|
|
puffs_msg_setdelta(park_listextattr, tomove);
|
|
|
|
PUFFS_MSG_ENQUEUEWAIT2(pmp, park_listextattr, vp->v_data, NULL, error);
|
|
|
|
|
|
|
|
error = checkerr(pmp, error, __func__);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
resid = listextattr_msg->pvnr_resid;
|
|
|
|
if (resid > tomove) {
|
|
|
|
puffs_senderr(pmp, PUFFS_ERR_LISTEXTATTR, E2BIG,
|
|
|
|
"resid grew", VPTOPNC(vp));
|
|
|
|
error = EPROTO;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sizep)
|
|
|
|
*sizep = listextattr_msg->pvnr_datasize;
|
|
|
|
if (uio)
|
|
|
|
error = uiomove(listextattr_msg->pvnr_data, tomove-resid, uio);
|
|
|
|
|
|
|
|
out:
|
|
|
|
PUFFS_MSG_RELEASE(listextattr);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
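/*
 * Delete an extended attribute: no data moves in either direction,
 * so only the namespace, attribute name and credentials need to be
 * shipped to the file server.
 */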
int
puffs_vnop_deleteextattr(void *v)
{
	struct vop_deleteextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		const char *a_name;
		kauth_cred_t a_cred;
	}; */ *ap = v;
	PUFFS_MSG_VARS(vn, deleteextattr);
	struct vnode *vp = ap->a_vp;
	struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
	int attrnamespace = ap->a_attrnamespace;
	const char *name = ap->a_name;
	int error;

	PUFFS_MSG_ALLOC(vn, deleteextattr);
	deleteextattr_msg->pvnr_attrnamespace = attrnamespace;
	strlcpy(deleteextattr_msg->pvnr_attrname, name,
	    sizeof(deleteextattr_msg->pvnr_attrname));
	puffs_credcvt(&deleteextattr_msg->pvnr_cred, ap->a_cred);

	puffs_msg_setinfo(park_deleteextattr,
	    PUFFSOP_VN, PUFFS_VN_DELETEEXTATTR, VPTOPNC(vp));
	PUFFS_MSG_ENQUEUEWAIT2(pmp, park_deleteextattr,
	    vp->v_data, NULL, error);

	error = checkerr(pmp, error, __func__);

	PUFFS_MSG_RELEASE(deleteextattr);
	return error;
}

/*
 * spec & fifo.  These call the miscfs spec and fifo vectors, but issue
 * FAF update information for the puffs node first.
 */
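/*
 * Each wrapper below records the timestamp update on the puffs node
 * with puffs_updatenode() and then passes the actual I/O on to the
 * corresponding spec/fifo vnode operation through VOCALL().
 */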
int
puffs_vnop_spec_read(void *v)
{
	struct vop_read_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;

	puffs_updatenode(VPTOPP(ap->a_vp), PUFFS_UPDATEATIME, 0);
	return VOCALL(spec_vnodeop_p, VOFFSET(vop_read), v);
}

int
puffs_vnop_spec_write(void *v)
{
	struct vop_write_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;

	puffs_updatenode(VPTOPP(ap->a_vp), PUFFS_UPDATEMTIME, 0);
	return VOCALL(spec_vnodeop_p, VOFFSET(vop_write), v);
}

int
puffs_vnop_fifo_read(void *v)
{
	struct vop_read_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;

	puffs_updatenode(VPTOPP(ap->a_vp), PUFFS_UPDATEATIME, 0);
	return VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), v);
}

int
puffs_vnop_fifo_write(void *v)
{
	struct vop_write_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;

	puffs_updatenode(VPTOPP(ap->a_vp), PUFFS_UPDATEMTIME, 0);
	return VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), v);
}