CHFS comments

This commit is contained in:
ttoth 2012-10-19 12:44:39 +00:00
parent 5fda429d12
commit bca84b9e1f
21 changed files with 934 additions and 1421 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs.h,v 1.7 2012/08/10 09:26:58 ttoth Exp $ */
/* $NetBSD: chfs.h,v 1.8 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -42,8 +42,8 @@
#ifdef _KERNEL
#if 0
#define DBG_MSG
#define DBG_MSG_GC
#define DBG_MSG /* debug messages */
#define DBG_MSG_GC /* garbage collector's debug messages */
#endif
#include <sys/param.h>
@ -80,6 +80,7 @@ TAILQ_HEAD(chfs_dirent_list, chfs_dirent);
#include "media.h"
#include "chfs_inode.h"
/* padding to 4 bytes - the last two bits of node offsets are used for node masks */
#define CHFS_PAD(x) (((x)+3)&~3)
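/*
 * For illustration: CHFS_PAD(5) == 8 and CHFS_PAD(8) == 8, so every padded
 * length is 4-byte aligned and the two low bits of a node's offset stay free
 * for the nref state masks defined below (see CHFS_GET_OFS()).
 */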
#ifdef _KERNEL
@ -88,17 +89,19 @@ TAILQ_HEAD(chfs_dirent_list, chfs_dirent);
#define MOUNT_CHFS "chfs"
#endif /* MOUNT_CHFS */
/* state of a vnode */
enum {
VNO_STATE_UNCHECKED, /* CRC checks not yet done */
VNO_STATE_CHECKING, /* CRC checks in progress */
VNO_STATE_PRESENT, /* In core */
VNO_STATE_CHECKEDABSENT,/* Checked, cleared again */
VNO_STATE_GC, /* GCing a 'pristine' node */
VNO_STATE_READING, /* In read_inode() */
VNO_STATE_CLEARING /* In clear_inode() */
VNO_STATE_UNCHECKED, /* CRC checks not yet done */
VNO_STATE_CHECKING, /* CRC checks in progress */
VNO_STATE_PRESENT, /* In core */
VNO_STATE_CHECKEDABSENT, /* Checked, cleared again */
VNO_STATE_GC, /* GCing a 'pristine' node */
VNO_STATE_READING, /* In read_inode() */
VNO_STATE_CLEARING /* In clear_inode() */
};
/* size of the vnode cache (hashtable) */
#define VNODECACHE_SIZE 128
#define MAX_READ_FREE(chmp) (((chmp)->chm_ebh)->eb_size / 8)
@ -106,6 +109,7 @@ enum {
#define MAX_DIRTY_TO_CLEAN 255
#define VERY_DIRTY(chmp, size) ((size) >= (((chmp)->chm_ebh)->eb_size / 2))
/* node errors */
enum {
CHFS_NODE_OK = 0,
CHFS_NODE_BADMAGIC,
@ -113,6 +117,7 @@ enum {
CHFS_NODE_BADNAMECRC
};
/* eraseblock states */
enum {
CHFS_BLK_STATE_FREE = 100,
CHFS_BLK_STATE_CLEAN,
@ -123,25 +128,23 @@ enum {
extern struct pool chfs_inode_pool;
extern const struct genfs_ops chfs_genfsops;
/**
* struct chfs_node_ref - a reference to a node
* @lnr: logical identifier of the eraseblock where the node is
* @offset: offset int hte eraseblock where the node starts
* @next: used at data and dirent nodes, it points to the next data node which
* belongs to the same vnode
*/
/* struct chfs_node_ref - a reference to a node which is on the media */
struct chfs_node_ref
{
struct chfs_node_ref *nref_next;
uint32_t nref_lnr;
uint32_t nref_offset;
struct chfs_node_ref *nref_next; /* next data node which belongs to the same vnode */
uint32_t nref_lnr; /* nref's LEB number */
uint32_t nref_offset; /* nref's offset */
};
/* Constants for allocating node refs */
/*
* constants for allocating node refs
* they're allocated in blocks
*/
#define REFS_BLOCK_LEN (255/sizeof(struct chfs_node_ref))
#define REF_EMPTY_NODE (UINT_MAX)
#define REF_LINK_TO_NEXT (UINT_MAX - 1)
/* node masks - stored in the last two bits of a node's offset (the "state" of an nref) */
enum {
CHFS_NORMAL_NODE_MASK,
CHFS_UNCHECKED_NODE_MASK,
@ -158,83 +161,84 @@ enum {
#define CHFS_GET_OFS(ofs) (ofs & ~ 3)
/*
* Nrefs are allocated in blocks; get the (in-memory) next one. Usually the next
* doesn't belong to the same vnode.
*/
static inline struct chfs_node_ref *
node_next(struct chfs_node_ref *nref)
{
//dbg("node next: %u : %u\n", nref->nref_lnr, nref->nref_offset);
/* step to the next nref in the same block */
nref++;
//dbg("nref++: %u : %u\n", nref->nref_lnr, nref->nref_offset);
/* REF_LINK_TO_NEXT means that the next node will be in the next block */
if (nref->nref_lnr == REF_LINK_TO_NEXT) {
//dbg("link to next\n");
nref = nref->nref_next;
if (!nref)
return nref;
}
/* REF_EMPTY_NODE means that this is the last node */
if (nref->nref_lnr == REF_EMPTY_NODE) {
//dbg("empty\n");
return NULL;
}
return nref;
}
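/*
 * Usage sketch (hypothetical names: "start" is any nref, process() a
 * consumer): node_next() walks nrefs in allocation (in-memory) order and
 * hides the REF_LINK_TO_NEXT / REF_EMPTY_NODE terminator entries, e.g.
 *
 *	struct chfs_node_ref *nref;
 *	for (nref = start; nref != NULL; nref = node_next(nref))
 *		process(nref);
 *
 * The GC uses the same stepping when it walks an eraseblock's nodes
 * (see chfs_gcollect_pass() in chfs_gc.c).
 */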
/**
* struct chfs_dirent - full representation of a directory entry
*/
/* struct chfs_dirent - full representation of a directory entry */
struct chfs_dirent
{
struct chfs_node_ref *nref;
// struct chfs_dirent *next;
TAILQ_ENTRY(chfs_dirent) fds;
uint64_t version;
ino_t vno;
uint32_t nhash;
enum chtype type;
uint8_t nsize;
uint8_t name[0];
/* used by chfs_alloc_dirent and free counterpart */
// size_t alloc_size;
struct chfs_node_ref *nref; /* nref of the dirent */
TAILQ_ENTRY(chfs_dirent) fds; /* directory entries */
uint64_t version; /* version */
ino_t vno; /* vnode number */
uint32_t nhash; /* name hash */
enum chtype type; /* type of the dirent */
uint8_t nsize; /* length of its name */
uint8_t name[0]; /* name of the directory entry */
};
/* struct chfs_tmp_dnode - used temporarily while building a data node */
struct chfs_tmp_dnode {
struct chfs_full_dnode *node;
uint64_t version;
uint32_t data_crc;
//uint32_t partial_crc;
//uint16_t csize;
uint16_t overlapped;
struct chfs_tmp_dnode *next;
struct chfs_full_dnode *node; /* associated full dnode */
uint64_t version; /* version of the tmp node */
uint32_t data_crc; /* CRC of the data */
uint16_t overlapped; /* is overlapped */
struct chfs_tmp_dnode *next; /* next tmp node */
};
/* struct chfs_tmp_dnode_info - tmp nodes are stored in rb trees */
struct chfs_tmp_dnode_info {
struct rb_node rb_node;
struct chfs_tmp_dnode *tmpnode;
struct rb_node rb_node; /* rb tree entry */
struct chfs_tmp_dnode *tmpnode; /* associated tmp node */
};
/* struct chfs_readinode_info - collection of tmp_dnodes */
struct chfs_readinode_info {
struct rb_tree tdi_root;
struct chfs_tmp_dnode_info *mdata_tn;
uint64_t highest_version;
struct chfs_node_ref *latest_ref;
struct rb_tree tdi_root; /* root of the rb tree */
struct chfs_tmp_dnode_info *mdata_tn; /* metadata (e.g. symlink) */
uint64_t highest_version; /* highest version of the nodes */
struct chfs_node_ref *latest_ref; /* latest node reference */
};
/* struct chfs_full_dnode - full data node */
struct chfs_full_dnode {
struct chfs_node_ref *nref;
uint64_t ofs;
uint32_t size;
uint32_t frags;
struct chfs_node_ref *nref; /* nref of the node */
uint64_t ofs; /* offset of the data node */
uint32_t size; /* size of the data node */
uint32_t frags; /* number of fragments */
};
/* struct chfs_node_frag - a fragment of a data node */
struct chfs_node_frag {
struct rb_node rb_node;
struct chfs_full_dnode *node;
uint32_t size;
uint64_t ofs;
struct rb_node rb_node; /* rb tree entry */
struct chfs_full_dnode *node; /* associated full dnode */
uint32_t size; /* size of the fragment */
uint64_t ofs; /* offset of the fragment */
};
/* find the first fragment of a data node */
static inline struct chfs_node_frag *
frag_first(struct rb_tree *tree)
{
@ -245,6 +249,7 @@ frag_first(struct rb_tree *tree)
return frag;
}
/* find the last fragment of a data node */
static inline struct chfs_node_frag *
frag_last(struct rb_tree *tree)
{
@ -255,149 +260,129 @@ frag_last(struct rb_tree *tree)
return frag;
}
/* iterate the fragtree */
#define frag_next(tree, frag) (struct chfs_node_frag *)rb_tree_iterate(tree, frag, RB_DIR_RIGHT)
#define frag_prev(tree, frag) (struct chfs_node_frag *)rb_tree_iterate(tree, frag, RB_DIR_LEFT)
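/*
 * Iteration sketch (assuming an inode ip with an initialized fragtree, as in
 * chfs_gcollect_live() in chfs_gc.c): each frag covers the byte range
 * [frag->ofs, frag->ofs + frag->size) of the file.
 *
 *	struct chfs_node_frag *frag;
 *	for (frag = frag_first(&ip->fragtree); frag != NULL;
 *	    frag = frag_next(&ip->fragtree, frag))
 *		...
 */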
/* XXX hack
#ifndef CHFS_FRAG_TREE
#define CHFS_FRAG_TREE
RB_HEAD(chfs_frag_tree, chfs_node_frag);
#endif
*/
/* for prototypes, properly defined in chfs_inode.h */
//struct chfs_inode_ext;
/**
* struct chfs_vnode_cache - in memory representation of a vnode
* @v: pointer to the vnode info node
* @dnode: pointer to the list of data nodes
* @dirents: pointer to the list of directory entries
* @vno_version: used only during scan, holds the current version number of
* chfs_flash_vnode
* @scan_dirents: used only during scan, holds the full representation of
* directory entries of this vnode
* @pvno: parent vnode number
* @nlink: number of links to this vnode
*/
/* struct chfs_vnode_cache - in-memory representation of a file or directory */
struct chfs_vnode_cache {
// struct chfs_dirent *scan_dirents;
/*
* void *p must be the first field of the structure
* but I can't remember where we use it and exactly for what
*/
void *p;
struct chfs_dirent_list scan_dirents;
struct chfs_dirent_list scan_dirents; /* used during scanning */
struct chfs_node_ref *v;
struct chfs_node_ref *dnode;
struct chfs_node_ref *dirents;
struct chfs_node_ref *v; /* list of vnode information nodes */
struct chfs_node_ref *dnode; /* list of data nodes */
struct chfs_node_ref *dirents; /* list of directory entries */
uint64_t *vno_version;
uint64_t highest_version;
uint64_t *vno_version; /* version of the vnode */
uint64_t highest_version; /* highest version of dnodes */
uint8_t flags;
uint16_t state;
ino_t vno;
ino_t pvno;
struct chfs_vnode_cache* next;
uint32_t nlink;
uint8_t flags; /* flags */
uint16_t state; /* current state */
ino_t vno; /* vnode number */
ino_t pvno; /* vnode number of parent */
struct chfs_vnode_cache* next; /* next element of vnode cache */
uint32_t nlink; /* number of links to the file */
};
/* struct chfs_eraseblock - representation of an eraseblock */
struct chfs_eraseblock
{
uint32_t lnr;
uint32_t lnr; /* LEB number of the block */
TAILQ_ENTRY(chfs_eraseblock) queue;
//uint32_t bad_count;
uint32_t unchecked_size;
uint32_t used_size;
uint32_t dirty_size;
uint32_t free_size;
uint32_t wasted_size;
TAILQ_ENTRY(chfs_eraseblock) queue; /* queue entry */
struct chfs_node_ref *first_node;
struct chfs_node_ref *last_node;
uint32_t unchecked_size; /* size not yet checked by the GC */
uint32_t used_size; /* size of nodes */
uint32_t dirty_size; /* size of obsoleted nodes */
uint32_t free_size; /* available size */
uint32_t wasted_size; /* padding */
/* Next block to be garbage collected */
struct chfs_node_ref *gc_node;
struct chfs_node_ref *first_node; /* first node of the block */
struct chfs_node_ref *last_node; /* last node of the block */
struct chfs_node_ref *gc_node; /* next node of the block
 that hasn't been garbage collected yet */
};
/* eraseblock queue */
TAILQ_HEAD(chfs_eraseblock_queue, chfs_eraseblock);
#define ALLOC_NORMAL 0
#define ALLOC_DELETION 1
#define ALLOC_GC 2
/* space allocation types */
#define ALLOC_NORMAL 0 /* allocating for normal usage (write, etc.) */
#define ALLOC_DELETION 1 /* allocating for deletion */
#define ALLOC_GC 2 /* allocating for the GC */
/* struct garbage_collector_thread - descriptor of GC thread */
struct garbage_collector_thread {
lwp_t *gcth_thread;
kcondvar_t gcth_wakeup;
bool gcth_running;
};
/* states of mounting */
#define CHFS_MP_FLAG_SCANNING 2
#define CHFS_MP_FLAG_BUILDING 4
/**
* struct chfs_mount - CHFS main descriptor structure
* @ebh: eraseblock handler
* @fl_index: index of flash device in the flash layer
* @fs_version: filesystem version descriptor
* @gbl_version: global version number
* @max_vno: max vnode id
* @chm_lock_mountfields:
* @vnocache_hash: hash table of vnode caches
* @vnocache_lock:
* @blocks: array of eraseblocks on flash
* @chm_root: used to protect all fields
* @free_size: free size on the flash
* @dirty_size: dirtied size on flash
* @unchecked_size: size of unchecked data on flash
* @free_queue: queue of free eraseblocks
* @clean_queue: queue of clean eraseblocks
* @dirty_queue: queue of dirty eraseblocks
* @very_dirty_queue: queue of very dirty eraseblocks
* @erase_pending_queue: queue of eraseblocks waiting for erasing
* @erasable_pending_wbuf_queue: queue of eraseblocks waiting for erasing and
* have data to write to them
* @nextblock: next eraseblock to write to
* @nr_free_blocks: number of free blocks on the free_queue
* @nr_erasable_blocks: number of blocks that can be erased and are on the
* erasable_queue
*/
/* struct chfs_mount - CHFS main descriptor structure */
struct chfs_mount {
struct mount *chm_fsmp;
struct chfs_ebh *chm_ebh;
int chm_fs_version;
uint64_t chm_gbl_version;
ino_t chm_max_vno;
ino_t chm_checked_vno;
unsigned int chm_flags;
struct mount *chm_fsmp; /* general mount descriptor */
struct chfs_ebh *chm_ebh; /* eraseblock handler */
int chm_fs_version; /* version of the FS */
uint64_t chm_gbl_version; /* global version number */
ino_t chm_max_vno; /* maximum vnode number */
ino_t chm_checked_vno; /* vnode number of the last checked node */
unsigned int chm_flags; /* filesystem flags */
/* chm_lock_mountfields:
* Used to protect all the following fields. */
/*
* chm_lock_mountfields:
* Used to protect all the following fields.
*/
kmutex_t chm_lock_mountfields;
struct chfs_vnode_cache **chm_vnocache_hash;
/* chm_lock_vnocache:
struct chfs_vnode_cache **chm_vnocache_hash; /* hash table
of vnode caches */
/*
* chm_lock_vnocache:
* Used to protect the vnode cache.
* If you have to lock chm_lock_mountfields and also chm_lock_vnocache,
* you must lock chm_lock_mountfields first. */
* you must lock chm_lock_mountfields first.
*/
kmutex_t chm_lock_vnocache;
struct chfs_eraseblock *chm_blocks;
struct chfs_eraseblock *chm_blocks; /* array of eraseblocks */
struct chfs_node *chm_root;
struct chfs_node *chm_root; /* root node */
uint32_t chm_free_size;
uint32_t chm_dirty_size;
uint32_t chm_unchecked_size;
uint32_t chm_used_size;
uint32_t chm_wasted_size;
/* chm_lock_sizes:
uint32_t chm_free_size; /* available space */
uint32_t chm_dirty_size; /* size of contained obsoleted nodes */
uint32_t chm_unchecked_size; /* size not yet checked by the GC */
uint32_t chm_used_size; /* size of contained nodes */
uint32_t chm_wasted_size; /* padding */
/*
* chm_lock_sizes:
* Used to protect the (free, used, etc.) sizes of the FS
* (and also the sizes of each eraseblock).
* If you have to lock chm_lock_mountfields and also chm_lock_sizes,
* you must lock chm_lock_mountfields first. */
* you must lock chm_lock_mountfields first.
*/
kmutex_t chm_lock_sizes;
/*
* eraseblock queues
* free: completely free
* clean: contains only valid data
* dirty: contains valid and deleted data
* very_dirty: contains mostly deleted data (should be GC'd)
* erasable: doesn't contain valid data (should be erased)
* erase_pending: we can erase blocks from this queue
*/
struct chfs_eraseblock_queue chm_free_queue;
struct chfs_eraseblock_queue chm_clean_queue;
struct chfs_eraseblock_queue chm_dirty_queue;
@ -405,22 +390,26 @@ struct chfs_mount {
struct chfs_eraseblock_queue chm_erasable_pending_wbuf_queue;
struct chfs_eraseblock_queue chm_erase_pending_queue;
/* reserved blocks */
uint8_t chm_resv_blocks_deletion;
uint8_t chm_resv_blocks_write;
uint8_t chm_resv_blocks_gctrigger;
uint8_t chm_resv_blocks_gcmerge;
uint8_t chm_nospc_dirty;
uint8_t chm_vdirty_blocks_gctrigger;
uint8_t chm_vdirty_blocks_gctrigger; /* GC trigger: number of very
 dirty blocks */
struct chfs_eraseblock *chm_nextblock;
struct chfs_eraseblock *chm_nextblock; /* next block to write to */
struct garbage_collector_thread chm_gc_thread;
struct chfs_eraseblock *chm_gcblock;
struct garbage_collector_thread chm_gc_thread; /* descriptor of
GC thread */
struct chfs_eraseblock *chm_gcblock; /* next block for GC */
int chm_nr_free_blocks;
int chm_nr_erasable_blocks;
int chm_nr_free_blocks; /* number of free blocks */
int chm_nr_erasable_blocks; /* number of erasable blocks */
/* FS constants, used during writing */
int32_t chm_fs_bmask;
int32_t chm_fs_bsize;
int32_t chm_fs_qbmask;
@ -431,21 +420,22 @@ struct chfs_mount {
/* TODO will we use these? */
unsigned int chm_pages_max;
unsigned int chm_pages_used;
unsigned int chm_nodes_max;
unsigned int chm_nodes_cnt;
struct chfs_pool chm_dirent_pool;
struct chfs_pool chm_node_pool;
struct chfs_str_pool chm_str_pool;
/**/
size_t chm_wbuf_pagesize;
unsigned char* chm_wbuf;
size_t chm_wbuf_ofs;
size_t chm_wbuf_len;
/* chm_lock_wbuf:
size_t chm_wbuf_pagesize; /* writebuffer's size */
unsigned char* chm_wbuf; /* writebuffer */
size_t chm_wbuf_ofs; /* current offset of the write buffer */
size_t chm_wbuf_len; /* current length of the write buffer */
/*
* chm_lock_wbuf:
* Used to protect the write buffer.
* If you have to lock chm_lock_mountfields and also chm_lock_wbuf,
* you must lock chm_lock_mountfields first. */
* you must lock chm_lock_mountfields first.
*/
krwlock_t chm_lock_wbuf;
};
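/*
 * Lock ordering sketch (illustrative only, following the comments above):
 * chm_lock_mountfields is always taken first, then chm_lock_vnocache,
 * chm_lock_sizes or chm_lock_wbuf as needed, e.g.
 *
 *	mutex_enter(&chmp->chm_lock_mountfields);
 *	mutex_enter(&chmp->chm_lock_vnocache);
 *	... work on the vnode cache ...
 *	mutex_exit(&chmp->chm_lock_vnocache);
 *	mutex_exit(&chmp->chm_lock_mountfields);
 */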
@ -455,10 +445,11 @@ struct chfs_mount {
* specific ones.
*/
#define CHFS_OFFSET_DOT 0
#define CHFS_OFFSET_DOTDOT 1
#define CHFS_OFFSET_EOF 2
#define CHFS_OFFSET_FIRST 3
/* directory entry offsets */
#define CHFS_OFFSET_DOT 0 /* "." - this directory */
#define CHFS_OFFSET_DOTDOT 1 /* ".." - parent directory */
#define CHFS_OFFSET_EOF 2 /* after the last entry */
#define CHFS_OFFSET_FIRST 3 /* first real entry */
/*---------------------------------------------------------------------------*/
@ -512,14 +503,16 @@ int chfs_reserve_space_gc(struct chfs_mount *, uint32_t);
int chfs_reserve_space(struct chfs_mount *, uint32_t);
void chfs_mark_node_obsolete(struct chfs_mount *, struct chfs_node_ref *);
/*
* Finds the vnode cache corresponding to an nref.
* The last element of every linked list of nrefs is the vnode cache itself.
*/
static inline struct chfs_vnode_cache *
chfs_nref_to_vc(struct chfs_node_ref *nref)
{
/* iterate the whole list */
while (nref->nref_next) {
nref = nref->nref_next;
//dbg("lnr: %u, ofs: %u\n", nref->nref_lnr, nref->nref_offset);
//dbg("vno: %llu\n", ((struct chfs_vnode_cache *)(nref))->vno);
//dbg("scan_dirents: %p\n", ((struct chfs_vnode_cache *)(nref))->scan_dirents);
if (nref->nref_lnr == REF_LINK_TO_NEXT) {
dbg("Link to next!\n");
} else if (nref->nref_lnr == REF_EMPTY_NODE) {
@ -527,12 +520,9 @@ chfs_nref_to_vc(struct chfs_node_ref *nref)
}
}
//dbg("NREF_TO_VC: GET IT\n");
//dbg("nref_next: %p, lnr: %u, ofs: %u\n", nref->nref_next, nref->nref_lnr, nref->nref_offset);
struct chfs_vnode_cache *vc = (struct chfs_vnode_cache *) nref;
dbg("vno: %ju, pvno: %ju, hv: %ju, nlink: %u\n", (intmax_t )vc->vno,
(intmax_t )vc->pvno, (intmax_t )vc->highest_version, vc->nlink);
//return ((struct chfs_vnode_cache *)nref);
return vc;
}
@ -689,11 +679,12 @@ void chfs_itimes(struct chfs_inode *, const struct timespec *,
const struct timespec *, const struct timespec *);
int chfs_update(struct vnode *, const struct timespec *,
const struct timespec *, int);
//int chfs_truncate(struct vnode *, off_t);
/*---------------------------------------------------------------------------*/
/* Some inline functions temporarily placed here */
/* chfs_map_leb - corresponds to ebh_map_leb */
static inline int
chfs_map_leb(struct chfs_mount *chmp, int lnr)
{
@ -707,6 +698,7 @@ chfs_map_leb(struct chfs_mount *chmp, int lnr)
}
/* chfs_unmap_leb - corresponds to ebh_unmap_leb */
static inline int
chfs_unmap_leb(struct chfs_mount *chmp, int lnr)
{
@ -719,6 +711,7 @@ chfs_unmap_leb(struct chfs_mount *chmp, int lnr)
return err;
}
/* chfs_read_leb - corresponds to ebh_read_leb */
static inline int
chfs_read_leb(struct chfs_mount *chmp, int lnr, char *buf,
int offset, int len, size_t *retlen)
@ -733,6 +726,7 @@ chfs_read_leb(struct chfs_mount *chmp, int lnr, char *buf,
return err;
}
/* chfs_write_leb - corresponds to ebh_write_leb */
static inline int chfs_write_leb(struct chfs_mount *chmp, int lnr, char *buf,
int offset, int len, size_t *retlen)
{
@ -745,9 +739,6 @@ static inline int chfs_write_leb(struct chfs_mount *chmp, int lnr, char *buf,
return err;
}
/******************************************************************************/
/* Code from dummyfs.h */
/******************************************************************************/
/* --------------------------------------------------------------------- */
#define CHFS_PAGES_RESERVED (4 * 1024 * 1024 / PAGE_SIZE)
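/* e.g. with a 4 KiB PAGE_SIZE this reserves 1024 pages, i.e. 4 MiB */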

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_args.h,v 1.1 2011/11/24 15:51:31 ahoka Exp $ */
/* $NetBSD: chfs_args.h,v 1.2 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -35,19 +35,10 @@
#define CHFS_ARGS_VERSION 1
/**
* struct chfs_args - arguments needed when mounting filesystem
* @fl_index: index of the flash device in the flash layer
*/
/* struct chfs_args - arguments needed when mounting the filesystem */
struct chfs_args {
//int ca_version;
char *fspec;
int fl_index;
/* Root node attributes. */
/*uid_t ca_root_uid;
gid_t ca_root_gid;
mode_t ca_root_mode;*/
int fl_index; /* index of the flash device in the flash layer */
};
#endif /* _FS_CHFS_CHFS_ARGS_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_build.c,v 1.4 2012/08/10 09:26:58 ttoth Exp $ */
/* $NetBSD: chfs_build.c,v 1.5 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -31,9 +31,13 @@
*/
#include "chfs.h"
//#include </root/xipffs/netbsd.chfs/chfs.h>
/*
* chfs_calc_trigger_levels - set up filesystem parameters
* Sets up filesystem parameters (reserved blocks and GC trigger level)
* for a specific flash.
*/
void
chfs_calc_trigger_levels(struct chfs_mount *chmp)
{
@ -41,7 +45,7 @@ chfs_calc_trigger_levels(struct chfs_mount *chmp)
chmp->chm_resv_blocks_deletion = 2;
size = chmp->chm_ebh->flash_size / 50; //2% of flash size
size = chmp->chm_ebh->flash_size / 50; /* 2% of flash size */
size += chmp->chm_ebh->peb_nr * 100;
size += chmp->chm_ebh->eb_size - 1;
@ -56,11 +60,9 @@ chfs_calc_trigger_levels(struct chfs_mount *chmp)
}
/**
/*
* chfs_build_set_vnodecache_nlink - set pvno and nlink in vnodecaches
* @chmp: CHFS main descriptor structure
* @vc: vnode cache
* This function travels @vc's directory entries and sets the pvno and nlink
* Traverses vc's directory entries and sets the pvno and nlink
* attributes of the vnode where the dirent's vno points.
*/
void
@ -68,9 +70,7 @@ chfs_build_set_vnodecache_nlink(struct chfs_mount *chmp,
struct chfs_vnode_cache *vc)
{
struct chfs_dirent *fd, *tmpfd;
//dbg("set nlink\n");
// for (fd = vc->scan_dirents; fd; fd = fd->next) {
TAILQ_FOREACH_SAFE(fd, &vc->scan_dirents, fds, tmpfd) {
struct chfs_vnode_cache *child_vc;
@ -95,26 +95,20 @@ chfs_build_set_vnodecache_nlink(struct chfs_mount *chmp,
fd->name, (unsigned long long)fd->vno,
(unsigned long long)vc->vno);
} else {
//dbg("child_vc->pvno =
// vc->vno; pvno = %d\n", child_vc->pvno);
child_vc->pvno = vc->vno;
}
}
child_vc->nlink++;
//dbg("child_vc->nlink++;\n");
//child_vc->nlink++;
vc->nlink++;
}
}
/**
/*
* chfs_build_remove_unlinked_vnode - removes an unlinked vnode
*/
/* static */
void
chfs_build_remove_unlinked_vnode(struct chfs_mount *chmp,
struct chfs_vnode_cache *vc,
// struct chfs_dirent **unlinked)
struct chfs_dirent_list *unlinked)
{
struct chfs_node_ref *nref;
@ -125,7 +119,7 @@ chfs_build_remove_unlinked_vnode(struct chfs_mount *chmp,
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
nref = vc->dnode;
// The vnode cache is at the end of the data node's chain
/* The vnode cache is at the end of the data node's chain */
while (nref != (struct chfs_node_ref *)vc) {
struct chfs_node_ref *next = nref->nref_next;
dbg("mark dnode\n");
@ -134,7 +128,7 @@ chfs_build_remove_unlinked_vnode(struct chfs_mount *chmp,
}
vc->dnode = (struct chfs_node_ref *)vc;
nref = vc->dirents;
// The vnode cache is at the end of the dirent node's chain
/* The vnode cache is at the end of the dirent node's chain */
while (nref != (struct chfs_node_ref *)vc) {
struct chfs_node_ref *next = nref->nref_next;
dbg("mark dirent\n");
@ -144,9 +138,7 @@ chfs_build_remove_unlinked_vnode(struct chfs_mount *chmp,
vc->dirents = (struct chfs_node_ref *)vc;
if (!TAILQ_EMPTY(&vc->scan_dirents)) {
TAILQ_FOREACH_SAFE(fd, &vc->scan_dirents, fds, tmpfd) {
// while (vc->scan_dirents) {
struct chfs_vnode_cache *child_vc;
// fd = vc->scan_dirents;
dbg("dirent dump:\n");
dbg(" ->vno: %llu\n", (unsigned long long)fd->vno);
dbg(" ->version: %llu\n", (unsigned long long)fd->version);
@ -154,7 +146,6 @@ chfs_build_remove_unlinked_vnode(struct chfs_mount *chmp,
dbg(" ->nsize: %d\n", fd->nsize);
dbg(" ->name: %s\n", fd->name);
dbg(" ->type: %d\n", fd->type);
// vc->scan_dirents = fd->next;
TAILQ_REMOVE(&vc->scan_dirents, fd, fds);
if (!fd->vno) {
@ -168,16 +159,13 @@ chfs_build_remove_unlinked_vnode(struct chfs_mount *chmp,
chfs_free_dirent(fd);
continue;
}
/**
/*
* Decrease nlink in the child. If it reaches 0, add the dirent to the
* unlinked list, otherwise just free it.
*/
child_vc->nlink--;
if (!child_vc->nlink) {
//dbg("nlink is 0\n");
// fd->next = *unlinked;
// *unlinked = fd;
// XXX HEAD or TAIL?
// original code did HEAD, but we could add
// it to the TAIL easily with TAILQ.
@ -204,12 +192,11 @@ chfs_build_remove_unlinked_vnode(struct chfs_mount *chmp,
dbg("END\n");
}
/**
/*
* chfs_build_filesystem - build in-memory representation of filesystem
* @chmp: super block information
*
* Step 1:
* This function scans through the eraseblocks mapped in EBH.
* Scans through the eraseblocks mapped in EBH.
* During scan builds up the map of vnodes and directory entries and puts them
* into the vnode_cache.
* Step 2:
@ -223,7 +210,6 @@ chfs_build_filesystem(struct chfs_mount *chmp)
int i,err = 0;
struct chfs_vnode_cache *vc;
struct chfs_dirent *fd, *tmpfd;
// struct chfs_dirent *unlinked = NULL;
struct chfs_node_ref **nref;
struct chfs_dirent_list unlinked;
struct chfs_vnode_cache *notregvc;
@ -232,17 +218,13 @@ chfs_build_filesystem(struct chfs_mount *chmp)
mutex_enter(&chmp->chm_lock_mountfields);
/**
* Step 1
*/
/* Step 1 */
chmp->chm_flags |= CHFS_MP_FLAG_SCANNING;
for (i = 0; i < chmp->chm_ebh->peb_nr; i++) {
//dbg("processing block: %d\n", i);
chmp->chm_blocks[i].lnr = i;
chmp->chm_blocks[i].free_size = chmp->chm_ebh->eb_size;
//If the LEB is add to free list skip it.
/* If the LEB is not mapped, add it to the free list and skip it. */
if (chmp->chm_ebh->lmap[i] < 0) {
//dbg("block %d is unmapped\n", i);
TAILQ_INSERT_TAIL(&chmp->chm_free_queue,
&chmp->chm_blocks[i], queue);
chmp->chm_nr_free_blocks++;
@ -261,7 +243,6 @@ chfs_build_filesystem(struct chfs_mount *chmp)
&chmp->chm_blocks[i], queue);
break;
case CHFS_BLK_STATE_PARTDIRTY:
//dbg("free size: %d\n", chmp->chm_blocks[i].free_size);
if (chmp->chm_blocks[i].free_size > chmp->chm_wbuf_pagesize &&
(!chmp->chm_nextblock ||
chmp->chm_blocks[i].free_size >
@ -307,9 +288,7 @@ chfs_build_filesystem(struct chfs_mount *chmp)
* Need check at erase + write + read...
*/
/**
* Step 2
*/
/* Step 2 */
chmp->chm_flags |= CHFS_MP_FLAG_BUILDING;
for (i = 0; i < VNODECACHE_SIZE; i++) {
vc = chmp->chm_vnocache_hash[i];
@ -321,10 +300,7 @@ chfs_build_filesystem(struct chfs_mount *chmp)
}
}
/**
* Step 3
* Scan for vnodes with 0 nlink.
*/
/* Step 3 */
for (i = 0; i < VNODECACHE_SIZE; i++) {
vc = chmp->chm_vnocache_hash[i];
while (vc) {
@ -333,18 +309,13 @@ chfs_build_filesystem(struct chfs_mount *chmp)
continue;
}
//dbg("remove unlinked start i: %d\n", i);
chfs_build_remove_unlinked_vnode(chmp,
vc, &unlinked);
//dbg("remove unlinked end\n");
vc = vc->next;
}
}
/* Remove the newly unlinked vnodes. They are on the unlinked list */
TAILQ_FOREACH_SAFE(fd, &unlinked, fds, tmpfd) {
// while (unlinked) {
// fd = unlinked;
// unlinked = fd->next;
TAILQ_REMOVE(&unlinked, fd, fds);
mutex_enter(&chmp->chm_lock_vnocache);
vc = chfs_vnode_cache_get(chmp, fd->vno);
@ -363,20 +334,12 @@ chfs_build_filesystem(struct chfs_mount *chmp)
vc = chmp->chm_vnocache_hash[i];
while (vc) {
TAILQ_FOREACH_SAFE(fd, &vc->scan_dirents, fds, tmpfd) {
// while (vc->scan_dirents) {
// fd = vc->scan_dirents;
// vc->scan_dirents = fd->next;
TAILQ_REMOVE(&vc->scan_dirents, fd, fds);
if (fd->vno == 0) {
//for (nref = &vc->dirents;
// *nref != fd->nref;
// nref = &((*nref)->next));
nref = &fd->nref;
*nref = fd->nref->nref_next;
//fd->nref->nref_next = NULL;
} else if (fd->type == CHT_DIR) {
//set state every non-VREG file's vc
/* set the state of every non-VREG file's vc */
mutex_enter(&chmp->chm_lock_vnocache);
notregvc = chfs_vnode_cache_get(chmp, fd->vno);
notregvc->state = VNO_STATE_PRESENT;
@ -384,13 +347,12 @@ chfs_build_filesystem(struct chfs_mount *chmp)
}
chfs_free_dirent(fd);
}
// vc->scan_dirents = NULL;
KASSERT(TAILQ_EMPTY(&vc->scan_dirents));
vc = vc->next;
}
}
//Set up chmp->chm_wbuf_ofs for the first write
/* Set up chmp->chm_wbuf_ofs for the first write */
if (chmp->chm_nextblock) {
dbg("free_size: %d\n", chmp->chm_nextblock->free_size);
chmp->chm_wbuf_ofs = chmp->chm_ebh->eb_size -

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_erase.c,v 1.1 2011/11/24 15:51:31 ahoka Exp $ */
/* $NetBSD: chfs_erase.c,v 1.2 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -31,28 +31,17 @@
* SUCH DAMAGE.
*/
/*
* chfs_erase.c
*
* Copyright (C) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>,
* ...
* University of Szeged, Hungary
*/
#include "chfs.h"
/**
/*
* chfs_remap_leb - unmap and then map a leb
* @chmp: chfs mount structure
*
* This function gets an eraseblock from the erasable queue, unmaps it through
* Gets an eraseblock from the erasable queue, unmaps it through
* EBH and maps another eraseblock to the same LNR.
* EBH will find a free eraseblock if there is one, or will erase a dirty
* one if there are no free blocks.
*
* Returns zero in case of success, an error code otherwise.
*
* Needs more brainstorming here.
*/
int
@ -63,8 +52,6 @@ chfs_remap_leb(struct chfs_mount *chmp)
dbg("chfs_remap_leb\n");
uint32_t dirty, unchecked, used, free, wasted;
//dbg("chmp->chm_nr_erasable_blocks: %d\n", chmp->chm_nr_erasable_blocks);
//dbg("ltree: %p ecl: %p\n", &chmp->chm_ebh->ltree_lock, &chmp->chm_lock_sizes);
KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
KASSERT(mutex_owned(&chmp->chm_lock_sizes));
@ -102,7 +89,7 @@ chfs_remap_leb(struct chfs_mount *chmp)
free = cheb->free_size;
wasted = cheb->wasted_size;
// Free allocated node references for this eraseblock
/* Free allocated node references for this eraseblock */
chfs_free_node_refs(cheb);
err = chfs_unmap_leb(chmp, cheb->lnr);
@ -112,7 +99,7 @@ chfs_remap_leb(struct chfs_mount *chmp)
err = chfs_map_leb(chmp, cheb->lnr);
if (err)
return err;
// Reset state to default and change chmp sizes too
/* Reset state to default and change chmp sizes too */
chfs_change_size_dirty(chmp, cheb, -dirty);
chfs_change_size_unchecked(chmp, cheb, -unchecked);
chfs_change_size_used(chmp, cheb, -used);
@ -127,7 +114,7 @@ chfs_remap_leb(struct chfs_mount *chmp)
cheb->first_node = NULL;
cheb->last_node = NULL;
//put it to free_queue
/* put it to free_queue */
TAILQ_INSERT_TAIL(&chmp->chm_free_queue, cheb, queue);
chmp->chm_nr_free_blocks++;
dbg("remaped (free: %d, erasable: %d)\n", chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks);

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_gc.c,v 1.3 2012/08/10 09:26:58 ttoth Exp $ */
/* $NetBSD: chfs_gc.c,v 1.4 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -60,21 +60,23 @@ int chfs_gcollect_dnode(struct chfs_mount *,
struct chfs_eraseblock *, struct chfs_inode *,
struct chfs_full_dnode *, uint32_t, uint32_t);
/* must be called with chm_lock_mountfields held */
/*
* chfs_gc_trigger - wakes up the GC thread if it should run
* Must be called with chm_lock_mountfields held.
*/
void
chfs_gc_trigger(struct chfs_mount *chmp)
{
struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
//mutex_enter(&chmp->chm_lock_sizes);
if (gc->gcth_running &&
chfs_gc_thread_should_wake(chmp)) {
cv_signal(&gc->gcth_wakeup);
}
//mutex_exit(&chmp->chm_lock_sizes);
}
/* chfs_gc_thread - garbage collector's thread */
void
chfs_gc_thread(void *data)
{
@ -90,10 +92,8 @@ chfs_gc_thread(void *data)
* do it otherway with the current design...
*/
if (chfs_gc_thread_should_wake(chmp)) {
// mutex_exit(&chmp->chm_lock_mountfields);
if (chfs_gcollect_pass(chmp) == ENOSPC) {
mutex_exit(&chmp->chm_lock_mountfields);
dbg_gc("No space for garbage collection\n");
panic("No space for garbage collection\n");
/* XXX why break here? i have added a panic
* here to see if it gets triggered -ahoka
@ -101,7 +101,6 @@ chfs_gc_thread(void *data)
break;
}
/* XXX gcollect_pass drops the mutex */
//mutex_enter(&chmp->chm_lock_mountfields);
}
cv_timedwait_sig(&gc->gcth_wakeup,
@ -113,6 +112,7 @@ chfs_gc_thread(void *data)
kthread_exit(0);
}
/* chfs_gc_thread_start - starts GC */
void
chfs_gc_thread_start(struct chfs_mount *chmp)
{
@ -126,12 +126,13 @@ chfs_gc_thread_start(struct chfs_mount *chmp)
"chfsgcth");
}
/* chfs_gc_thread_stop - stops GC */
void
chfs_gc_thread_stop(struct chfs_mount *chmp)
{
struct garbage_collector_thread *gc = &chmp->chm_gc_thread;
/* check if it is actually running. if not, do nothing */
/* check if it is actually running */
if (gc->gcth_running) {
gc->gcth_running = false;
} else {
@ -148,7 +149,11 @@ chfs_gc_thread_stop(struct chfs_mount *chmp)
cv_destroy(&gc->gcth_wakeup);
}
/* must be called with chm_lock_mountfields held */
/*
* chfs_gc_thread_should_wake - checks if GC thread should wake up
* Must be called with chm_lock_mountfields held.
* Returns 1 if the GC should wake up, 0 otherwise.
*/
int
chfs_gc_thread_should_wake(struct chfs_mount *chmp)
{
@ -158,11 +163,13 @@ chfs_gc_thread_should_wake(struct chfs_mount *chmp)
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
/* Erase pending queue is not empty. */
if (!TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
dbg_gc("erase_pending\n");
return 1;
}
/* There is something unchecked in the filesystem. */
if (chmp->chm_unchecked_size) {
dbg_gc("unchecked\n");
return 1;
@ -171,6 +178,7 @@ chfs_gc_thread_should_wake(struct chfs_mount *chmp)
dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks *
chmp->chm_ebh->eb_size;
/* The number of free and erasable blocks is critically low. */
if (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks <
chmp->chm_resv_blocks_gctrigger && (dirty > chmp->chm_nospc_dirty)) {
dbg_gc("free: %d + erasable: %d < resv: %d\n",
@ -182,6 +190,7 @@ chfs_gc_thread_should_wake(struct chfs_mount *chmp)
return 1;
}
/* There are too many very dirty blocks. */
TAILQ_FOREACH(cheb, &chmp->chm_very_dirty_queue, queue) {
nr_very_dirty++;
if (nr_very_dirty == chmp->chm_vdirty_blocks_gctrigger) {
@ -190,18 +199,19 @@ chfs_gc_thread_should_wake(struct chfs_mount *chmp)
}
}
/* Everything is OK, GC shouldn't run. */
return 0;
}
/* chfs_gc_release_inode - does nothing yet */
void
chfs_gc_release_inode(struct chfs_mount *chmp,
struct chfs_inode *ip)
{
dbg_gc("release inode\n");
//mutex_exit(&ip->inode_lock);
//vput(ITOV(ip));
}
/* chfs_gc_fetch_inode - assigns the given inode to the GC */
struct chfs_inode *
chfs_gc_fetch_inode(struct chfs_mount *chmp, ino_t vno,
uint32_t unlinked)
@ -222,16 +232,12 @@ chfs_gc_fetch_inode(struct chfs_mount *chmp, ino_t vno,
return NULL;
}
if (vc->state != VNO_STATE_CHECKEDABSENT) {
//sleep_on_spinunlock(&chmp->chm_lock_vnocache);
mutex_exit(&chmp->chm_lock_vnocache);
/* XXX why do we need the delay here?! */
// kpause("chvncabs", true, mstohz(50), NULL);
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
cv_timedwait_sig(
&chmp->chm_gc_thread.gcth_wakeup,
&chmp->chm_lock_mountfields, mstohz(50));
// KASSERT(!mutex_owned(&chmp->chm_lock_vnocache));
} else {
mutex_exit(&chmp->chm_lock_vnocache);
}
@ -240,18 +246,17 @@ chfs_gc_fetch_inode(struct chfs_mount *chmp, ino_t vno,
} else {
dbg_gc("vnode lookup\n");
vp = chfs_vnode_lookup(chmp, vno);
//VFS_VGET(chmp->chm_fsmp, vno, &vp);
}
dbg_gc("vp to ip\n");
ip = VTOI(vp);
KASSERT(ip);
//mutex_enter(&ip->inode_lock);
return ip;
}
extern rb_tree_ops_t frag_rbtree_ops;
/* chfs_check - checks an inode with minimal initialization */
int
chfs_check(struct chfs_mount *chmp, struct chfs_vnode_cache *chvc)
{
@ -261,6 +266,7 @@ chfs_check(struct chfs_mount *chmp, struct chfs_vnode_cache *chvc)
struct vnode *vp;
int ret;
/* Get a new inode. */
ip = pool_get(&chfs_inode_pool, PR_WAITOK);
if (!ip) {
return ENOMEM;
@ -268,6 +274,7 @@ chfs_check(struct chfs_mount *chmp, struct chfs_vnode_cache *chvc)
vp = kmem_zalloc(sizeof(struct vnode), KM_SLEEP);
/* Minimal initialization. */
ip->chvc = chvc;
ip->vp = vp;
@ -276,6 +283,7 @@ chfs_check(struct chfs_mount *chmp, struct chfs_vnode_cache *chvc)
rb_tree_init(&ip->fragtree, &frag_rbtree_ops);
TAILQ_INIT(&ip->dents);
/* Build the node. */
mutex_exit(&chmp->chm_lock_vnocache);
ret = chfs_read_inode_internal(chmp, ip);
mutex_enter(&chmp->chm_lock_vnocache);
@ -283,11 +291,13 @@ chfs_check(struct chfs_mount *chmp, struct chfs_vnode_cache *chvc)
chfs_clear_inode(chmp, ip);
}
/* Release inode. */
pool_put(&chfs_inode_pool, ip);
return ret;
}
/* chfs_clear_inode - kills a minimal inode */
void
chfs_clear_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
{
@ -302,21 +312,26 @@ chfs_clear_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
//bool deleted = (chvc && !(chvc->pvno || chvc->nlink));
int deleted = (chvc && !(chvc->pvno | chvc->nlink));
/* Set actual state. */
if (chvc && chvc->state != VNO_STATE_CHECKING) {
chvc->state = VNO_STATE_CLEARING;
}
/* Remove vnode information. */
while (deleted && chvc->v != (struct chfs_node_ref *)chvc) {
nref = chvc->v;
chfs_remove_and_obsolete(chmp, chvc, nref, &chvc->v);
}
/* Destroy data. */
chfs_kill_fragtree(chmp, &ip->fragtree);
/* Clear dirents. */
TAILQ_FOREACH_SAFE(fd, &ip->dents, fds, tmpfd) {
chfs_free_dirent(fd);
}
/* Remove node from vnode cache. */
if (chvc && chvc->state == VNO_STATE_CHECKING) {
chvc->state = VNO_STATE_CHECKEDABSENT;
if ((struct chfs_vnode_cache *)chvc->v == chvc &&
@ -326,6 +341,7 @@ chfs_clear_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
}
}
/* find_gc_block - finds the next block for GC */
struct chfs_eraseblock *
find_gc_block(struct chfs_mount *chmp)
{
@ -334,17 +350,15 @@ find_gc_block(struct chfs_mount *chmp)
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
/* Get a random number. */
struct timespec now;
vfs_timestamp(&now);
int n = now.tv_nsec % 128;
//dbg_gc("n = %d\n", n);
again:
/* if (!TAILQ_EMPTY(&chmp->chm_bad_used_queue) && chmp->chm_nr_free_blocks > chmp->chm_nr_resv_blocks_gcbad) {
dbg_gc("Picking block from bad_used_queue to GC next\n");
nextqueue = &chmp->chm_bad_used_queue;
} else */if (n<50 && !TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
/* Find an eraseblock queue. */
if (n<50 && !TAILQ_EMPTY(&chmp->chm_erase_pending_queue)) {
dbg_gc("Picking block from erase_pending_queue to GC next\n");
nextqueue = &chmp->chm_erase_pending_queue;
} else if (n<110 && !TAILQ_EMPTY(&chmp->chm_very_dirty_queue) ) {
@ -381,18 +395,19 @@ again:
return NULL;
}
/* Get the first block of the queue. */
ret = TAILQ_FIRST(nextqueue);
if (chmp->chm_nextblock) {
dbg_gc("nextblock num: %u - gcblock num: %u\n",
chmp->chm_nextblock->lnr, ret->lnr);
if (ret == chmp->chm_nextblock)
goto again;
//KASSERT(ret != chmp->chm_nextblock);
//dbg_gc("first node lnr: %u ofs: %u\n", ret->first_node->lnr, ret->first_node->offset);
//dbg_gc("last node lnr: %u ofs: %u\n", ret->last_node->lnr, ret->last_node->offset);
}
TAILQ_REMOVE(nextqueue, ret, queue);
/* Set GC block. */
chmp->chm_gcblock = ret;
/* Set GC node. */
ret->gc_node = ret->first_node;
if (!ret->gc_node) {
@ -404,7 +419,7 @@ again:
return ret;
}
/* chfs_gcollect_pass - this is the main function of GC */
int
chfs_gcollect_pass(struct chfs_mount *chmp)
{
@ -419,13 +434,16 @@ chfs_gcollect_pass(struct chfs_mount *chmp)
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
/* Check all vnodes. */
for (;;) {
mutex_enter(&chmp->chm_lock_sizes);
/* Check unchecked size. */
dbg_gc("unchecked size == %u\n", chmp->chm_unchecked_size);
if (!chmp->chm_unchecked_size)
break;
/* Compare vnode number to the maximum. */
if (chmp->chm_checked_vno > chmp->chm_max_vno) {
mutex_exit(&chmp->chm_lock_sizes);
dbg_gc("checked_vno (#%llu) > max_vno (#%llu)\n",
@ -440,6 +458,8 @@ chfs_gcollect_pass(struct chfs_mount *chmp)
dbg_gc("checking vno #%llu\n",
(unsigned long long)chmp->chm_checked_vno);
dbg_gc("get vnode cache\n");
/* OK, Get and check the vnode cache. */
vc = chfs_vnode_cache_get(chmp, chmp->chm_checked_vno++);
if (!vc) {
@ -454,14 +474,17 @@ chfs_gcollect_pass(struct chfs_mount *chmp)
continue;
}
/* Find out the state of the vnode. */
dbg_gc("switch\n");
switch (vc->state) {
case VNO_STATE_CHECKEDABSENT:
/* FALLTHROUGH */
case VNO_STATE_PRESENT:
mutex_exit(&chmp->chm_lock_vnocache);
continue;
case VNO_STATE_GC:
/* FALLTHROUGH */
case VNO_STATE_CHECKING:
mutex_exit(&chmp->chm_lock_vnocache);
dbg_gc("VNO_STATE GC or CHECKING\n");
@ -473,8 +496,6 @@ chfs_gcollect_pass(struct chfs_mount *chmp)
/* XXX why do we need the delay here?! */
kpause("chvncrea", true, mstohz(50), NULL);
// sleep_on_spinunlock(&chmp->chm_lock_vnocache);
// KASSERT(!mutex_owned(&chmp->chm_lock_vnocache));
return 0;
default:
@ -487,20 +508,21 @@ chfs_gcollect_pass(struct chfs_mount *chmp)
;
}
/* We found an unchecked vnode. */
vc->state = VNO_STATE_CHECKING;
/* XXX check if this is too heavy to call under
* chm_lock_vnocache
*/
ret = chfs_check(chmp, vc);
dbg_gc("set state\n");
vc->state = VNO_STATE_CHECKEDABSENT;
mutex_exit(&chmp->chm_lock_vnocache);
return ret;
}
/* Get GC block. */
eb = chmp->chm_gcblock;
if (!eb) {
@ -522,13 +544,13 @@ chfs_gcollect_pass(struct chfs_mount *chmp)
goto eraseit;
}
/* Get GC node. */
nref = eb->gc_node;
//dbg_gc("gc use: %u\n", chmp->chm_nextblock->lnr);
//dbg_gc("nref: %u %u\n", nref->nref_lnr, nref->nref_offset);
gcblock_dirty = eb->dirty_size;
/* Find a node which wasn't obsoleted yet.
 * Obsoleted nodes will simply be deleted after the whole block has been checked. */
while(CHFS_REF_OBSOLETE(nref)) {
//dbg_gc("obsoleted nref lnr: %u - offset: %u\n", nref->nref_lnr, nref->nref_offset);
#ifdef DBG_MSG_GC
if (nref == chmp->chm_blocks[nref->nref_lnr].last_node) {
dbg_gc("THIS NODE IS THE LAST NODE OF ITS EB\n");
@ -536,18 +558,19 @@ chfs_gcollect_pass(struct chfs_mount *chmp)
#endif
nref = node_next(nref);
if (!nref) {
//dbg_gc("!nref\n");
eb->gc_node = nref;
mutex_exit(&chmp->chm_lock_sizes);
panic("CHFS BUG - nref is NULL)\n");
}
}
/* We found a "not obsoleted" node. */
eb->gc_node = nref;
//dbg_gc("nref the chosen one lnr: %u - offset: %u\n", nref->nref_lnr, nref->nref_offset);
KASSERT(nref->nref_lnr == chmp->chm_gcblock->lnr);
/* Check if node is in any chain. */
if (!nref->nref_next) {
//dbg_gc("!nref->nref_next\n");
/* This node is not in any chain. Simply collect it, or obsolete. */
mutex_exit(&chmp->chm_lock_sizes);
if (CHFS_REF_FLAGS(nref) == CHFS_PRISTINE_NODE_MASK) {
chfs_gcollect_pristine(chmp, eb, NULL, nref);
@ -559,39 +582,38 @@ chfs_gcollect_pass(struct chfs_mount *chmp)
mutex_exit(&chmp->chm_lock_sizes);
//dbg_gc("enter vnocache lock on #%llu\n", vc->vno);
mutex_enter(&chmp->chm_lock_vnocache);
dbg_gc("nref lnr: %u - offset: %u\n", nref->nref_lnr, nref->nref_offset);
vc = chfs_nref_to_vc(nref);
/* Check the state of the node. */
dbg_gc("switch\n");
switch(vc->state) {
case VNO_STATE_CHECKEDABSENT:
if (CHFS_REF_FLAGS(nref) == CHFS_PRISTINE_NODE_MASK) {
vc->state = VNO_STATE_GC;
}
break;
if (CHFS_REF_FLAGS(nref) == CHFS_PRISTINE_NODE_MASK) {
vc->state = VNO_STATE_GC;
}
break;
case VNO_STATE_PRESENT:
break;
break;
case VNO_STATE_UNCHECKED:
/* FALLTHROUGH */
case VNO_STATE_CHECKING:
/* FALLTHROUGH */
case VNO_STATE_GC:
mutex_exit(&chmp->chm_lock_vnocache);
panic("CHFS BUG - vc state unchecked,"
" checking or gc (vno #%llu, num #%d)\n",
(unsigned long long)vc->vno, vc->state);
mutex_exit(&chmp->chm_lock_vnocache);
panic("CHFS BUG - vc state unchecked,"
" checking or gc (vno #%llu, num #%d)\n",
(unsigned long long)vc->vno, vc->state);
case VNO_STATE_READING:
mutex_exit(&chmp->chm_lock_vnocache);
/* XXX why do we need the delay here?! */
kpause("chvncrea", true, mstohz(50), NULL);
// sleep_on_spinunlock(&chmp->chm_lock_vnocache);
// KASSERT(!mutex_owned(&chmp->chm_lock_vnocache));
return 0;
/* Node is in use at this time. */
mutex_exit(&chmp->chm_lock_vnocache);
kpause("chvncrea", true, mstohz(50), NULL);
return 0;
}
if (vc->state == VNO_STATE_GC) {
@ -606,6 +628,7 @@ chfs_gcollect_pass(struct chfs_mount *chmp)
mutex_enter(&chmp->chm_lock_vnocache);
}
/* Collect living node. */
vno = vc->vno;
pvno = vc->pvno;
nlink = vc->nlink;
@ -639,6 +662,7 @@ eraseit:
dbg_gc("eraseit\n");
if (chmp->chm_gcblock) {
/* This is only for debugging. */
dbg_gc("eb used size = %u\n", chmp->chm_gcblock->used_size);
dbg_gc("eb free size = %u\n", chmp->chm_gcblock->free_size);
dbg_gc("eb dirty size = %u\n", chmp->chm_gcblock->dirty_size);
@ -653,6 +677,7 @@ eraseit:
}
/* Check the state of GC block. */
if (chmp->chm_gcblock && chmp->chm_gcblock->dirty_size +
chmp->chm_gcblock->wasted_size == chmp->chm_ebh->eb_size) {
dbg_gc("Block at leb #%u completely obsoleted by GC, "
@ -672,6 +697,7 @@ eraseit:
}
/* chfs_gcollect_pristine - collects a pristine node */
int
chfs_gcollect_pristine(struct chfs_mount *chmp, struct chfs_eraseblock *cheb,
struct chfs_vnode_cache *chvc, struct chfs_node_ref *nref)
@ -696,6 +722,7 @@ chfs_gcollect_pristine(struct chfs_mount *chmp, struct chfs_eraseblock *cheb,
ofs = CHFS_GET_OFS(nref->nref_offset);
/* Read header. */
ret = chfs_read_leb(chmp, nref->nref_lnr, data, ofs, totlen, &retlen);
if (ret) {
dbg_gc("reading error\n");
@ -706,7 +733,8 @@ chfs_gcollect_pristine(struct chfs_mount *chmp, struct chfs_eraseblock *cheb,
return EIO;
}
nhdr = (struct chfs_flash_node_hdr *)data;
/* check the header */
/* Check the header. */
if (le16toh(nhdr->magic) != CHFS_FS_MAGIC_BITMASK) {
dbg_gc("node header magic number error\n");
return EBADF;
@ -717,41 +745,46 @@ chfs_gcollect_pristine(struct chfs_mount *chmp, struct chfs_eraseblock *cheb,
return EBADF;
}
/* Read the remaining parts. */
switch(le16toh(nhdr->type)) {
case CHFS_NODETYPE_VNODE:
fvnode = (struct chfs_flash_vnode *)data;
/* vnode information node */
fvnode = (struct chfs_flash_vnode *)data;
crc = crc32(0, (uint8_t *)fvnode, sizeof(struct chfs_flash_vnode) - 4);
if (crc != le32toh(fvnode->node_crc)) {
dbg_gc("vnode crc error\n");
return EBADF;
}
break;
dbg_gc("vnode crc error\n");
return EBADF;
}
break;
case CHFS_NODETYPE_DIRENT:
fdirent = (struct chfs_flash_dirent_node *)data;
/* dirent node */
fdirent = (struct chfs_flash_dirent_node *)data;
crc = crc32(0, (uint8_t *)fdirent, sizeof(struct chfs_flash_dirent_node) - 4);
if (crc != le32toh(fdirent->node_crc)) {
dbg_gc("dirent crc error\n");
return EBADF;
}
dbg_gc("dirent crc error\n");
return EBADF;
}
crc = crc32(0, fdirent->name, fdirent->nsize);
if (crc != le32toh(fdirent->name_crc)) {
dbg_gc("dirent name crc error\n");
return EBADF;
}
break;
dbg_gc("dirent name crc error\n");
return EBADF;
}
break;
case CHFS_NODETYPE_DATA:
fdata = (struct chfs_flash_data_node *)data;
/* data node */
fdata = (struct chfs_flash_data_node *)data;
crc = crc32(0, (uint8_t *)fdata, sizeof(struct chfs_flash_data_node) - 4);
if (crc != le32toh(fdata->node_crc)) {
dbg_gc("data node crc error\n");
return EBADF;
}
break;
dbg_gc("data node crc error\n");
return EBADF;
}
break;
default:
if (chvc) {
dbg_gc("unknown node have vnode cache\n");
return EBADF;
}
/* unknown node */
if (chvc) {
dbg_gc("unknown node have vnode cache\n");
return EBADF;
}
}
/* CRC's OK, write node to its new place */
retry:
@ -766,12 +799,14 @@ retry:
ofs = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
newnref->nref_offset = ofs;
/* write out the whole node */
vec.iov_base = (void *)data;
vec.iov_len = totlen;
mutex_enter(&chmp->chm_lock_sizes);
ret = chfs_write_wbuf(chmp, &vec, 1, ofs, &retlen);
if (ret || retlen != totlen) {
/* error while writing */
chfs_err("error while writing out to the media\n");
chfs_err("err: %d | size: %zu | retlen : %zu\n",
ret, totlen, retlen);
@ -782,14 +817,15 @@ retry:
return EIO;
}
/* try again */
retries++;
mutex_exit(&chmp->chm_lock_sizes);
goto retry;
}
/* update vnode information */
mutex_exit(&chmp->chm_lock_sizes);
//TODO should we set free_size?
//chfs_mark_node_obsolete(chmp, nref);
mutex_enter(&chmp->chm_lock_vnocache);
chfs_add_vnode_ref_to_vc(chmp, chvc, newnref);
mutex_exit(&chmp->chm_lock_vnocache);
@ -797,6 +833,7 @@ retry:
}
/* chfs_gcollect_live - collects a living node */
int
chfs_gcollect_live(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, struct chfs_node_ref *nref,
@ -827,7 +864,7 @@ chfs_gcollect_live(struct chfs_mount *chmp,
goto upnout;
}
/* find fn */
/* Find data node. */
dbg_gc("find full dnode\n");
for(frag = frag_first(&ip->fragtree);
frag; frag = frag_next(&ip->fragtree, frag)) {
@ -852,12 +889,10 @@ chfs_gcollect_live(struct chfs_mount *chmp,
if (ret != EBADF)
goto upnout;
}
//ret = chfs_gcollect_hole(chmp, cheb, ip, fn, start, end);
ret = chfs_gcollect_dnode(chmp, cheb, ip, fn, start, end);
goto upnout;
}
/* Is it a dirent? */
dbg_gc("find full dirent\n");
is_dirent = false;
@ -869,8 +904,10 @@ chfs_gcollect_live(struct chfs_mount *chmp,
}
if (is_dirent && fd->vno) {
/* Living dirent. */
ret = chfs_gcollect_dirent(chmp, cheb, ip, fd);
} else if (is_dirent) {
/* Already deleted dirent. */
ret = chfs_gcollect_deletion_dirent(chmp, cheb, ip, fd);
} else {
dbg_gc("Nref at leb #%u offset 0x%08x wasn't in node list"
@ -887,17 +924,21 @@ upnout:
return ret;
}
/* chfs_gcollect_vnode - collects a vnode information node */
int
chfs_gcollect_vnode(struct chfs_mount *chmp, struct chfs_inode *ip)
{
int ret;
dbg_gc("gcollect_vnode\n");
/* Simply write the new vnode information to the flash
* with GC's space allocation */
ret = chfs_write_flash_vnode(chmp, ip, ALLOC_GC);
return ret;
}
/* chfs_gcollect_dirent - collects a dirent */
int
chfs_gcollect_dirent(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, struct chfs_inode *parent,
@ -907,6 +948,7 @@ chfs_gcollect_dirent(struct chfs_mount *chmp,
struct chfs_inode *ip;
dbg_gc("gcollect_dirent\n");
/* Find vnode. */
vnode = chfs_vnode_lookup(chmp, fd->vno);
/* XXX maybe KASSERT or panic on this? */
@ -916,16 +958,21 @@ chfs_gcollect_dirent(struct chfs_mount *chmp,
ip = VTOI(vnode);
/* Remove and obsolete the previous version. */
mutex_enter(&chmp->chm_lock_vnocache);
chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
&parent->chvc->dirents);
mutex_exit(&chmp->chm_lock_vnocache);
/* Write the new dirent to the flash. */
return chfs_write_flash_dirent(chmp,
parent, ip, fd, fd->vno, ALLOC_GC);
}
/* Check dirents what are marked as deleted. */
/*
* chfs_gcollect_deletion_dirent -
* collects a dirent that was marked as deleted
*/
int
chfs_gcollect_deletion_dirent(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, struct chfs_inode *parent,
@ -942,6 +989,7 @@ chfs_gcollect_deletion_dirent(struct chfs_mount *chmp,
dbg_gc("gcollect_deletion_dirent\n");
/* Check node. */
name_len = strlen(fd->name);
name_crc = crc32(0, fd->name, name_len);
@ -949,18 +997,7 @@ chfs_gcollect_deletion_dirent(struct chfs_mount *chmp,
vnode = chfs_vnode_lookup(chmp, fd->vno);
//dbg_gc("ip from vnode\n");
//VFS_VGET(chmp->chm_fsmp, fd->vno, &vnode);
//ip = VTOI(vnode);
//vput(vnode);
//dbg_gc("mutex enter erase_completion_lock\n");
// dbg_gc("alloc chfdn\n");
// chfdn = kmem_alloc(nref_len, KM_SLEEP);
// if (!chfdn)
// return ENOMEM;
/* Find it in parent dirents. */
for (nref = parent->chvc->dirents;
nref != (void*)parent->chvc;
nref = nref->nref_next) {
@ -977,6 +1014,7 @@ chfs_gcollect_deletion_dirent(struct chfs_mount *chmp,
continue;
}
/* read it from flash */
ret = chfs_read_leb(chmp,
nref->nref_lnr, (void*)&chfdn, CHFS_GET_OFS(nref->nref_offset),
nref_len, &retlen);
@ -1010,31 +1048,27 @@ chfs_gcollect_deletion_dirent(struct chfs_mount *chmp,
if (memcmp(chfdn.name, fd->name, name_len))
continue;
// kmem_free(chfdn, nref_len);
mutex_enter(&chmp->chm_lock_vnocache);
chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
&parent->chvc->dirents);
mutex_exit(&chmp->chm_lock_vnocache);
//chfs_mark_node_obsolete(chmp, fd->nref);
return chfs_write_flash_dirent(chmp,
parent, NULL, fd, fd->vno, ALLOC_GC);
}
// kmem_free(chfdn, nref_len);
/* Simply remove it from the parent dirents. */
TAILQ_REMOVE(&parent->dents, fd, fds);
chfs_free_dirent(fd);
return 0;
}
/* chfs_gcollect_dnode - collects a data node */
int
chfs_gcollect_dnode(struct chfs_mount *chmp,
struct chfs_eraseblock *orig_cheb, struct chfs_inode *ip,
struct chfs_full_dnode *fn, uint32_t orig_start, uint32_t orig_end)
{
struct chfs_node_ref *nref;
//struct chfs_node_ref *prev;
struct chfs_full_dnode *newfn;
struct chfs_flash_data_node *fdnode;
int ret = 0, retries = 0;
@ -1044,87 +1078,13 @@ chfs_gcollect_dnode(struct chfs_mount *chmp,
size_t retlen;
dbg_gc("gcollect_dnode\n");
//uint32_t used_size;
//TODO merge frags
/* TODO GC merging frags, should we use it?
uint32_t start, end;
start = orig_start;
end = orig_end;
if (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks > chmp->chm_resv_blocks_gcmerge) {
struct chfs_node_frag *frag;
uint32_t min, max;
min = start & (PAGE_CACHE_SIZE-1);
max = min + PAGE_CACHE_SIZE;
frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->i_chfs_ext.fragtree, &start);
KASSERT(frag->ofs == start);
while ((frag = frag_prev(&ip->i_chfs_ext.fragtree, frag)) && frag->ofs >= min) {
if (frag->ofs > min) {
start = frag->ofs;
continue;
}
if (!frag->node || !frag->node->nref) {
break;
} else {
struct chfs_node_ref *nref = frag->node->nref;
struct chfs_eraseblock *cheb;
cheb = &chmp->chm_blocks[nref->nref_lnr];
if (cheb == chmp->chm_gcblock)
start = frag->ofs;
//TODO is this a clean block?
start = frag->ofs;
break;
}
}
end--;
frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->i_chfs_ext.fragtree, &(end));
while ((frag = frag_next(&ip->i_chfs_ext.fragtree, frag)) && (frag->ofs + frag->size <= max)) {
if (frag->ofs + frag->size < max) {
end = frag->ofs + frag->size;
continue;
}
if (!frag->node || !frag->node->nref) {
break;
} else {
struct chfs_node_ref *nref = frag->node->nref;
struct chfs_eraseblock *cheb;
cheb = &chmp->chm_blocks[nref->nref_lnr];
if (cheb == chmp->chm_gcblock)
end = frag->ofs + frag->size;
//TODO is this a clean block?
end = frag->ofs + frag->size;
break;
}
}
KASSERT(end <=
frag_last(&ip->i_chfs_ext.fragtree)->ofs +
frag_last(&ip->i_chfs_ext.fragtree)->size);
KASSERT(end >= orig_end);
KASSERT(start <= orig_start);
}
*/
KASSERT(orig_cheb->lnr == fn->nref->nref_lnr);
totlen = chfs_nref_len(chmp, orig_cheb, fn->nref);
data = kmem_alloc(totlen, KM_SLEEP);
/* Read the node from the flash. */
ret = chfs_read_leb(chmp, fn->nref->nref_lnr, data, fn->nref->nref_offset,
totlen, &retlen);
@ -1137,6 +1097,7 @@ chfs_gcollect_dnode(struct chfs_mount *chmp,
vec.iov_len = totlen;
retry:
/* Set the next block where we can write. */
ret = chfs_reserve_space_gc(chmp, totlen);
if (ret)
goto out;
@ -1153,8 +1114,10 @@ retry:
KASSERT(nref->nref_offset % 4 == 0);
chfs_change_size_free(chmp, chmp->chm_nextblock, -totlen);
/* Write it to the writebuffer. */
ret = chfs_write_wbuf(chmp, &vec, 1, nref->nref_offset, &retlen);
if (ret || retlen != totlen) {
/* error during writing */
chfs_err("error while writing out to the media\n");
chfs_err("err: %d | size: %d | retlen : %zu\n",
ret, totlen, retlen);
@ -1165,6 +1128,7 @@ retry:
goto out;
}
/* try again */
retries++;
mutex_exit(&chmp->chm_lock_sizes);
goto retry;
@ -1176,6 +1140,7 @@ retry:
mutex_exit(&chmp->chm_lock_sizes);
KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);
/* Set fields of the new node. */
newfn = chfs_alloc_full_dnode();
newfn->nref = nref;
newfn->ofs = fn->ofs;
@ -1183,9 +1148,11 @@ retry:
newfn->frags = 0;
mutex_enter(&chmp->chm_lock_vnocache);
/* Remove every part of the old node. */
chfs_remove_frags_of_node(chmp, &ip->fragtree, fn->nref);
chfs_remove_and_obsolete(chmp, ip->chvc, fn->nref, &ip->chvc->dnode);
/* Add the new nref to inode. */
chfs_add_full_dnode_to_inode(chmp, ip, newfn);
chfs_add_node_to_list(chmp,
ip->chvc, newfn->nref, &ip->chvc->dnode);

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_ihash.c,v 1.1 2011/11/24 15:51:31 ahoka Exp $ */
/* $NetBSD: chfs_ihash.c,v 1.2 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -148,12 +148,9 @@ loop:
LIST_FOREACH(ip, ipp, hash_entry) {
dbg("ip: %p\n", ip);
if (inum == ip->ino && dev == ip->dev) {
// printf("chfs_ihashget: found inode: %p\n", ip);
vp = ITOV(ip);
KASSERT(vp != NULL);
//dbg("found\n");
if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) {
//dbg("wait for #%llu\n", ip->ino);
mutex_exit(&chfs_ihash_lock);
goto loop;
}
@ -164,22 +161,17 @@ loop:
dbg("isn't locked\n");
*/
if (flags == 0) {
//dbg("no flags\n");
mutex_exit(&chfs_ihash_lock);
} else {
//dbg("vget\n");
mutex_enter(vp->v_interlock);
mutex_exit(&chfs_ihash_lock);
if (vget(vp, flags)) {
goto loop;
}
//dbg("got it\n");
}
//dbg("return\n");
return (vp);
}
}
//dbg("not found\n");
mutex_exit(&chfs_ihash_lock);
return (NULL);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_inode.h,v 1.5 2012/04/18 13:31:10 joerg Exp $ */
/* $NetBSD: chfs_inode.h,v 1.6 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -61,7 +61,7 @@ enum chtype {
#define CHTTOVT(ch_type) (enum vtype)(ch_type)
#define VTTOCHT(v_type) (enum chtype)(v_type)
/* vtype replaced with chtype, these are only for compatibility */
/* vtype replaced with chtype, these are only for backward compatibility */
static const enum chtype iftocht_tab[16] = {
CHT_BLANK, CHT_FIFO, CHT_CHR, CHT_BLANK,
CHT_DIR, CHT_BLANK, CHT_BLK, CHT_BLANK,
@ -75,11 +75,11 @@ static const enum chtype iftocht_tab[16] = {
struct chfs_inode
{
struct genfs_node gnode;
kmutex_t inode_lock; /* lock the fields of chfs_inode */
kmutex_t inode_lock; /* lock the fields of chfs_inode */
LIST_ENTRY(chfs_inode) hash_entry; /* Hash chain. */
LIST_ENTRY(chfs_inode) hash_entry; /* hash chain */
struct ufsmount *ump; /* ufs mount - TODO we should remove it */
struct ufsmount *ump; /* ufs mount - TODO we should remove it */
struct chfs_mount *chmp; /* chfs mount point - TODO we should remove it */
struct vnode *vp; /* vnode associated with this inode */
@ -90,31 +90,28 @@ struct chfs_inode
struct chfs_vnode_cache *chvc; /* vnode cache of this node */
struct chfs_dirent *fd; /* full dirent of this node */
// struct chfs_dirent *dents; /* directory entries */
struct chfs_dirent *fd; /* full dirent of this node */
struct chfs_dirent_list dents;
struct rb_tree fragtree; /* fragtree of inode */
struct rb_tree fragtree; /* fragtree of inode */
uint64_t version; /* version number */
//uint64_t highest_version; /* highest vers. num. (used at data nodes) */
uint64_t version; /* version number */
uint32_t mode; /* mode */
enum chtype ch_type; /* chfs file type */
//int16_t nlink; /* link count */
uint64_t size; /* file byte count */
uint32_t mode; /* mode */
enum chtype ch_type; /* chfs file type */
uint64_t size; /* file byte count */
uint64_t write_size; /* increasing while write the file out to the flash */
uint32_t uid; /* file owner */
uint32_t gid; /* file group */
uint32_t atime; /* access time */
uint32_t mtime; /* modify time */
uint32_t ctime; /* creation time */
uint32_t uid; /* file owner */
uint32_t gid; /* file group */
uint32_t atime; /* access time */
uint32_t mtime; /* modify time */
uint32_t ctime; /* creation time */
uint32_t iflag; /* flags, see below */
uint32_t flags; /* status flags (chflags) */
uint32_t iflag; /* flags, see below */
uint32_t flags; /* status flags (chflags) */
dev_t rdev; /* used if type is VCHR or VBLK or VFIFO*/
char *target; /* used if type is VLNK */
dev_t rdev; /* used if type is VCHR or VBLK or VFIFO*/
char *target; /* used if type is VLNK */
};
/* These flags are kept in chfs_inode->iflag. */
@ -140,7 +137,9 @@ struct chfs_inode
# undef ITOV
#endif
/* struct vnode to struct chfs_inode */
#define VTOI(vp) ((struct chfs_inode *)(vp)->v_data)
/* struct chfs_inode to struct vnode */
#define ITOV(ip) ((ip)->vp)
/* copied from ufs_dinode.h */

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_malloc.c,v 1.3 2012/08/10 09:26:58 ttoth Exp $ */
/* $NetBSD: chfs_malloc.c,v 1.4 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -44,6 +44,7 @@ pool_cache_t chfs_node_frag_cache;
pool_cache_t chfs_tmp_dnode_cache;
pool_cache_t chfs_tmp_dnode_info_cache;
/* chfs_alloc_pool_caches - allocating pool caches */
int
chfs_alloc_pool_caches(void)
{
@ -117,6 +118,7 @@ err_vnode:
return ENOMEM;
}
/* chfs_destroy_pool_caches - destroying pool caches */
void
chfs_destroy_pool_caches(void)
{
@ -145,6 +147,7 @@ chfs_destroy_pool_caches(void)
pool_cache_destroy(chfs_tmp_dnode_info_cache);
}
/* chfs_vnode_cache_alloc - allocating and initializing a vnode cache */
struct chfs_vnode_cache *
chfs_vnode_cache_alloc(ino_t vno)
{
@ -153,6 +156,7 @@ chfs_vnode_cache_alloc(ino_t vno)
memset(vc, 0, sizeof(*vc));
vc->vno = vno;
/* the vnode cache itself is the last element of each of its chains */
vc->v = (void *)vc;
vc->dirents = (void *)vc;
vc->dnode = (void *)vc;
@ -162,14 +166,14 @@ chfs_vnode_cache_alloc(ino_t vno)
return vc;
}
/* chfs_vnode_cache_free - freeing a vnode cache */
void
chfs_vnode_cache_free(struct chfs_vnode_cache *vc)
{
//kmem_free(vc->vno_version, sizeof(uint64_t));
pool_cache_put(chfs_vnode_cache, vc);
}
/**
/*
* chfs_alloc_refblock - allocating a refblock
*
* Returns a pointer to the first element in the block.
@ -198,18 +202,15 @@ chfs_alloc_refblock(void)
return nref;
}
/**
* chfs_free_refblock - freeing a refblock
*/
/* chfs_free_refblock - freeing a refblock */
void
chfs_free_refblock(struct chfs_node_ref *nref)
{
pool_cache_put(chfs_nrefs_cache, nref);
}
/**
/*
* chfs_alloc_node_ref - allocating a node ref from a refblock
* @cheb: eraseblock information structure
*
* Allocates a node ref from a refblock; if there isn't any free element in the
* block, a new block is allocated and linked to the current one.
@ -222,7 +223,7 @@ chfs_alloc_node_ref(struct chfs_eraseblock *cheb)
nref = cheb->last_node;
if (!nref) {
//There haven't been any nref allocated for this block yet
/* No nrefs have been allocated for this block yet */
nref = chfs_alloc_refblock();
cheb->first_node = nref;
@ -235,6 +236,7 @@ chfs_alloc_node_ref(struct chfs_eraseblock *cheb)
nref++;
if (nref->nref_lnr == REF_LINK_TO_NEXT) {
/* this was the last element, allocate a new block */
new = chfs_alloc_refblock();
nref->nref_next = new;
nref = new;
@ -249,10 +251,7 @@ chfs_alloc_node_ref(struct chfs_eraseblock *cheb)
return nref;
}
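Node refs are handed out from small arrays ("refblocks"): unused slots carry the REF_EMPTY_NODE sentinel in their lnr field, and the final slot of every array carries REF_LINK_TO_NEXT so its nref_next pointer can chain in the next array, which is what the branch above does when the current block runs out. A minimal userland model of that layout, with made-up block size and sentinel values:

#include <limits.h>
#include <stdlib.h>

#define MODEL_BLOCK_LEN	16		/* slots per refblock (illustrative) */
#define MODEL_EMPTY	(UINT_MAX)	/* slot not handed out yet */
#define MODEL_LINK	(UINT_MAX - 1)	/* slot only links to the next block */

struct model_ref {
	struct model_ref *next;	/* used by the link slot only */
	unsigned lnr;		/* MODEL_EMPTY, MODEL_LINK or a real LEB number */
	unsigned offset;
};

/* Allocate one refblock: every slot empty, last slot reserved as the link. */
static struct model_ref *
model_alloc_refblock(void)
{
	struct model_ref *block;
	int i;

	block = calloc(MODEL_BLOCK_LEN, sizeof(*block));
	if (block == NULL)
		return NULL;
	for (i = 0; i < MODEL_BLOCK_LEN - 1; i++)
		block[i].lnr = MODEL_EMPTY;
	block[MODEL_BLOCK_LEN - 1].lnr = MODEL_LINK;
	return block;
}

Allocation then only has to step forward through the current block and, on reaching the link slot, attach a freshly allocated block, as chfs_alloc_node_ref does above.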
/**
* chfs_free_node_refs - freeing an eraseblock's node refs
* @cheb: eraseblock information structure
*/
/* chfs_free_node_refs - freeing an eraseblock's node refs */
void
chfs_free_node_refs(struct chfs_eraseblock *cheb)
{
@ -271,6 +270,7 @@ chfs_free_node_refs(struct chfs_eraseblock *cheb)
}
}
/* chfs_alloc_dirent - allocating a directory entry */
struct chfs_dirent*
chfs_alloc_dirent(int namesize)
{
@ -278,20 +278,20 @@ chfs_alloc_dirent(int namesize)
size_t size = sizeof(struct chfs_dirent) + namesize;
ret = kmem_alloc(size, KM_SLEEP);
//ret->alloc_size = size;
return ret;
}
/* chfs_free_dirent - freeing a directory entry */
void
chfs_free_dirent(struct chfs_dirent *dirent)
{
//size_t size = dirent->alloc_size;
size_t size = sizeof(struct chfs_dirent) + dirent->nsize + 1;
kmem_free(dirent, size);
}
/* chfs_alloc_full_dnode - allocating a full data node */
struct chfs_full_dnode*
chfs_alloc_full_dnode(void)
{
@ -302,12 +302,14 @@ chfs_alloc_full_dnode(void)
return ret;
}
/* chfs_free_full_dnode - freeing a full data node */
void
chfs_free_full_dnode(struct chfs_full_dnode *fd)
{
kmem_free(fd,(sizeof(struct chfs_full_dnode)));
}
/* chfs_alloc_flash_vnode - allocating vnode info (used on flash) */
struct chfs_flash_vnode*
chfs_alloc_flash_vnode(void)
{
@ -316,12 +318,14 @@ chfs_alloc_flash_vnode(void)
return ret;
}
/* chfs_free_flash_vnode - freeing vnode info */
void
chfs_free_flash_vnode(struct chfs_flash_vnode *fvnode)
{
pool_cache_put(chfs_flash_vnode_cache, fvnode);
}
/* chfs_alloc_flash_dirent - allocating a directory entry (used on flash) */
struct chfs_flash_dirent_node*
chfs_alloc_flash_dirent(void)
{
@ -330,12 +334,14 @@ chfs_alloc_flash_dirent(void)
return ret;
}
/* chfs_free_flash_dirent - freeing a (flash) directory entry */
void
chfs_free_flash_dirent(struct chfs_flash_dirent_node *fdnode)
{
pool_cache_put(chfs_flash_dirent_cache, fdnode);
}
/* chfs_alloc_flash_dnode - allocating a data node (used on flash) */
struct chfs_flash_data_node*
chfs_alloc_flash_dnode(void)
{
@ -344,28 +350,30 @@ chfs_alloc_flash_dnode(void)
return ret;
}
/* chfs_free_flash_dnode - freeing a (flash) data node */
void
chfs_free_flash_dnode(struct chfs_flash_data_node *fdnode)
{
pool_cache_put(chfs_flash_dnode_cache, fdnode);
}
/* chfs_alloc_node_frag - allocating a fragment of a node */
struct chfs_node_frag*
chfs_alloc_node_frag(void)
{
struct chfs_node_frag *ret;
ret = pool_cache_get(chfs_node_frag_cache, 0);
return ret;
}
/* chfs_free_node_frag - freeing a fragment of a node */
void
chfs_free_node_frag(struct chfs_node_frag *frag)
{
pool_cache_put(chfs_node_frag_cache, frag);
}
/* chfs_alloc_tmp_dnode - allocating a temporarily used dnode */
struct chfs_tmp_dnode *
chfs_alloc_tmp_dnode(void)
{
@ -375,12 +383,14 @@ chfs_alloc_tmp_dnode(void)
return ret;
}
/* chfs_free_tmp_dnode - freeing a temporarily used dnode */
void
chfs_free_tmp_dnode(struct chfs_tmp_dnode *td)
{
pool_cache_put(chfs_tmp_dnode_cache, td);
}
/* chfs_alloc_tmp_dnode_info - allocating a temporarily used dnode descriptor */
struct chfs_tmp_dnode_info *
chfs_alloc_tmp_dnode_info(void)
{
@ -390,6 +400,7 @@ chfs_alloc_tmp_dnode_info(void)
return ret;
}
/* chfs_free_tmp_dnode_info - freeing a temporarily used dnode descriptor */
void
chfs_free_tmp_dnode_info(struct chfs_tmp_dnode_info *di)
{

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_nodeops.c,v 1.2 2012/08/10 09:26:58 ttoth Exp $ */
/* $NetBSD: chfs_nodeops.c,v 1.3 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -35,13 +35,10 @@
#include "chfs.h"
/**
/*
* chfs_update_eb_dirty - updates dirty and free space, first and
* last node references
* @sbi: CHFS main descriptor structure
* @cheb: eraseblock to update
* @size: increase dirty space size with this
* Returns zero in case of success, %1 in case of fail.
* Returns zero in case of success, 1 in case of failure.
*/
int
chfs_update_eb_dirty(struct chfs_mount *chmp,
@ -59,19 +56,14 @@ chfs_update_eb_dirty(struct chfs_mount *chmp,
return 1;
}
mutex_enter(&chmp->chm_lock_sizes);
//dbg("BEFORE: free_size: %d\n", cheb->free_size);
chfs_change_size_free(chmp, cheb, -size);
chfs_change_size_dirty(chmp, cheb, size);
//dbg(" AFTER: free_size: %d\n", cheb->free_size);
mutex_exit(&chmp->chm_lock_sizes);
return 0;
}
/**
/*
* chfs_add_node_to_list - adds a data node ref to vnode cache's dnode list
* @sbi: super block informations
* @new: node ref to insert
* @list: head of the list
* This function inserts a data node ref into the vnode cache's list.
* The list is sorted by data node's lnr and offset.
*/
@ -121,7 +113,8 @@ chfs_add_node_to_list(struct chfs_mount *chmp,
}
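Since the per-vnode data node chain is kept sorted by the node's lnr and offset, the insert is a plain ordered list walk. A rough userland model of it — NULL-terminated here, whereas the CHFS chains end at the vnode cache itself, and ascending order is assumed for illustration:

#include <stddef.h>

struct model_dnode {
	struct model_dnode *next;
	unsigned lnr;		/* eraseblock number */
	unsigned offset;	/* offset inside the eraseblock */
};

/* Insert 'new' so the chain stays ordered by (lnr, offset). */
static void
model_insert_sorted(struct model_dnode **head, struct model_dnode *new)
{
	struct model_dnode **pp = head;

	while (*pp != NULL && ((*pp)->lnr < new->lnr ||
	    ((*pp)->lnr == new->lnr && (*pp)->offset < new->offset)))
		pp = &(*pp)->next;
	new->next = *pp;
	*pp = new;
}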
/*
* Removes a node from a list. Usually used for removing data nodes.
* chfs_remove_node_from_list - removes a node from a list
* Usually used for removing data nodes.
*/
void
chfs_remove_node_from_list(struct chfs_mount *chmp,
@ -134,6 +127,7 @@ chfs_remove_node_from_list(struct chfs_mount *chmp,
struct chfs_node_ref *tmpnref;
if (*list == (struct chfs_node_ref *)vc) {
/* list is empty */
return;
}
@ -155,9 +149,9 @@ chfs_remove_node_from_list(struct chfs_mount *chmp,
}
/*
* Removes a node from a list and obsoletes the nref.
* chfs_remove_and_obsolete - removes a node from a list and obsoletes the nref
* We should use this function carefully on data nodes,
* because removing a frag will obsolete the node ref.
* because removing a frag will also obsolete the node ref.
*/
void
chfs_remove_and_obsolete(struct chfs_mount *chmp,
@ -174,18 +168,18 @@ chfs_remove_and_obsolete(struct chfs_mount *chmp,
chfs_mark_node_obsolete(chmp, old_nref);
}
/* chfs_add_fd_to_inode - adds a directory entry to an inode */
void
chfs_add_fd_to_inode(struct chfs_mount *chmp,
struct chfs_inode *parent, struct chfs_dirent *new)
{
// struct chfs_dirent **prev = &parent->dents;
struct chfs_dirent *fd, *tmpfd;
/* update highest version */
if (new->version > parent->chvc->highest_version) {
parent->chvc->highest_version = new->version;
}
//mutex_enter(&parent->inode_lock);
TAILQ_FOREACH_SAFE(fd, &parent->dents, fds, tmpfd) {
if (fd->nhash > new->nhash) {
/* insert new before fd */
@ -194,7 +188,6 @@ chfs_add_fd_to_inode(struct chfs_mount *chmp,
} else if (fd->nhash == new->nhash &&
!strcmp(fd->name, new->name)) {
if (new->version > fd->version) {
// new->next = fd->next;
/* replace fd with new */
TAILQ_INSERT_BEFORE(fd, new, fds);
TAILQ_REMOVE(&parent->dents, fd, fds);
@ -205,8 +198,8 @@ chfs_add_fd_to_inode(struct chfs_mount *chmp,
mutex_exit(&chmp->chm_lock_vnocache);
}
chfs_free_dirent(fd);
// *prev = new;//XXX
} else {
/* new is older (this shouldn't normally happen) */
chfs_mark_node_obsolete(chmp, new->nref);
chfs_free_dirent(new);
}
@ -216,33 +209,10 @@ chfs_add_fd_to_inode(struct chfs_mount *chmp,
/* if we couldn't fit it elsewhere, let's add it to the end */
/* FIXME insert tail or insert head? */
TAILQ_INSERT_HEAD(&parent->dents, new, fds);
//mutex_exit(&parent->inode_lock);
#if 0
while ((*prev) && (*prev)->nhash <= new->nhash) {
if ((*prev)->nhash == new->nhash &&
!strcmp((*prev)->name, new->name)) {
if (new->version > (*prev)->version) {
new->next = (*prev)->next;
if ((*prev)->nref) {
chfs_mark_node_obsolete(chmp,
(*prev)->nref);
}
chfs_free_dirent(*prev);
*prev = new;
} else {
chfs_mark_node_obsolete(chmp, new->nref);
chfs_free_dirent(new);
}
return;
}
prev = &((*prev)->next);
}
new->next = *prev;
*prev = new;
#endif
}
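The dirent list above is ordered by name hash, and a collision on the same name is settled purely by version: the newer entry stays, the loser's nref is marked obsolete and the dirent is freed. A stripped-down model of just that decision, with types invented for illustration:

#include <stdint.h>
#include <string.h>

struct model_dirent {
	uint32_t nhash;		/* hash of the name */
	uint64_t version;
	const char *name;
};

/*
 * Given an entry already on the list and a new one with a matching hash,
 * return the entry that should survive; the caller would obsolete and
 * free the other one. NULL means the names differ and there is no clash.
 */
static struct model_dirent *
model_pick_winner(struct model_dirent *old, struct model_dirent *new)
{
	if (old->nhash != new->nhash || strcmp(old->name, new->name) != 0)
		return NULL;
	return (new->version > old->version) ? new : old;
}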
/* chfs_add_vnode_ref_to_vc - adds a vnode info to the vnode cache */
void
chfs_add_vnode_ref_to_vc(struct chfs_mount *chmp,
struct chfs_vnode_cache *vc, struct chfs_node_ref *new)
@ -250,6 +220,7 @@ chfs_add_vnode_ref_to_vc(struct chfs_mount *chmp,
KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
struct chfs_node_ref *nref;
/* store only the last one, drop the others */
while (vc->v != (struct chfs_node_ref *)vc) {
nref = vc->v;
chfs_remove_and_obsolete(chmp, vc, nref, &vc->v);
@ -259,26 +230,27 @@ chfs_add_vnode_ref_to_vc(struct chfs_mount *chmp,
vc->v = new;
}
/* chfs_nref_next - step to the next in-memory nref */
struct chfs_node_ref *
chfs_nref_next(struct chfs_node_ref *nref)
{
// dbg("check nref: %u - %u\n", nref->nref_lnr, nref->nref_offset);
nref++;
// dbg("next nref: %u - %u\n", nref->nref_lnr, nref->nref_offset);
if (nref->nref_lnr == REF_LINK_TO_NEXT) {
//End of chain
/* end of chain */
if (!nref->nref_next)
return NULL;
/* link to the next block */
nref = nref->nref_next;
}
//end of chain
/* end of chain */
if (nref->nref_lnr == REF_EMPTY_NODE)
return NULL;
return nref;
}
/* chfs_nref_len - calculates the length of an nref */
int
chfs_nref_len(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, struct chfs_node_ref *nref)
@ -293,18 +265,14 @@ chfs_nref_len(struct chfs_mount *chmp,
next = chfs_nref_next(nref);
if (!next) {
//dbg("next null\n");
return chmp->chm_ebh->eb_size - cheb->free_size -
CHFS_GET_OFS(nref->nref_offset);
}
//dbg("size: %d\n", CHFS_GET_OFS(next->nref_offset) - CHFS_GET_OFS(nref->nref_offset));
return CHFS_GET_OFS(next->nref_offset) -
CHFS_GET_OFS(nref->nref_offset);
}
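Concretely: if the next nref on the chain starts at offset 5376 and this one at 5120, the node is 5376 - 5120 = 256 bytes long; for the last node on an eraseblock the free space takes the place of a successor, so with an (illustrative) 65536-byte eraseblock that still has 60000 bytes free, a last node at offset 5120 spans 65536 - 60000 - 5120 = 416 bytes.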
/**
* chfs_mark_node_obsolete - marks a node obsolete
*/
/* chfs_mark_node_obsolete - marks a node as obsolete */
void
chfs_mark_node_obsolete(struct chfs_mount *chmp,
struct chfs_node_ref *nref)
@ -332,19 +300,14 @@ chfs_mark_node_obsolete(struct chfs_mount *chmp,
#endif
len = chfs_nref_len(chmp, cheb, nref);
//dbg("len: %u\n", len);
//dbg("1. used: %u\n", cheb->used_size);
mutex_enter(&chmp->chm_lock_sizes);
if (CHFS_REF_FLAGS(nref) == CHFS_UNCHECKED_NODE_MASK) {
//dbg("UNCHECKED mark an unchecked node\n");
chfs_change_size_unchecked(chmp, cheb, -len);
//dbg("unchecked: %u\n", chmp->chm_unchecked_size);
} else {
chfs_change_size_used(chmp, cheb, -len);
//dbg("2. used: %u\n", cheb->used_size);
KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
}
chfs_change_size_dirty(chmp, cheb, len);
@ -376,8 +339,8 @@ chfs_mark_node_obsolete(struct chfs_mount *chmp,
dbg("gcblock is completely dirtied\n");
chmp->chm_gcblock = NULL;
} else {
//remove from a tailq, but we don't know which tailq contains this cheb
//so we remove it from the dirty list now
/* remove from a tailq, but we don't know which tailq contains this cheb
* so we remove it from the dirty list now */
//TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
int removed = 0;
struct chfs_eraseblock *eb, *tmpeb;
@ -448,10 +411,8 @@ chfs_mark_node_obsolete(struct chfs_mount *chmp,
return;
}
/**
/*
* chfs_close_eraseblock - close an eraseblock
* @chmp: chfs mount structure
* @cheb: eraseblock informations
*
* This function closes the physical chain of the nodes on the eraseblock,
* converts its free size to dirty and adds it to the clean, dirty or very dirty list.
@ -488,6 +449,11 @@ chfs_close_eraseblock(struct chfs_mount *chmp,
return 0;
}
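A sketch of that final classification step, with purely illustrative cut-off values (the real limits are defined in the CHFS headers):

#include <stdint.h>

enum model_queue { MODEL_CLEAN, MODEL_DIRTY, MODEL_VERY_DIRTY };

/*
 * Pick a queue for a closed eraseblock from its dirty byte count.
 * Both thresholds below are invented for illustration only.
 */
static enum model_queue
model_classify_block(uint32_t dirty_size, uint32_t eb_size)
{
	if (dirty_size <= 255)			/* hypothetical "still clean" limit */
		return MODEL_CLEAN;
	if (dirty_size >= eb_size / 2)		/* hypothetical "very dirty" limit */
		return MODEL_VERY_DIRTY;
	return MODEL_DIRTY;
}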
/*
* chfs_reserve_space_normal -
* checks available space and calls chfs_reserve_space
* used during writing
*/
int
chfs_reserve_space_normal(struct chfs_mount *chmp, uint32_t size, int prio)
{
@ -541,6 +507,7 @@ out:
}
/* chfs_reserve_space_gc - tries to reserve space for GC */
int
chfs_reserve_space_gc(struct chfs_mount *chmp, uint32_t size)
{
@ -562,11 +529,8 @@ chfs_reserve_space_gc(struct chfs_mount *chmp, uint32_t size)
return ret;
}
/**
/*
* chfs_reserve_space - finds a block whose free size is >= the requested size
* @chmp: chfs mount point
* @size: requested size
* @len: reserved spaced will be returned in this variable;
* Returns zero in case of success, error code in case of failure.
*/
int
@ -583,8 +547,6 @@ chfs_reserve_space(struct chfs_mount *chmp, uint32_t size)
KASSERT(!mutex_owned(&chmp->chm_lock_sizes));
cheb = chmp->chm_nextblock;
//if (cheb)
//dbg("cheb->free_size %u\n", cheb->free_size);
if (cheb && size > cheb->free_size) {
dbg("size: %u > free_size: %u\n", size, cheb->free_size);
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_readinode.c,v 1.5 2012/08/22 09:20:13 ttoth Exp $ */
/* $NetBSD: chfs_readinode.c,v 1.6 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -33,13 +33,6 @@
* SUCH DAMAGE.
*/
/*
* chfs_readinode.c
*
* Created on: 2010.05.31.
* Author: dtengeri
*/
#include <sys/buf.h>
#include "chfs.h"
@ -82,11 +75,7 @@ int chfs_build_fragtree(struct chfs_mount *,
/*
* --------------------------
* tmp node rbtree operations
* --------------------------
*/
/* tmp node rbtree operations */
static signed int
tmp_node_compare_nodes(void *ctx, const void *n1, const void *n2)
{
@ -113,11 +102,7 @@ const rb_tree_ops_t tmp_node_rbtree_ops = {
};
/*
* ---------------------------
* frag node rbtree operations
* ---------------------------
*/
/* frag node rbtree operations */
static signed int
frag_compare_nodes(void *ctx, const void *n1, const void *n2)
{
@ -145,12 +130,7 @@ const rb_tree_ops_t frag_rbtree_ops = {
/*
* -------------------
* tmp node operations
* -------------------
*/
/*
* Check the data CRC of the node.
* chfs_check_td_data - checks the data CRC of the node
*
* Returns: 0 - if everything OK;
* 1 - if CRC is incorrect;
@ -176,6 +156,7 @@ chfs_check_td_data(struct chfs_mount *chmp,
if (!len)
return 0;
/* Read data. */
buf = kmem_alloc(len, KM_SLEEP);
if (!buf) {
dbg("allocating error\n");
@ -188,6 +169,7 @@ chfs_check_td_data(struct chfs_mount *chmp,
goto out;
}
/* Check crc. */
if (len != retlen) {
dbg("len:%zu, retlen:%zu\n", len, retlen);
err = 2;
@ -201,6 +183,7 @@ chfs_check_td_data(struct chfs_mount *chmp,
return 1;
}
/* Correct sizes. */
CHFS_MARK_REF_NORMAL(nref);
totlen = CHFS_PAD(sizeof(struct chfs_flash_data_node) + len);
@ -216,6 +199,7 @@ out:
return err;
}
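The same convention recurs in the read, scan and GC paths below: a CRC32 computed over the node with its trailing 4-byte CRC field left out, compared against the value stored on flash in little-endian order. A small userland model of the check; zlib's crc32() stands in for the kernel routine and the struct layout is made up for illustration:

#include <sys/endian.h>	/* le32toh() */
#include <stdint.h>
#include <zlib.h>	/* crc32() */

struct model_node {
	uint8_t  body[60];	/* header + payload, laid out as on flash */
	uint32_t node_crc;	/* CRC of everything above, stored little-endian */
};

/* Return 0 when the stored CRC matches the computed one, 1 otherwise. */
static int
model_check_crc(const struct model_node *node)
{
	uint32_t calc;

	calc = (uint32_t)crc32(0, (const uint8_t *)node,
	    sizeof(*node) - sizeof(node->node_crc));
	return calc == le32toh(node->node_crc) ? 0 : 1;
}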
/* chfs_check_td_node - checks a temporary node */
int
chfs_check_td_node(struct chfs_mount *chmp, struct chfs_tmp_dnode *td)
{
@ -228,7 +212,10 @@ chfs_check_td_node(struct chfs_mount *chmp, struct chfs_tmp_dnode *td)
return ret;
}
/*
* chfs_first_valid_data_ref -
* returns the first valid nref after the given nref
*/
struct chfs_node_ref *
chfs_first_valid_data_ref(struct chfs_node_ref *nref)
{
@ -250,13 +237,19 @@ chfs_first_valid_data_ref(struct chfs_node_ref *nref)
return NULL;
}
/*
* chfs_add_tmp_dnode_to_tdi -
* adds a temporary node to a temporary node descriptor
*/
void
chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *tdi,
struct chfs_tmp_dnode *td)
{
if (!tdi->tmpnode) {
/* The chain is empty. */
tdi->tmpnode = td;
} else {
/* Insert into the chain. */
struct chfs_tmp_dnode *tmp = tdi->tmpnode;
while (tmp->next) {
tmp = tmp->next;
@ -265,13 +258,19 @@ chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *tdi,
}
}
/*
* chfs_remove_tmp_dnode_from_tdi -
* removes a temporary node from its descriptor
*/
void
chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *tdi,
struct chfs_tmp_dnode *td)
{
if (tdi->tmpnode == td) {
/* It's the first in the chain. */
tdi->tmpnode = tdi->tmpnode->next;
} else {
/* Remove from the middle of the chain. */
struct chfs_tmp_dnode *tmp = tdi->tmpnode->next;
while (tmp->next && tmp->next != td) {
tmp = tmp->next;
@ -282,6 +281,7 @@ chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *tdi,
}
}
/* chfs_kill_td - removes all components of a temporary node */
static void
chfs_kill_td(struct chfs_mount *chmp,
struct chfs_tmp_dnode *td)
@ -289,6 +289,7 @@ chfs_kill_td(struct chfs_mount *chmp,
struct chfs_vnode_cache *vc;
if (td->node) {
mutex_enter(&chmp->chm_lock_vnocache);
/* Remove the node from the vnode cache's data node chain. */
vc = chfs_nref_to_vc(td->node->nref);
chfs_remove_and_obsolete(chmp, vc, td->node->nref, &vc->dnode);
mutex_exit(&chmp->chm_lock_vnocache);
@ -297,12 +298,14 @@ chfs_kill_td(struct chfs_mount *chmp,
chfs_free_tmp_dnode(td);
}
/* chfs_kill_tdi - removes a temporary node descriptor */
static void
chfs_kill_tdi(struct chfs_mount *chmp,
struct chfs_tmp_dnode_info *tdi)
{
struct chfs_tmp_dnode *next, *tmp = tdi->tmpnode;
/* Iterate the chain and remove all temporary nodes from it. */
while (tmp) {
next = tmp->next;
chfs_kill_td(chmp, tmp);
@ -312,6 +315,10 @@ chfs_kill_tdi(struct chfs_mount *chmp,
chfs_free_tmp_dnode_info(tdi);
}
/*
* chfs_add_tmp_dnode_to_tree -
* adds a temporary node to the temporary tree
*/
int
chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp,
struct chfs_readinode_info *rii,
@ -335,6 +342,7 @@ chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp,
this = (struct chfs_tmp_dnode_info *)node;
}
}
while (node) {
this = (struct chfs_tmp_dnode_info *)node;
if (this->tmpnode->node->ofs > end_ofs)
@ -343,6 +351,7 @@ chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp,
struct chfs_tmp_dnode *tmp_td = this->tmpnode;
while (tmp_td) {
if (tmp_td->version == newtd->version) {
/* This is a new version of an old node. */
if (!chfs_check_td_node(chmp, tmp_td)) {
dbg("calling kill td 0\n");
chfs_kill_td(chmp, newtd);
@ -459,17 +468,14 @@ chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp,
}
/*
* --------------------
* frag node operations
* --------------------
*/
/* new_fragment - creates a new fragment for a data node */
struct chfs_node_frag *
new_fragment(struct chfs_full_dnode *fdn, uint32_t ofs, uint32_t size)
{
struct chfs_node_frag *newfrag;
newfrag = chfs_alloc_node_frag();
if (newfrag) {
/* Initialize fragment. */
newfrag->ofs = ofs;
newfrag->size = size;
newfrag->node = fdn;
@ -482,6 +488,10 @@ new_fragment(struct chfs_full_dnode *fdn, uint32_t ofs, uint32_t size)
return newfrag;
}
/*
* no_overlapping_node - inserts a node to the fragtree
* Puts a hole fragment into the holes between fragments.
*/
int
no_overlapping_node(struct rb_tree *fragtree,
struct chfs_node_frag *newfrag,
@ -505,6 +515,10 @@ no_overlapping_node(struct rb_tree *fragtree,
return 0;
}
/*
* chfs_add_frag_to_fragtree -
* adds a fragment to a data node's fragtree
*/
int
chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
struct rb_tree *fragtree,
@ -514,6 +528,7 @@ chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
uint32_t lastend;
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
/* Find the fragment that starts at or before the new one. */
this = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &newfrag->ofs);
if (this) {
@ -522,8 +537,8 @@ chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
lastend = 0;
}
/* New fragment is at the end of the file and there is no overlap. */
if (lastend <= newfrag->ofs) {
//dbg("no overlapping node\n");
if (lastend && (lastend - 1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
if (this->node)
CHFS_MARK_REF_NORMAL(this->node->nref);
@ -533,14 +548,12 @@ chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
}
if (newfrag->ofs > this->ofs) {
CHFS_MARK_REF_NORMAL(newfrag->node->nref);
if (this->node)
CHFS_MARK_REF_NORMAL(this->node->nref);
if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
/* newfrag is inside of this */
//dbg("newfrag is inside of this\n");
/* Newfrag is inside of this. */
struct chfs_node_frag *newfrag2;
newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
@ -555,13 +568,11 @@ chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
return 0;
}
/* newfrag is bottom of this */
//dbg("newfrag is bottom of this\n");
/* Newfrag is bottom of this. */
this->size = newfrag->ofs - this->ofs;
rb_tree_insert_node(fragtree, newfrag);
} else {
/* newfrag start at same point */
//dbg("newfrag start at same point\n");
/* Newfrag starts at the same point. */
//TODO replace instead of remove and insert
rb_tree_remove_node(fragtree, this);
rb_tree_insert_node(fragtree, newfrag);
@ -597,6 +608,10 @@ chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
return 0;
}
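Lookups against the fragtree take the greatest fragment that starts at or before the requested offset (the rb_tree_find_node_leq() call above); if the offset lies beyond that fragment's end it falls into a hole. A simplified model over a sorted array instead of the rb-tree:

#include <stddef.h>
#include <stdint.h>

struct model_frag {
	uint32_t ofs;	/* start offset within the file */
	uint32_t size;	/* bytes covered by this fragment */
};

/*
 * frags[] is sorted by ofs and non-overlapping; return the fragment
 * covering 'ofs', or NULL if the offset hits a hole or lies past the end.
 */
static const struct model_frag *
model_frag_lookup(const struct model_frag *frags, size_t n, uint32_t ofs)
{
	const struct model_frag *best = NULL;
	size_t i;

	for (i = 0; i < n && frags[i].ofs <= ofs; i++)
		best = &frags[i];
	if (best != NULL && ofs < best->ofs + best->size)
		return best;
	return NULL;
}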
/*
* chfs_remove_frags_of_node -
* removes all fragments from a fragtree and DOESN'T OBSOLETE them
*/
void
chfs_remove_frags_of_node(struct chfs_mount *chmp, struct rb_tree *fragtree,
struct chfs_node_ref *nref)
@ -608,6 +623,7 @@ chfs_remove_frags_of_node(struct chfs_mount *chmp, struct rb_tree *fragtree,
return;
}
/* Iterate the tree and clean all elements. */
this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
while (this) {
next = frag_next(fragtree, this);
@ -619,24 +635,27 @@ chfs_remove_frags_of_node(struct chfs_mount *chmp, struct rb_tree *fragtree,
}
}
/*
* chfs_kill_fragtree -
* removes all fragments from a fragtree and OBSOLETES them
*/
void
chfs_kill_fragtree(struct chfs_mount *chmp, struct rb_tree *fragtree)
{
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
struct chfs_node_frag *this, *next;
//dbg("start\n");
/* Iterate the tree and clean all elements. */
this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
while (this) {
next = frag_next(fragtree, this);
rb_tree_remove_node(fragtree, this);
chfs_obsolete_node_frag(chmp, this);
//dbg("one frag killed\n");
this = next;
}
//dbg("end\n");
}
/* chfs_truncate_fragtree - truncates the tree to a specified size */
uint32_t
chfs_truncate_fragtree(struct chfs_mount *chmp,
struct rb_tree *fragtree, uint32_t size)
@ -688,6 +707,7 @@ chfs_truncate_fragtree(struct chfs_mount *chmp,
return size;
}
/* chfs_obsolete_node_frag - obsoletes a fragment of a node */
void
chfs_obsolete_node_frag(struct chfs_mount *chmp,
struct chfs_node_frag *this)
@ -695,9 +715,11 @@ chfs_obsolete_node_frag(struct chfs_mount *chmp,
struct chfs_vnode_cache *vc;
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
if (this->node) {
/* The fragment belongs to a node. */
KASSERT(this->node->frags != 0);
this->node->frags--;
if (this->node->frags == 0) {
/* This is the last fragment. (There are no more.) */
KASSERT(!CHFS_REF_OBSOLETE(this->node->nref));
mutex_enter(&chmp->chm_lock_vnocache);
vc = chfs_nref_to_vc(this->node->nref);
@ -709,12 +731,14 @@ chfs_obsolete_node_frag(struct chfs_mount *chmp,
chfs_free_full_dnode(this->node);
} else {
/* There are more frags in the node. */
CHFS_MARK_REF_NORMAL(this->node->nref);
}
}
chfs_free_node_frag(this);
}
/* chfs_add_full_dnode_to_inode - adds a data node to an inode */
int
chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
struct chfs_inode *ip,
@ -727,6 +751,7 @@ chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
if (unlikely(!fd->size))
return 0;
/* Create a new fragment from the data node and add it to the fragtree. */
newfrag = new_fragment(fd, fd->ofs, fd->size);
if (unlikely(!newfrag))
return ENOMEM;
@ -735,6 +760,7 @@ chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
if (ret)
return ret;
/* Check previous fragment. */
if (newfrag->ofs & (PAGE_SIZE - 1)) {
struct chfs_node_frag *prev = frag_prev(&ip->fragtree, newfrag);
@ -743,6 +769,7 @@ chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
CHFS_MARK_REF_NORMAL(prev->node->nref);
}
/* Check next fragment. */
if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE - 1)) {
struct chfs_node_frag *next = frag_next(&ip->fragtree, newfrag);
@ -757,12 +784,7 @@ chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
}
/*
* -----------------------
* general node operations
* -----------------------
*/
/* get tmp nodes of an inode */
/* chfs_get_data_nodes - get temporary nodes of an inode */
int
chfs_get_data_nodes(struct chfs_mount *chmp,
struct chfs_inode *ip,
@ -785,6 +807,7 @@ chfs_get_data_nodes(struct chfs_mount *chmp,
nref = chfs_first_valid_data_ref(ip->chvc->dnode);
/* Update highest version. */
rii->highest_version = ip->chvc->highest_version;
while(nref && (struct chfs_vnode_cache *)nref != ip->chvc) {
@ -793,29 +816,33 @@ chfs_get_data_nodes(struct chfs_mount *chmp,
goto out;
dnode = (struct chfs_flash_data_node*)buf;
//check header crc
/* Check header crc. */
crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
if (crc != le32toh(dnode->hdr_crc)) {
chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->hdr_crc));
goto cont;
}
//check header magic bitmask
/* Check header magic bitmask. */
if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
chfs_err("Wrong magic bitmask.\n");
goto cont;
}
//check node crc
/* Check node crc. */
crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
if (crc != le32toh(dnode->node_crc)) {
chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));
goto cont;
}
td = chfs_alloc_tmp_dnode();
if (!td) {
chfs_err("Can't allocate tmp dnode info.\n");
err = ENOMEM;
goto out;
}
/* We don't check data crc here, just add nodes to tmp frag tree, because
* we don't want to check nodes which have been overlapped by a new node
* with a higher version number.
@ -838,6 +865,7 @@ chfs_get_data_nodes(struct chfs_mount *chmp,
rii->highest_version = td->version;
}
/* Add node to the tree. */
err = chfs_add_tmp_dnode_to_tree(chmp, rii, td);
if (err)
goto out_full_dnode;
@ -849,7 +877,6 @@ cont:
ip->chvc->highest_version = rii->highest_version;
return 0;
/* Exit points */
out_full_dnode:
chfs_free_full_dnode(td->node);
out_tmp_dnode:
@ -861,28 +888,31 @@ out:
}
/* Build final normal fragtree from tdi tree. */
/* chfs_build_fragtree - builds fragtree from temporary tree */
int
chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
struct chfs_readinode_info *rii)
{
struct chfs_tmp_dnode_info *pen, *last, *this;
struct rb_tree ver_tree; /* version tree */
struct rb_tree ver_tree; /* version tree, used only temporarily */
uint64_t high_ver = 0;
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
rb_tree_init(&ver_tree, &tmp_node_rbtree_ops);
/* Update highest version and latest node reference. */
if (rii->mdata_tn) {
high_ver = rii->mdata_tn->tmpnode->version;
rii->latest_ref = rii->mdata_tn->tmpnode->node->nref;
}
/* Iterate the temporary tree in reverse order. */
pen = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&rii->tdi_root);
while((last = pen)) {
pen = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&rii->tdi_root, last, RB_DIR_LEFT);
/* We build here a version tree from overlapped nodes. */
rb_tree_remove_node(&rii->tdi_root, last);
rb_tree_insert_node(&ver_tree, last);
@ -895,6 +925,7 @@ chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
this = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&ver_tree);
/* Start to build the fragtree. */
while (this) {
struct chfs_tmp_dnode_info *vers_next;
int ret;
@ -906,6 +937,7 @@ chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
while (tmp_td) {
struct chfs_tmp_dnode *next_td = tmp_td->next;
/* Check temporary node. */
if (chfs_check_td_node(chmp, tmp_td)) {
if (next_td) {
chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
@ -920,8 +952,10 @@ chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
rii->latest_ref = tmp_td->node->nref;
}
/* Add node to inode and its fragtree. */
ret = chfs_add_full_dnode_to_inode(chmp, ip, tmp_td->node);
if (ret) {
/* On error, clean the whole version tree. */
while (1) {
vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
while (tmp_td) {
@ -942,13 +976,15 @@ chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
return ret;
}
/* Remove temporary node from temporary descriptor.
* Shouldn't obsolete tmp_td here, because tmp_td->node
* was added to the inode. */
chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
chfs_free_tmp_dnode(tmp_td);
// shouldn't obsolete tmp_td here,
// because tmp_td->node was added to the inode
}
tmp_td = next_td;
}
/* Continue with the previous element of version tree. */
chfs_kill_tdi(chmp, this);
this = vers_next;
}
@ -957,6 +993,7 @@ chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
return 0;
}
/* chfs_read_inode - checks the state of the inode then reads and builds it */
int chfs_read_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
{
struct chfs_vnode_cache *vc = ip->chvc;
@ -966,26 +1003,27 @@ int chfs_read_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
retry:
mutex_enter(&chmp->chm_lock_vnocache);
switch (vc->state) {
case VNO_STATE_UNCHECKED:
case VNO_STATE_CHECKEDABSENT:
vc->state = VNO_STATE_READING;
break;
case VNO_STATE_CHECKING:
case VNO_STATE_GC:
mutex_exit(&chmp->chm_lock_vnocache);
//sleep_on_spinunlock(&chmp->chm_lock_vnocache);
//KASSERT(!mutex_owned(&chmp->chm_lock_vnocache));
goto retry;
break;
case VNO_STATE_PRESENT:
case VNO_STATE_READING:
chfs_err("Reading inode #%llu in state %d!\n",
(unsigned long long)vc->vno, vc->state);
chfs_err("wants to read a nonexistent ino %llu\n",
(unsigned long long)vc->vno);
return ENOENT;
default:
panic("BUG() Bad vno cache state.");
case VNO_STATE_UNCHECKED:
/* FALLTHROUGH */
case VNO_STATE_CHECKEDABSENT:
vc->state = VNO_STATE_READING;
break;
case VNO_STATE_CHECKING:
/* FALLTHROUGH */
case VNO_STATE_GC:
mutex_exit(&chmp->chm_lock_vnocache);
goto retry;
break;
case VNO_STATE_PRESENT:
/* FALLTHROUGH */
case VNO_STATE_READING:
chfs_err("Reading inode #%llu in state %d!\n",
(unsigned long long)vc->vno, vc->state);
chfs_err("wants to read a nonexistent ino %llu\n",
(unsigned long long)vc->vno);
return ENOENT;
default:
panic("BUG() Bad vno cache state.");
}
mutex_exit(&chmp->chm_lock_vnocache);
@ -993,9 +1031,8 @@ retry:
}
/*
* Read inode frags.
* Firstly get tmp nodes,
* secondly build fragtree from those.
* chfs_read_inode_internal - reads and builds an inode
* First get the temporary nodes, then build the fragtree from them.
*/
int
chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
@ -1014,7 +1051,7 @@ chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
rb_tree_init(&rii.tdi_root, &tmp_node_rbtree_ops);
/* build up a temp node frag tree */
/* Build a temporary node tree. */
err = chfs_get_data_nodes(chmp, ip, &rii);
if (err) {
if (ip->chvc->state == VNO_STATE_READING)
@ -1023,10 +1060,9 @@ chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
return err;
}
/* Build fragtree from temp nodes. */
rb_tree_init(&ip->fragtree, &frag_rbtree_ops);
/*
* build fragtree from temp nodes
*/
err = chfs_build_fragtree(chmp, ip, &rii);
if (err) {
if (ip->chvc->state == VNO_STATE_READING)
@ -1043,9 +1079,7 @@ chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
if (!buf)
return ENOMEM;
/*
* set inode size from chvc->v
*/
/* Set inode size from its vnode information node. */
err = chfs_read_leb(chmp, ip->chvc->v->nref_lnr, buf, CHFS_GET_OFS(ip->chvc->v->nref_offset), len, &retlen);
if (err || retlen != len) {
kmem_free(buf, len);
@ -1070,6 +1104,7 @@ chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
return 0;
}
/* chfs_read_data - reads and checks data of a file */
int
chfs_read_data(struct chfs_mount* chmp, struct vnode *vp,
struct buf *bp)
@ -1086,6 +1121,7 @@ chfs_read_data(struct chfs_mount* chmp, struct vnode *vp,
memset(bp->b_data, 0, bp->b_bcount);
/* Calculate the size of the file from its fragtree. */
ofs = bp->b_blkno * PAGE_SIZE;
frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->fragtree, &ofs);
@ -1101,11 +1137,11 @@ chfs_read_data(struct chfs_mount* chmp, struct vnode *vp,
}
nref = frag->node->nref;
size = sizeof(*dnode) + frag->size;
buf = kmem_alloc(size, KM_SLEEP);
/* Read node from flash. */
dbg("reading from lnr: %u, offset: %u, size: %zu\n", nref->nref_lnr, CHFS_GET_OFS(nref->nref_offset), size);
err = chfs_read_leb(chmp, nref->nref_lnr, buf, CHFS_GET_OFS(nref->nref_offset), size, &retlen);
if (err) {
@ -1118,6 +1154,7 @@ chfs_read_data(struct chfs_mount* chmp, struct vnode *vp,
goto out;
}
/* Read data from flash. */
dnode = (struct chfs_flash_data_node *)buf;
crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
if (crc != le32toh(dnode->hdr_crc)) {
@ -1125,19 +1162,23 @@ chfs_read_data(struct chfs_mount* chmp, struct vnode *vp,
err = EIO;
goto out;
}
//check header magic bitmask
/* Check header magic bitmask. */
if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
chfs_err("Wrong magic bitmask.\n");
err = EIO;
goto out;
}
//check node crc
/* Check crc of node. */
crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
if (crc != le32toh(dnode->node_crc)) {
chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));
err = EIO;
goto out;
}
/* Check crc of data. */
crc = crc32(0, (uint8_t *)dnode->data, dnode->data_length);
if (crc != le32toh(dnode->data_crc)) {
chfs_err("Data CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->data_crc));

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_scan.c,v 1.3 2012/08/10 09:26:58 ttoth Exp $ */
/* $NetBSD: chfs_scan.c,v 1.4 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -31,19 +31,10 @@
* SUCH DAMAGE.
*/
/*
* chfs_scan.c
*
* Created on: 2009.11.05.
* Author: dtengeri
*/
#include "chfs.h"
/**
/*
* chfs_scan_make_vnode_cache - makes a new vnode cache during scan
* @chmp: CHFS main descriptor structure
* @vno: vnode identifier
* This function returns a vnode cache belonging to @vno.
*/
struct chfs_vnode_cache *
@ -53,15 +44,18 @@ chfs_scan_make_vnode_cache(struct chfs_mount *chmp, ino_t vno)
KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
/* vnode cache already exists */
vc = chfs_vnode_cache_get(chmp, vno);
if (vc) {
return vc;
}
/* update max vnode number if needed */
if (vno > chmp->chm_max_vno) {
chmp->chm_max_vno = vno;
}
/* create new vnode cache */
vc = chfs_vnode_cache_alloc(vno);
chfs_vnode_cache_add(chmp, vc);
@ -75,9 +69,8 @@ chfs_scan_make_vnode_cache(struct chfs_mount *chmp, ino_t vno)
return vc;
}
/**
/*
* chfs_scan_check_node_hdr - checks node magic and crc
* @nhdr: node header to check
* Returns 0 if everything is OK, error code otherwise.
*/
int
@ -104,13 +97,7 @@ chfs_scan_check_node_hdr(struct chfs_flash_node_hdr *nhdr)
return CHFS_NODE_OK;
}
/**
* chfs_scan_check_vnode - check vnode crc and add to vnode cache
* @chmp: CHFS main descriptor structure
* @cheb: eraseblock informations
* @buf: vnode to check
* @ofs: offset in eraseblock where vnode starts
*/
/* chfs_scan_check_vnode - check vnode crc and add it to vnode cache */
int
chfs_scan_check_vnode(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, void *buf, off_t ofs)
@ -126,6 +113,7 @@ chfs_scan_check_vnode(struct chfs_mount *chmp,
crc = crc32(0, (uint8_t *)vnode,
sizeof(struct chfs_flash_vnode) - 4);
/* check node crc */
if (crc != le32toh(vnode->node_crc)) {
err = chfs_update_eb_dirty(chmp,
cheb, le32toh(vnode->length));
@ -138,6 +126,7 @@ chfs_scan_check_vnode(struct chfs_mount *chmp,
vno = le64toh(vnode->vno);
/* find the corresponding vnode cache */
mutex_enter(&chmp->chm_lock_vnocache);
vc = chfs_vnode_cache_get(chmp, vno);
if (!vc) {
@ -154,11 +143,9 @@ chfs_scan_check_vnode(struct chfs_mount *chmp,
KASSERT(nref->nref_lnr == cheb->lnr);
/* Check version of vnode. */
/* check version of vnode */
if ((struct chfs_vnode_cache *)vc->v != vc) {
if (le64toh(vnode->version) > *vc->vno_version) {
//err = chfs_update_eb_dirty(chmp, &chmp->chm_blocks[vc->v->lnr],
// sizeof(struct chfs_flash_vnode));
*vc->vno_version = le64toh(vnode->version);
chfs_add_vnode_ref_to_vc(chmp, vc, nref);
} else {
@ -175,8 +162,8 @@ chfs_scan_check_vnode(struct chfs_mount *chmp,
}
mutex_exit(&chmp->chm_lock_vnocache);
/* update sizes */
mutex_enter(&chmp->chm_lock_sizes);
//dbg("B:lnr: %d |free_size: %d node's size: %d\n", cheb->lnr, cheb->free_size, le32toh(vnode->length));
chfs_change_size_free(chmp, cheb, -le32toh(vnode->length));
chfs_change_size_used(chmp, cheb, le32toh(vnode->length));
mutex_exit(&chmp->chm_lock_sizes);
@ -185,40 +172,21 @@ chfs_scan_check_vnode(struct chfs_mount *chmp,
KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
//dbg(" A: free_size: %d\n", cheb->free_size);
/*dbg("vnode dump:\n");
dbg(" ->magic: 0x%x\n", le16toh(vnode->magic));
dbg(" ->type: %d\n", le16toh(vnode->type));
dbg(" ->length: %d\n", le32toh(vnode->length));
dbg(" ->hdr_crc: 0x%x\n", le32toh(vnode->hdr_crc));
dbg(" ->vno: %d\n", le64toh(vnode->vno));
dbg(" ->version: %ld\n", le64toh(vnode->version));
dbg(" ->uid: %d\n", le16toh(vnode->uid));
dbg(" ->gid: %d\n", le16toh(vnode->gid));
dbg(" ->mode: %d\n", le32toh(vnode->mode));
dbg(" ->dn_size: %d\n", le32toh(vnode->dn_size));
dbg(" ->atime: %d\n", le32toh(vnode->atime));
dbg(" ->mtime: %d\n", le32toh(vnode->mtime));
dbg(" ->ctime: %d\n", le32toh(vnode->ctime));
dbg(" ->dsize: %d\n", le32toh(vnode->dsize));
dbg(" ->node_crc: 0x%x\n", le32toh(vnode->node_crc));*/
return CHFS_NODE_OK;
}
/* chfs_scan_mark_dirent_obsolete - marks a directory entry "obsolete" */
int
chfs_scan_mark_dirent_obsolete(struct chfs_mount *chmp,
struct chfs_vnode_cache *vc, struct chfs_dirent *fd)
{
//int size;
struct chfs_eraseblock *cheb;
struct chfs_node_ref *prev, *nref;
nref = fd->nref;
cheb = &chmp->chm_blocks[fd->nref->nref_lnr];
/* Remove dirent's node ref from vnode cache */
/* remove dirent's node ref from vnode cache */
prev = vc->dirents;
if (prev && prev == nref) {
vc->dirents = prev->nref_next;
@ -232,25 +200,13 @@ chfs_scan_mark_dirent_obsolete(struct chfs_mount *chmp,
}
}
/*dbg("XXX - start\n");
//nref = vc->dirents;
struct chfs_dirent *tmp;
tmp = vc->scan_dirents;
while (tmp) {
dbg(" ->tmp->name: %s\n", tmp->name);
dbg(" ->tmp->version: %ld\n", tmp->version);
dbg(" ->tmp->vno: %d\n", tmp->vno);
tmp = tmp->next;
}
dbg("XXX - end\n");*/
//size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) + fd->nsize);
KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
return 0;
}
/* chfs_add_fd_to_list - adds a directory entry to its parent's vnode cache */
void
chfs_add_fd_to_list(struct chfs_mount *chmp,
struct chfs_dirent *new, struct chfs_vnode_cache *pvc)
@ -258,11 +214,11 @@ chfs_add_fd_to_list(struct chfs_mount *chmp,
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
int size;
struct chfs_eraseblock *cheb, *oldcheb;
// struct chfs_dirent **prev;
struct chfs_dirent *fd, *tmpfd;
dbg("adding fd to list: %s\n", new->name);
/* update highest version if needed */
if ((new->version > pvc->highest_version))
pvc->highest_version = new->version;
@ -279,7 +235,6 @@ chfs_add_fd_to_list(struct chfs_mount *chmp,
} else if (fd->nhash == new->nhash &&
!strcmp(fd->name, new->name)) {
if (new->version > fd->version) {
// new->next = fd->next;
/* replace fd with new */
TAILQ_INSERT_BEFORE(fd, new, fds);
chfs_change_size_free(chmp, cheb, -size);
@ -294,26 +249,13 @@ chfs_add_fd_to_list(struct chfs_mount *chmp,
chfs_change_size_dirty(chmp, oldcheb, size);
}
chfs_free_dirent(fd);
// *prev = new;//XXX
} else {
/* new dirent is older */
chfs_scan_mark_dirent_obsolete(chmp, pvc, new);
chfs_change_size_free(chmp, cheb, -size);
chfs_change_size_dirty(chmp, cheb, size);
chfs_free_dirent(new);
}
/*dbg("START\n");
fd = pvc->scan_dirents;
while (fd) {
dbg("dirent dump:\n");
dbg(" ->vno: %d\n", fd->vno);
dbg(" ->version: %ld\n", fd->version);
dbg(" ->nhash: 0x%x\n", fd->nhash);
dbg(" ->nsize: %d\n", fd->nsize);
dbg(" ->name: %s\n", fd->name);
dbg(" ->type: %d\n", fd->type);
fd = fd->next;
}
dbg("END\n");*/
mutex_exit(&chmp->chm_lock_sizes);
return;
}
@ -322,38 +264,17 @@ chfs_add_fd_to_list(struct chfs_mount *chmp,
TAILQ_INSERT_TAIL(&pvc->scan_dirents, new, fds);
out:
//dbg("B:lnr: %d |free_size: %d size: %d\n", cheb->lnr, cheb->free_size, size);
/* update sizes */
chfs_change_size_free(chmp, cheb, -size);
chfs_change_size_used(chmp, cheb, size);
mutex_exit(&chmp->chm_lock_sizes);
KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
//dbg(" A: free_size: %d\n", cheb->free_size);
KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
// fd = pvc->scan_dirents;
/*dbg("START\n");
while (fd) {
dbg("dirent dump:\n");
dbg(" ->vno: %d\n", fd->vno);
dbg(" ->version: %ld\n", fd->version);
dbg(" ->nhash: 0x%x\n", fd->nhash);
dbg(" ->nsize: %d\n", fd->nsize);
dbg(" ->name: %s\n", fd->name);
dbg(" ->type: %d\n", fd->type);
fd = fd->next;
}
dbg("END\n");*/
}
/**
* chfs_scan_check_dirent_node - check vnode crc and add to vnode cache
* @chmp: CHFS main descriptor structure
* @cheb: eraseblock informations
* @buf: directory entry to check
* @ofs: offset in eraseblock where dirent starts
*/
/* chfs_scan_check_dirent_node - check dirent crc and add it to the vnode cache */
int
chfs_scan_check_dirent_node(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, void *buf, off_t ofs)
@ -364,8 +285,7 @@ chfs_scan_check_dirent_node(struct chfs_mount *chmp,
struct chfs_vnode_cache *parentvc;
struct chfs_flash_dirent_node *dirent = buf;
//struct chfs_node_ref *tmp;
/* check crc */
crc = crc32(0, (uint8_t *)dirent, sizeof(*dirent) - 4);
if (crc != le32toh(dirent->node_crc)) {
err = chfs_update_eb_dirty(chmp, cheb, le32toh(dirent->length));
@ -373,12 +293,15 @@ chfs_scan_check_dirent_node(struct chfs_mount *chmp,
return err;
return CHFS_NODE_BADCRC;
}
/* allocate space for name */
namelen = dirent->nsize;
fd = chfs_alloc_dirent(namelen + 1);
if (!fd)
return ENOMEM;
/* allocate an nref */
fd->nref = chfs_alloc_node_ref(cheb);
if (!fd->nref)
return ENOMEM;
@ -399,7 +322,7 @@ chfs_scan_check_dirent_node(struct chfs_mount *chmp,
return CHFS_NODE_BADNAMECRC;
}
/* Check vnode_cache of parent node */
/* check vnode_cache of parent node */
mutex_enter(&chmp->chm_lock_vnocache);
parentvc = chfs_scan_make_vnode_cache(chmp, le64toh(dirent->pvno));
if (!parentvc) {
@ -412,66 +335,18 @@ chfs_scan_check_dirent_node(struct chfs_mount *chmp,
dbg("add dirent to #%llu\n", (unsigned long long)parentvc->vno);
chfs_add_node_to_list(chmp, parentvc, fd->nref, &parentvc->dirents);
mutex_exit(&chmp->chm_lock_vnocache);
/*tmp = vc->dirents;
dbg("START|vno: %d dirents dump\n", vc->vno);
while (tmp) {
dbg(" ->nref->nref_lnr: %d\n", tmp->lnr);
dbg(" ->nref->nref_offset: %d\n", tmp->offset);
tmp = tmp->next;
}
dbg(" END|vno: %d dirents dump\n", vc->vno);*/
// fd->next = NULL;
fd->vno = le64toh(dirent->vno);
fd->version = le64toh(dirent->version);
fd->nhash = hash32_buf(fd->name, namelen, HASH32_BUF_INIT);
fd->type = dirent->dtype;
/*dbg("dirent dump:\n");
dbg(" ->vno: %d\n", fd->vno);
dbg(" ->version: %ld\n", fd->version);
dbg(" ->nhash: 0x%x\n", fd->nhash);
dbg(" ->nsize: %d\n", fd->nsize);
dbg(" ->name: %s\n", fd->name);
dbg(" ->type: %d\n", fd->type);*/
chfs_add_fd_to_list(chmp, fd, parentvc);
/*struct chfs_node_ref *tmp;
tmp = vc->dirents;
dbg("START|vno: %d dirents dump\n", vc->vno);
while (tmp) {
dbg(" ->nref->nref_lnr: %d\n", tmp->lnr);
dbg(" ->nref->nref_offset: %d\n", tmp->offset);
tmp = tmp->next;
}
dbg(" END|vno: %d dirents dump\n", vc->vno);*/
/*dbg("dirent dump:\n");
dbg(" ->magic: 0x%x\n", le16toh(dirent->magic));
dbg(" ->type: %d\n", le16toh(dirent->type));
dbg(" ->length: %d\n", le32toh(dirent->length));
dbg(" ->hdr_crc: 0x%x\n", le32toh(dirent->hdr_crc));
dbg(" ->vno: %d\n", le64toh(dirent->vno));
dbg(" ->pvno: %d\n", le64toh(dirent->pvno));
dbg(" ->version: %ld\n", le64toh(dirent->version));
dbg(" ->mctime: %d\n", le32toh(dirent->mctime));
dbg(" ->nsize: %d\n", dirent->nsize);
dbg(" ->dtype: %d\n", dirent->dtype);
dbg(" ->name_crc: 0x%x\n", le32toh(dirent->name_crc));
dbg(" ->node_crc: 0x%x\n", le32toh(dirent->node_crc));
dbg(" ->name: %s\n", dirent->name);*/
return CHFS_NODE_OK;
}
/**
* chfs_scan_check_data_node - check vnode crc and add to vnode cache
* @chmp: CHFS main descriptor structure
* @cheb: eraseblock informations
* @buf: data node to check
* @ofs: offset in eraseblock where data node starts
*/
/* chfs_scan_check_data_node - check data node crc and add it to the vnode cache */
int
chfs_scan_check_data_node(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, void *buf, off_t ofs)
@ -483,6 +358,7 @@ chfs_scan_check_data_node(struct chfs_mount *chmp,
struct chfs_vnode_cache *vc;
struct chfs_flash_data_node *dnode = buf;
/* check crc */
crc = crc32(0, (uint8_t *)dnode, sizeof(struct chfs_flash_data_node) - 4);
if (crc != le32toh(dnode->node_crc)) {
err = chfs_update_eb_dirty(chmp, cheb, le32toh(dnode->length));
@ -490,7 +366,7 @@ chfs_scan_check_data_node(struct chfs_mount *chmp,
return err;
return CHFS_NODE_BADCRC;
}
/**
/*
* Don't check data nodes crc and version here, it will be done in
* the background GC thread.
*/
@ -515,6 +391,7 @@ chfs_scan_check_data_node(struct chfs_mount *chmp,
dbg("chmpfree: %u, chebfree: %u, dnode: %u\n", chmp->chm_free_size, cheb->free_size, dnode->length);
/* update sizes */
mutex_enter(&chmp->chm_lock_sizes);
chfs_change_size_free(chmp, cheb, -dnode->length);
chfs_change_size_unchecked(chmp, cheb, dnode->length);
@ -522,11 +399,7 @@ chfs_scan_check_data_node(struct chfs_mount *chmp,
return CHFS_NODE_OK;
}
/**
* chfs_scan_classify_cheb - determine eraseblock's state
* @chmp: CHFS main descriptor structure
* @cheb: eraseblock to classify
*/
/* chfs_scan_classify_cheb - determine eraseblock's state */
int
chfs_scan_classify_cheb(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb)
@ -542,10 +415,8 @@ chfs_scan_classify_cheb(struct chfs_mount *chmp,
}
/**
/*
* chfs_scan_eraseblock - scans an eraseblock, looking for nodes
* @chmp: CHFS main descriptor structure
* @cheb: eraseblock to scan
*
* This function scans a whole eraseblock, checks the nodes on it and adds them
* to the vnode cache.
@ -553,8 +424,8 @@ chfs_scan_classify_cheb(struct chfs_mount *chmp,
*/
int
chfs_scan_eraseblock(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb) {
struct chfs_eraseblock *cheb)
{
int err;
size_t len, retlen;
off_t ofs = 0;
@ -564,7 +435,6 @@ chfs_scan_eraseblock(struct chfs_mount *chmp,
int read_free = 0;
struct chfs_node_ref *nref;
dbg("scanning eraseblock content: %d free_size: %d\n", cheb->lnr, cheb->free_size);
dbg("scanned physical block: %d\n", chmp->chm_ebh->lmap[lnr]);
buf = kmem_alloc(CHFS_MAX_NODE_SIZE, KM_SLEEP);
@ -586,7 +456,7 @@ chfs_scan_eraseblock(struct chfs_mount *chmp,
/* first we check whether the buffer we read is full of 0xff; if so, maybe
* the block's remaining area is free. We increase read_free and if it
* reaches MAX_READ_FREE we stop reading the block*/
* reaches MAX_READ_FREE we stop reading the block */
if (check_pattern(buf, 0xff, 0, CHFS_NODE_HDR_SIZE)) {
read_free += CHFS_NODE_HDR_SIZE;
if (read_free >= MAX_READ_FREE(chmp)) {
@ -620,8 +490,8 @@ chfs_scan_eraseblock(struct chfs_mount *chmp,
}
switch (le16toh(nhdr->type)) {
case CHFS_NODETYPE_VNODE:
/* Read up the node */
//dbg("nodetype vnode\n");
/* vnode information */
/* read up the node */
len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
err = chfs_read_leb(chmp,
lnr, buf + CHFS_NODE_HDR_SIZE,
@ -642,11 +512,10 @@ chfs_scan_eraseblock(struct chfs_mount *chmp,
return err;
}
//dbg("XXX5end\n");
break;
case CHFS_NODETYPE_DIRENT:
/* Read up the node */
//dbg("nodetype dirent\n");
/* directory entry */
/* read up the node */
len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
err = chfs_read_leb(chmp,
@ -670,10 +539,9 @@ chfs_scan_eraseblock(struct chfs_mount *chmp,
return err;
}
//dbg("XXX6end\n");
break;
case CHFS_NODETYPE_DATA:
//dbg("nodetype data\n");
/* data node */
len = sizeof(struct chfs_flash_data_node) -
CHFS_NODE_HDR_SIZE;
err = chfs_read_leb(chmp,
@ -694,12 +562,9 @@ chfs_scan_eraseblock(struct chfs_mount *chmp,
if (err)
return err;
//dbg("XXX7end\n");
break;
case CHFS_NODETYPE_PADDING:
//dbg("nodetype padding\n");
//dbg("padding len: %d\n", le32toh(nhdr->length));
//dbg("BEF: cheb->free_size: %d\n", cheb->free_size);
/* padding node, set size and update dirty */
nref = chfs_alloc_node_ref(cheb);
nref->nref_offset = ofs - CHFS_NODE_HDR_SIZE;
nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
@ -707,21 +572,17 @@ chfs_scan_eraseblock(struct chfs_mount *chmp,
err = chfs_update_eb_dirty(chmp, cheb,
le32toh(nhdr->length));
//dbg("AFT: cheb->free_size: %d\n", cheb->free_size);
if (err)
return err;
//dbg("XXX8end\n");
break;
default:
//dbg("nodetype ? (default)\n");
/* Unknown node type, update dirty and skip */
/* unknown node type, update dirty and skip */
err = chfs_update_eb_dirty(chmp, cheb,
le32toh(nhdr->length));
if (err)
return err;
//dbg("XXX9end\n");
break;
}
ofs += le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
@ -730,6 +591,5 @@ chfs_scan_eraseblock(struct chfs_mount *chmp,
KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
//dbg("XXX10\n");
return chfs_scan_classify_cheb(chmp, cheb);
}
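Stripped of the CHFS specifics, the scan above walks the eraseblock header by header: give up once the remaining area looks erased, validate each header, dispatch on the node type and step forward by the node's padded length. A compressed userland model of that loop over an in-memory image, with a simplified header, an invented magic value and no CRC or body parsing:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MODEL_MAGIC	0x4778			/* illustrative, not the CHFS value */
#define MODEL_PAD(x)	(((x) + 3) & ~3u)	/* nodes are 4-byte aligned */

struct model_hdr {
	uint16_t magic;
	uint16_t type;
	uint32_t length;	/* total node length, header included */
};

/* Return the number of plausible nodes found in the image. */
static int
model_scan(const uint8_t *img, size_t img_len)
{
	struct model_hdr hdr;
	size_t ofs = 0;
	int nodes = 0;

	while (ofs + sizeof(hdr) <= img_len) {
		memcpy(&hdr, img + ofs, sizeof(hdr));
		if (hdr.magic != MODEL_MAGIC)
			break;		/* treat anything else as erased space */
		if (hdr.length < sizeof(hdr) || ofs + hdr.length > img_len)
			break;		/* corrupt length field */
		/* the real scan verifies hdr_crc and parses the body here */
		nodes++;
		ofs += MODEL_PAD(hdr.length);
	}
	return nodes;
}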

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_subr.c,v 1.7 2012/08/22 09:20:13 ttoth Exp $ */
/* $NetBSD: chfs_subr.c,v 1.8 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -32,10 +32,6 @@
* SUCH DAMAGE.
*/
/*
* Efficient memory file system supporting functions.
*/
#include <sys/cdefs.h>
#include <sys/param.h>
@ -58,12 +54,10 @@
#include <miscfs/specfs/specdev.h>
#include <miscfs/genfs/genfs.h>
#include "chfs.h"
//#include <fs/chfs/chfs_vnops.h>
//#include </root/xipffs/netbsd.chfs/chfs.h>
/* --------------------------------------------------------------------- */
/*
* chfs_mem_info -
* Returns information about the number of available memory pages,
* including physical and virtual ones.
*
@ -97,9 +91,8 @@ chfs_mem_info(bool total)
}
/* --------------------------------------------------------------------- */
/*
* chfs_dir_lookup -
* Looks for a directory entry in the directory represented by node.
* 'cnp' describes the name of the entry to look for. Note that the .
* and .. components are not allowed as they do not physically exist
@ -117,37 +110,27 @@ chfs_dir_lookup(struct chfs_inode *ip, struct componentname *cnp)
KASSERT(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
KASSERT(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
cnp->cn_nameptr[1] == '.')));
//CHFS_VALIDATE_DIR(node);
//node->chn_status |= CHFS_NODE_ACCESSED;
found = false;
// fd = ip->dents;
// while(fd) {
TAILQ_FOREACH(fd, &ip->dents, fds) {
KASSERT(cnp->cn_namelen < 0xffff);
if (fd->vno == 0)
continue;
/*dbg("dirent dump:\n");
dbg(" ->vno: %d\n", fd->vno);
dbg(" ->version: %ld\n", fd->version);
dbg(" ->nhash: 0x%x\n", fd->nhash);
dbg(" ->nsize: %d\n", fd->nsize);
dbg(" ->name: %s\n", fd->name);
dbg(" ->type: %d\n", fd->type);*/
if (fd->nsize == (uint16_t)cnp->cn_namelen &&
memcmp(fd->name, cnp->cn_nameptr, fd->nsize) == 0) {
found = true;
break;
}
// fd = fd->next;
}
return found ? fd : NULL;
}
/* --------------------------------------------------------------------- */
/*
* chfs_filldir -
* Creates a (kernel) dirent and moves it to the given memory address.
* Used during readdir.
*/
int
chfs_filldir(struct uio* uio, ino_t ino, const char *name,
int namelen, enum chtype type)
@ -203,11 +186,8 @@ chfs_filldir(struct uio* uio, ino_t ino, const char *name,
return error;
}
/* --------------------------------------------------------------------- */
/*
* Change size of the given vnode.
* chfs_chsize - change size of the given vnode
* Caller should execute chfs_update on vp after a successful execution.
* The vnode must be locked on entry and remain locked on exit.
*/
@ -255,61 +235,18 @@ chfs_chsize(struct vnode *vp, u_quad_t size, kauth_cred_t cred)
ubc_zerorange(&vp->v_uobj, size, ip->size - size, UBC_UNMAP_FLAG(vp));
}
/* drop unused fragments */
chfs_truncate_fragtree(ip->chmp, &ip->fragtree, size);
uvm_vnp_setsize(vp, size);
chfs_set_vnode_size(vp, size);
ip->iflag |= IN_CHANGE | IN_UPDATE;
mutex_exit(&chmp->chm_lock_mountfields);
return 0;
}
#if 0
int error;
struct chfs_node *node;
KASSERT(VOP_ISLOCKED(vp));
node = VP_TO_CHFS_NODE(vp);
// Decide whether this is a valid operation based on the file type.
error = 0;
switch (vp->v_type) {
case VDIR:
return EISDIR;
case VREG:
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return EROFS;
break;
case VBLK:
case VCHR:
case VFIFO:
// Allow modifications of special files even if in the file
// system is mounted read-only (we are not modifying the
// files themselves, but the objects they represent).
return 0;
default:
return ENODEV;
}
// Immutable or append-only files cannot be modified, either.
if (node->chn_flags & (IMMUTABLE | APPEND))
return EPERM;
error = chfs_truncate(vp, size);
// chfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
// for us, as will update dn_status; no need to do that here.
KASSERT(VOP_ISLOCKED(vp));
return error;
#endif
/* --------------------------------------------------------------------- */
/*
* Change flags of the given vnode.
* chfs_chflags - change flags of the given vnode
* Caller should execute chfs_update on vp after a successful execution.
* The vnode must be locked on entry and remain locked on exit.
*/
@ -365,13 +302,12 @@ chfs_chflags(struct vnode *vp, int flags, kauth_cred_t cred)
return error;
}
/* --------------------------------------------------------------------- */
/* chfs_itimes - updates a vnode's times to the given values */
void
chfs_itimes(struct chfs_inode *ip, const struct timespec *acc,
const struct timespec *mod, const struct timespec *cre)
{
//dbg("itimes\n");
struct timespec now;
if (!(ip->iflag & (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY))) {
@ -388,7 +324,6 @@ chfs_itimes(struct chfs_inode *ip, const struct timespec *acc,
if (mod == NULL)
mod = &now;
ip->mtime = mod->tv_sec;
//ip->i_modrev++;
}
if (ip->iflag & (IN_CHANGE | IN_MODIFY)) {
if (cre == NULL)
@ -402,61 +337,18 @@ chfs_itimes(struct chfs_inode *ip, const struct timespec *acc,
ip->iflag &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY);
}
/* --------------------------------------------------------------------- */
/* chfs_update - updates a vnode's times */
int
chfs_update(struct vnode *vp, const struct timespec *acc,
const struct timespec *mod, int flags)
{
struct chfs_inode *ip;
/* XXX ufs_reclaim calls this function unlocked! */
// KASSERT(VOP_ISLOCKED(vp));
#if 0
if (flags & UPDATE_CLOSE)
; /* XXX Need to do anything special? */
#endif
ip = VTOI(vp);
chfs_itimes(ip, acc, mod, NULL);
// KASSERT(VOP_ISLOCKED(vp));
return (0);
}
/* --------------------------------------------------------------------- */
/*
int
chfs_truncate(struct vnode *vp, off_t length)
{
bool extended;
int error;
struct chfs_node *node;
printf("CHFS: truncate()\n");
node = VP_TO_CHFS_NODE(vp);
extended = length > node->chn_size;
if (length < 0) {
error = EINVAL;
goto out;
}
if (node->chn_size == length) {
error = 0;
goto out;
}
error = chfs_reg_resize(vp, length);
if (error == 0)
node->chn_status |= CHFS_NODE_CHANGED | CHFS_NODE_MODIFIED;
out:
chfs_update(vp, NULL, NULL, 0);
return error;
}*/

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_vfsops.c,v 1.5 2012/08/10 09:26:58 ttoth Exp $ */
/* $NetBSD: chfs_vfsops.c,v 1.6 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -56,13 +56,10 @@
#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>
#include <ufs/ufs/dir.h>
//#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>
//#include </root/xipffs/netbsd.chfs/chfs.h>
//#include </root/xipffs/netbsd.chfs/chfs_args.h>
#include "chfs.h"
#include "chfs_args.h"
@ -103,13 +100,6 @@ const struct genfs_ops chfs_genfsops = {
.gop_markupdate = ufs_gop_markupdate,
};
/*
static const struct ufs_ops chfs_ufsops = {
.uo_itimes = chfs_itimes,
.uo_update = chfs_update,
};
*/
struct pool chfs_inode_pool;
/* for looking up the major number of the flash device */
@ -158,17 +148,13 @@ chfs_mount(struct mount *mp,
if (err) {
return err;
}
/*
* Look up the name and verify that it's sane.
*/
/* Look up the name and verify that it's sane. */
NDINIT(&nd, LOOKUP, FOLLOW, pb);
if ((err = namei(&nd)) != 0 )
return (err);
devvp = nd.ni_vp;
/*
* Be sure this is a valid block device
*/
/* Be sure this is a valid block device */
if (devvp->v_type != VBLK)
err = ENOTBLK;
else if (bdevsw_lookup(devvp->v_rdev) == NULL)
@ -189,7 +175,7 @@ chfs_mount(struct mount *mp,
if (err)
goto fail;
/* call CHFS mount function */
err = chfs_mountfs(devvp, mp);
if (err) {
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
@ -197,6 +183,7 @@ chfs_mount(struct mount *mp,
VOP_UNLOCK(devvp);
goto fail;
}
ump = VFSTOUFS(mp);
chmp = ump->um_chfs;
@ -212,7 +199,7 @@ fail:
return (err);
}
/* chfs_mountfs - init CHFS */
int
chfs_mountfs(struct vnode *devvp, struct mount *mp)
{
@ -239,6 +226,7 @@ chfs_mountfs(struct vnode *devvp, struct mount *mp)
if (err)
return (err);
/* Setup device. */
flash_major = cdevsw_lookup_major(&flash_cdevsw);
if (devvp->v_type != VBLK)
@ -255,16 +243,16 @@ chfs_mountfs(struct vnode *devvp, struct mount *mp)
return (err);
}
/* Connect CHFS to UFS. */
ump = kmem_zalloc(sizeof(struct ufsmount), KM_SLEEP);
ump->um_fstype = UFS1;
//ump->um_ops = &chfs_ufsops;
ump->um_chfs = kmem_zalloc(sizeof(struct chfs_mount), KM_SLEEP);
mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
/* Get superblock and set flash device number */
chmp = ump->um_chfs;
/* Initialize erase block handler. */
chmp->chm_ebh = kmem_alloc(sizeof(struct chfs_ebh), KM_SLEEP);
dbg("[]opening flash: %u\n", (unsigned int)devvp->v_rdev);
@ -276,17 +264,19 @@ chfs_mountfs(struct vnode *devvp, struct mount *mp)
//TODO check flash sizes
/* Initialize vnode cache's hashtable and eraseblock array. */
chmp->chm_gbl_version = 0;
chmp->chm_vnocache_hash = chfs_vnocache_hash_init();
chmp->chm_blocks = kmem_zalloc(chmp->chm_ebh->peb_nr *
sizeof(struct chfs_eraseblock), KM_SLEEP);
/* Initialize mutexes. */
mutex_init(&chmp->chm_lock_mountfields, MUTEX_DEFAULT, IPL_NONE);
mutex_init(&chmp->chm_lock_sizes, MUTEX_DEFAULT, IPL_NONE);
mutex_init(&chmp->chm_lock_vnocache, MUTEX_DEFAULT, IPL_NONE);
//XXX
/* Initialize read/write constants (from UFS). */
chmp->chm_fs_bmask = -4096;
chmp->chm_fs_bsize = 4096;
chmp->chm_fs_qbmask = 4095;
@ -294,12 +284,13 @@ chfs_mountfs(struct vnode *devvp, struct mount *mp)
chmp->chm_fs_fmask = -2048;
chmp->chm_fs_qfmask = 2047;
/* Initialize writebuffer. */
chmp->chm_wbuf_pagesize = chmp->chm_ebh->flash_if->page_size;
dbg("wbuf size: %zu\n", chmp->chm_wbuf_pagesize);
chmp->chm_wbuf = kmem_alloc(chmp->chm_wbuf_pagesize, KM_SLEEP);
rw_init(&chmp->chm_lock_wbuf);
//init queues
/* Initialize queues. */
TAILQ_INIT(&chmp->chm_free_queue);
TAILQ_INIT(&chmp->chm_clean_queue);
TAILQ_INIT(&chmp->chm_dirty_queue);
@ -307,8 +298,10 @@ chfs_mountfs(struct vnode *devvp, struct mount *mp)
TAILQ_INIT(&chmp->chm_erasable_pending_wbuf_queue);
TAILQ_INIT(&chmp->chm_erase_pending_queue);
/* Initialize flash-specific constants. */
chfs_calc_trigger_levels(chmp);
/* Initialize sizes. */
chmp->chm_nr_free_blocks = 0;
chmp->chm_nr_erasable_blocks = 0;
chmp->chm_max_vno = 2;
@ -318,15 +311,19 @@ chfs_mountfs(struct vnode *devvp, struct mount *mp)
chmp->chm_dirty_size = 0;
chmp->chm_wasted_size = 0;
chmp->chm_free_size = chmp->chm_ebh->eb_size * chmp->chm_ebh->peb_nr;
/* Build filesystem. */
err = chfs_build_filesystem(chmp);
if (err) {
/* Armageddon and return. */
chfs_vnocache_hash_destroy(chmp->chm_vnocache_hash);
ebh_close(chmp->chm_ebh);
err = EIO;
goto fail;
}
/* Initialize UFS. */
mp->mnt_data = ump;
mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_CHFS);
@ -341,19 +338,8 @@ chfs_mountfs(struct vnode *devvp, struct mount *mp)
ump->um_dev = dev;
ump->um_devvp = devvp;
ump->um_maxfilesize = 1048512 * 1024;
/*TODO fill these fields
ump->um_nindir =
ump->um_lognindir =
ump->um_bptrtodb =
ump->um_seqinc =
ump->um_maxsymlinklen =
ump->um_dirblksiz =
ump->um_maxfilesize =
*/
/*
* Allocate the root vnode.
*/
/* Allocate the root vnode. */
err = VFS_VGET(mp, CHFS_ROOTINO, &vp);
if (err) {
dbg("error: %d while allocating root node\n", err);
@ -361,6 +347,7 @@ chfs_mountfs(struct vnode *devvp, struct mount *mp)
}
vput(vp);
/* Start GC. */
chfs_gc_thread_start(chmp);
mutex_enter(&chmp->chm_lock_mountfields);
chfs_gc_trigger(chmp);
@ -368,6 +355,7 @@ chfs_mountfs(struct vnode *devvp, struct mount *mp)
devvp->v_specmountpoint = mp;
return 0;
fail:
kmem_free(chmp->chm_ebh, sizeof(struct chfs_ebh));
kmem_free(chmp, sizeof(struct chfs_mount));
@ -377,14 +365,12 @@ fail:
/* --------------------------------------------------------------------- */
/* ARGSUSED2 */
static int
chfs_unmount(struct mount *mp, int mntflags)
{
int flags = 0, i = 0;
struct ufsmount *ump;
struct chfs_mount *chmp;
// struct chfs_vnode_cache *vc, *next;
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
@ -394,8 +380,10 @@ chfs_unmount(struct mount *mp, int mntflags)
ump = VFSTOUFS(mp);
chmp = ump->um_chfs;
/* Stop GC. */
chfs_gc_thread_stop(chmp);
/* Flush every buffer. */
(void)vflush(mp, NULLVP, flags);
if (chmp->chm_wbuf_len) {
@ -404,19 +392,24 @@ chfs_unmount(struct mount *mp, int mntflags)
mutex_exit(&chmp->chm_lock_mountfields);
}
/* Free node references. */
for (i = 0; i < chmp->chm_ebh->peb_nr; i++) {
chfs_free_node_refs(&chmp->chm_blocks[i]);
}
/* Destroy vnode cache hashtable. */
chfs_vnocache_hash_destroy(chmp->chm_vnocache_hash);
/* Close eraseblock handler. */
ebh_close(chmp->chm_ebh);
/* Destroy mutexes. */
rw_destroy(&chmp->chm_lock_wbuf);
mutex_destroy(&chmp->chm_lock_vnocache);
mutex_destroy(&chmp->chm_lock_sizes);
mutex_destroy(&chmp->chm_lock_mountfields);
/* Unmount UFS. */
if (ump->um_devvp->v_type != VBAD) {
ump->um_devvp->v_specmountpoint = NULL;
}
@ -426,7 +419,7 @@ chfs_unmount(struct mount *mp, int mntflags)
mutex_destroy(&ump->um_lock);
//kmem_free(ump->um_chfs, sizeof(struct chfs_mount));
/* Everything done. */
kmem_free(ump, sizeof(struct ufsmount));
mp->mnt_data = NULL;
mp->mnt_flag &= ~MNT_LOCAL;
@ -474,6 +467,7 @@ retry:
vpp = kmem_alloc(sizeof(struct vnode*), KM_SLEEP);
}
/* Get node from inode hash. */
if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
return 0;
}
@ -496,6 +490,7 @@ retry:
vp->v_vflag |= VV_LOCKSWORK;
/* Initialize vnode/inode. */
memset(ip, 0, sizeof(*ip));
vp->v_data = ip;
ip->vp = vp;
@ -508,12 +503,11 @@ retry:
genfs_node_init(vp, &chfs_genfsops);
rb_tree_init(&ip->fragtree, &frag_rbtree_ops);
//mutex_init(&ip->inode_lock, MUTEX_DEFAULT, IPL_NONE);
chfs_ihashins(ip);
mutex_exit(&chfs_hashlock);
// set root inode
/* Set root inode. */
if (ino == CHFS_ROOTINO) {
dbg("SETROOT\n");
vp->v_vflag |= VV_ROOT;
@ -522,17 +516,16 @@ retry:
ip->mode = IFMT | IEXEC | IWRITE | IREAD;
ip->iflag |= (IN_ACCESS | IN_CHANGE | IN_UPDATE);
chfs_update(vp, NULL, NULL, UPDATE_WAIT);
// ip->dents = NULL; XXXTAILQ
TAILQ_INIT(&ip->dents);
chfs_set_vnode_size(vp, 512);
}
// set vnode cache
mutex_enter(&chmp->chm_lock_vnocache);
chvc = chfs_vnode_cache_get(chmp, ino);
mutex_exit(&chmp->chm_lock_vnocache);
if (!chvc) {
dbg("!chvc\n");
/* Initialize the corresponding vnode cache. */
/* XXX, we can't alloc under a lock, refactor this! */
chvc = chfs_vnode_cache_alloc(ino);
mutex_enter(&chmp->chm_lock_vnocache);
@ -549,7 +542,7 @@ retry:
} else {
dbg("chvc\n");
ip->chvc = chvc;
// if we have a vnode cache, the node is already on flash, so read it
/* We have a vnode cache, so the node is already on flash; read it. */
if (ino == CHFS_ROOTINO) {
chvc->pvno = CHFS_ROOTINO;
TAILQ_INIT(&chvc->scan_dirents);
@ -558,9 +551,10 @@ retry:
}
mutex_enter(&chmp->chm_lock_mountfields);
// init type specific things
/* Initialize type specific things. */
switch (ip->ch_type) {
case CHT_DIR:
/* Read every dirent. */
nref = chvc->dirents;
while (nref &&
(struct chfs_vnode_cache *)nref != chvc) {
@ -570,8 +564,9 @@ retry:
chfs_set_vnode_size(vp, 512);
break;
case CHT_REG:
/* FALLTHROUGH */
case CHT_SOCK:
//build the fragtree of the vnode
/* Collect data. */
dbg("read_inode_internal | ino: %llu\n",
(unsigned long long)ip->ino);
error = chfs_read_inode(chmp, ip);
@ -583,7 +578,7 @@ retry:
}
break;
case CHT_LNK:
//build the fragtree of the vnode
/* Collect data. */
dbg("read_inode_internal | ino: %llu\n",
(unsigned long long)ip->ino);
error = chfs_read_inode_internal(chmp, ip);
@ -594,6 +589,7 @@ retry:
return (error);
}
/* Set link. */
dbg("size: %llu\n", (unsigned long long)ip->size);
bp = getiobuf(vp, true);
bp->b_blkno = 0;
@ -610,9 +606,11 @@ retry:
break;
case CHT_CHR:
/* FALLTHROUGH */
case CHT_BLK:
/* FALLTHROUGH */
case CHT_FIFO:
//build the fragtree of the vnode
/* Collect data. */
dbg("read_inode_internal | ino: %llu\n",
(unsigned long long)ip->ino);
error = chfs_read_inode_internal(chmp, ip);
@ -623,6 +621,7 @@ retry:
return (error);
}
/* Set device. */
bp = getiobuf(vp, true);
bp->b_blkno = 0;
bp->b_bufsize = bp->b_resid =
@ -633,6 +632,7 @@ retry:
bp->b_data, sizeof(dev_t));
kmem_free(bp->b_data, sizeof(dev_t));
putiobuf(bp);
/* Set specific operations. */
if (ip->ch_type == CHT_FIFO) {
vp->v_op = chfs_fifoop_p;
} else {
@ -642,6 +642,7 @@ retry:
break;
case CHT_BLANK:
/* FALLTHROUGH */
case CHT_BAD:
break;
}
@ -649,7 +650,7 @@ retry:
}
/* finish inode initalization */
/* Finish inode initialization. */
ip->devvp = ump->um_devvp;
vref(ip->devvp);
@ -685,7 +686,6 @@ chfs_start(struct mount *mp, int flags)
/* --------------------------------------------------------------------- */
/* ARGSUSED2 */
static int
chfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
@ -704,14 +704,6 @@ chfs_statvfs(struct mount *mp, struct statvfs *sbp)
sbp->f_blocks = chmp->chm_ebh->peb_nr;
sbp->f_files = 0;
sbp->f_bavail = chmp->chm_nr_free_blocks - chmp->chm_resv_blocks_write;
#if 0
printf("chmp->chm_nr_free_blocks: %jd\n",
(intmax_t )chmp->chm_nr_free_blocks);
printf("chmp->chm_resv_blocks_write: %jd\n",
(intmax_t) chmp->chm_resv_blocks_write);
printf("chmp->chm_ebh->peb_nr: %jd\n",
(intmax_t) chmp->chm_ebh->peb_nr);
#endif
sbp->f_bfree = chmp->chm_nr_free_blocks;
sbp->f_bresvd = chmp->chm_resv_blocks_write;
@ -728,7 +720,6 @@ chfs_statvfs(struct mount *mp, struct statvfs *sbp)
/* --------------------------------------------------------------------- */
/* ARGSUSED0 */
static int
chfs_sync(struct mount *mp, int waitfor,
kauth_cred_t uc)
@ -741,6 +732,7 @@ chfs_sync(struct mount *mp, int waitfor,
static void
chfs_init(void)
{
/* Initialize pools and inode hash. */
chfs_alloc_pool_caches();
chfs_ihashinit();
pool_init(&chfs_inode_pool, sizeof(struct chfs_inode), 0, 0, 0,
@ -801,7 +793,7 @@ struct vfsops chfs_vfsops = {
chfs_start, /* vfs_start */
chfs_unmount, /* vfs_unmount */
chfs_root, /* vfs_root */
ufs_quotactl, /* vfs_quotactl */
ufs_quotactl, /* vfs_quotactl */
chfs_statvfs, /* vfs_statvfs */
chfs_sync, /* vfs_sync */
chfs_vget, /* vfs_vget */
@ -812,16 +804,17 @@ struct vfsops chfs_vfsops = {
chfs_done, /* vfs_done */
NULL, /* vfs_mountroot */
chfs_snapshot, /* vfs_snapshot */
vfs_stdextattrctl, /* vfs_extattrctl */
(void *)eopnotsupp, /* vfs_suspendctl */
vfs_stdextattrctl, /* vfs_extattrctl */
(void *)eopnotsupp, /* vfs_suspendctl */
genfs_renamelock_enter,
genfs_renamelock_exit,
(void *)eopnotsupp,
chfs_vnodeopv_descs,
0, /* vfs_refcount */
0, /* vfs_refcount */
{ NULL, NULL },
};
/* For using CHFS as a module. */
static int
chfs_modcmd(modcmd_t cmd, void *arg)
{

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_vnode.c,v 1.7 2012/08/13 13:12:51 ttoth Exp $ */
/* $NetBSD: chfs_vnode.c,v 1.8 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -42,6 +42,7 @@
#include <miscfs/genfs/genfs.h>
/* chfs_vnode_lookup - looks up a vnode */
struct vnode *
chfs_vnode_lookup(struct chfs_mount *chmp, ino_t vno)
{
@ -56,8 +57,9 @@ chfs_vnode_lookup(struct chfs_mount *chmp, ino_t vno)
return NULL;
}
/* chfs_readvnode - reads a vnode from the flash and sets up its inode */
int
chfs_readvnode(struct mount* mp, ino_t ino, struct vnode** vpp)
chfs_readvnode(struct mount *mp, ino_t ino, struct vnode **vpp)
{
struct ufsmount* ump = VFSTOUFS(mp);
struct chfs_mount *chmp = ump->um_chfs;
@ -81,13 +83,14 @@ chfs_readvnode(struct mount* mp, ino_t ino, struct vnode** vpp)
ip = VTOI(vp);
chvc = ip->chvc;
/* root node is in-memory only */
if (chvc && ino != CHFS_ROOTINO) {
/* debug... */
dbg("offset: %" PRIu32 ", lnr: %d\n",
CHFS_GET_OFS(chvc->v->nref_offset), chvc->v->nref_lnr);
KASSERT((void *)chvc != (void *)chvc->v);
/* reading */
buf = kmem_alloc(len, KM_SLEEP);
err = chfs_read_leb(chmp, chvc->v->nref_lnr, buf,
CHFS_GET_OFS(chvc->v->nref_offset), len, &retlen);
@ -99,17 +102,19 @@ chfs_readvnode(struct mount* mp, ino_t ino, struct vnode** vpp)
return EIO;
}
chfvn = (struct chfs_flash_vnode*)buf;
/* setup inode fields */
chfs_set_vnode_size(vp, chfvn->dn_size);
ip->mode = chfvn->mode;
ip->ch_type = IFTOCHT(ip->mode);
vp->v_type = CHTTOVT(ip->ch_type);
ip->version = chfvn->version;
//ip->chvc->highest_version = ip->version;
ip->uid = chfvn->uid;
ip->gid = chfvn->gid;
ip->atime = chfvn->atime;
ip->mtime = chfvn->mtime;
ip->ctime = chfvn->ctime;
kmem_free(buf, len);
}
@ -118,21 +123,22 @@ chfs_readvnode(struct mount* mp, ino_t ino, struct vnode** vpp)
return 0;
}
/*
 * chfs_readdirent -
* reads a directory entry from flash and adds it to its inode
*/
int
chfs_readdirent(struct mount *mp, struct chfs_node_ref *chnr, struct chfs_inode *pdir)
{
struct ufsmount *ump = VFSTOUFS(mp);
struct chfs_mount *chmp = ump->um_chfs;
struct chfs_flash_dirent_node chfdn;
struct chfs_dirent *fd;//, *pdents;
struct chfs_dirent *fd;
size_t len = sizeof(struct chfs_flash_dirent_node);
// struct chfs_vnode_cache* parent;
size_t retlen;
int err = 0;
// parent = chfs_get_vnode_cache(chmp, pdir->ino);
//read flash_dirent_node
/* read flash_dirent_node */
err = chfs_read_leb(chmp, chnr->nref_lnr, (char *)&chfdn,
CHFS_GET_OFS(chnr->nref_offset), len, &retlen);
if (err) {
@ -144,14 +150,14 @@ chfs_readdirent(struct mount *mp, struct chfs_node_ref *chnr, struct chfs_inode
return EIO;
}
//set fields of dirent
/* set fields of dirent */
fd = chfs_alloc_dirent(chfdn.nsize + 1);
fd->version = chfdn.version;
fd->vno = chfdn.vno;
fd->type = chfdn.dtype;
fd->nsize = chfdn.nsize;
// fd->next = NULL;
/* read the name of the dirent */
err = chfs_read_leb(chmp, chnr->nref_lnr, fd->name,
CHFS_GET_OFS(chnr->nref_offset) + len, chfdn.nsize, &retlen);
if (err) {
@ -167,24 +173,12 @@ chfs_readdirent(struct mount *mp, struct chfs_node_ref *chnr, struct chfs_inode
fd->name[fd->nsize] = 0;
fd->nref = chnr;
/* add to inode */
chfs_add_fd_to_inode(chmp, pdir, fd);
/*
pdents = pdir->i_chfs_ext.dents;
if (!pdents)
pdir->i_chfs_ext.dents = fd;
else {
while (pdents->next != NULL) {
pdents = pdents->next;
}
pdents->next = fd;
}
*/
return 0;
}
/*
* Allocate a new inode.
*/
/* chfs_makeinode - makes a new file and initializes its structures */
int
chfs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
struct componentname *cnp, enum vtype type)
@ -196,19 +190,21 @@ chfs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
struct chfs_vnode_cache* chvc;
int error;
ino_t vno;
struct chfs_dirent *nfd;//, *fd;
struct chfs_dirent *nfd;
dbg("makeinode\n");
pdir = VTOI(dvp);
*vpp = NULL;
/* the new vnode's number will be the new maximum */
vno = ++(chmp->chm_max_vno);
error = VFS_VGET(dvp->v_mount, vno, &vp);
if (error)
return (error);
/* setup vnode cache */
mutex_enter(&chmp->chm_lock_vnocache);
chvc = chfs_vnode_cache_get(chmp, vno);
@ -222,6 +218,7 @@ chfs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
chvc->state = VNO_STATE_CHECKEDABSENT;
mutex_exit(&chmp->chm_lock_vnocache);
/* setup inode */
ip = VTOI(vp);
ip->ino = vno;
@ -236,14 +233,13 @@ chfs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
ip->iflag |= (IN_ACCESS | IN_CHANGE | IN_UPDATE);
ip->chvc = chvc;
//ip->chvc->highest_version = 1;
ip->target = NULL;
ip->mode = mode;
vp->v_type = type; /* Rest init'd in getnewvnode(). */
ip->ch_type = VTTOCHT(vp->v_type);
/* Authorize setting SGID if needed. */
/* authorize setting SGID if needed */
if (ip->mode & ISGID) {
error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_WRITE_SECURITY,
vp, NULL, genfs_can_chmod(vp->v_type, cnp->cn_cred, ip->uid,
@ -252,11 +248,11 @@ chfs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
ip->mode &= ~ISGID;
}
/* write vnode information to the flash */
chfs_update(vp, NULL, NULL, UPDATE_WAIT);
mutex_enter(&chmp->chm_lock_mountfields);
//write inode to flash
error = chfs_write_flash_vnode(chmp, ip, ALLOC_NORMAL);
if (error) {
mutex_exit(&chmp->chm_lock_mountfields);
@ -264,7 +260,8 @@ chfs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
vput(dvp);
return error;
}
//update parent directory and write it to the flash
/* update parent's vnode information and write it to the flash */
pdir->iflag |= (IN_ACCESS | IN_CHANGE | IN_MODIFY | IN_UPDATE);
chfs_update(dvp, NULL, NULL, UPDATE_WAIT);
@ -277,18 +274,17 @@ chfs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
}
vput(dvp);
//set up node's full dirent
/* setup directory entry */
nfd = chfs_alloc_dirent(cnp->cn_namelen + 1);
nfd->vno = ip->ino;
nfd->version = (++pdir->chvc->highest_version);
nfd->type = ip->ch_type;
// nfd->next = NULL;
nfd->nsize = cnp->cn_namelen;
memcpy(&(nfd->name), cnp->cn_nameptr, cnp->cn_namelen);
nfd->name[nfd->nsize] = 0;
nfd->nhash = hash32_buf(nfd->name, cnp->cn_namelen, HASH32_BUF_INIT);
// write out direntry
/* write out */
error = chfs_write_flash_dirent(chmp, pdir, ip, nfd, ip->ino, ALLOC_NORMAL);
if (error) {
mutex_exit(&chmp->chm_lock_mountfields);
@ -298,19 +294,9 @@ chfs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
//TODO set parent's dir times
/* add dirent to parent */
chfs_add_fd_to_inode(chmp, pdir, nfd);
/*
fd = pdir->i_chfs_ext.dents;
if (!fd)
pdir->i_chfs_ext.dents = nfd;
else {
while (fd->next != NULL) {
fd = fd->next;
}
fd->next = nfd;
}
*/
//pdir->i_nlink++;
pdir->chvc->nlink++;
mutex_exit(&chmp->chm_lock_mountfields);
@ -319,6 +305,7 @@ chfs_makeinode(int mode, struct vnode *dvp, struct vnode **vpp,
return (0);
}
/* chfs_set_vnode_size - updates size of vnode and also inode */
void
chfs_set_vnode_size(struct vnode *vp, size_t size)
{
@ -334,6 +321,11 @@ chfs_set_vnode_size(struct vnode *vp, size_t size)
return;
}
/*
* chfs_change_size_free - updates free size
* "change" parameter is positive if we have to increase the size
* and negative if we have to decrease it
*/
void
chfs_change_size_free(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, int change)
@ -347,6 +339,11 @@ chfs_change_size_free(struct chfs_mount *chmp,
return;
}
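/*
 * A minimal sketch of how the chfs_change_size_* helpers below are used by
 * the write paths later in this commit: free space is debited by the padded
 * node size before the write and credited as used once the write succeeds.
 * struct sketch_eraseblock is a stand-in, not the real struct chfs_eraseblock.
 */
#include <stdint.h>

struct sketch_eraseblock {
	uint32_t free_size;		/* bytes still writable */
	uint32_t used_size;		/* bytes holding valid nodes */
};

/* "change" is positive to grow a size and negative to shrink it */
static void
sketch_change_size_free(struct sketch_eraseblock *cheb, int change)
{
	cheb->free_size += change;
}

static void
sketch_change_size_used(struct sketch_eraseblock *cheb, int change)
{
	cheb->used_size += change;
}

static void
sketch_account_node_write(struct sketch_eraseblock *cheb, uint32_t padded_len)
{
	sketch_change_size_free(cheb, -(int)padded_len);	/* reserve */
	/* ... node is written to the media here ... */
	sketch_change_size_used(cheb, (int)padded_len);		/* commit */
}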
/*
* chfs_change_size_dirty - updates dirty size
* "change" parameter is positive if we have to increase the size
* and negative if we have to decrease it
*/
void
chfs_change_size_dirty(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, int change)
@ -360,6 +357,11 @@ chfs_change_size_dirty(struct chfs_mount *chmp,
return;
}
/*
* chfs_change_size_unchecked - updates unchecked size
* "change" parameter is positive if we have to increase the size
* and negative if we have to decrease it
*/
void
chfs_change_size_unchecked(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, int change)
@ -373,6 +375,11 @@ chfs_change_size_unchecked(struct chfs_mount *chmp,
return;
}
/*
* chfs_change_size_used - updates used size
* "change" parameter is positive if we have to increase the size
* and negative if we have to decrease it
*/
void
chfs_change_size_used(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, int change)
@ -386,6 +393,11 @@ chfs_change_size_used(struct chfs_mount *chmp,
return;
}
/*
* chfs_change_size_wasted - updates wasted size
* "change" parameter is positive if we have to increase the size
* and negative if we have to decrease it
*/
void
chfs_change_size_wasted(struct chfs_mount *chmp,
struct chfs_eraseblock *cheb, int change)

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_vnode_cache.c,v 1.2 2012/08/10 09:26:58 ttoth Exp $ */
/* $NetBSD: chfs_vnode_cache.c,v 1.3 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -35,6 +35,9 @@
#include "chfs.h"
#include <sys/pool.h>
/* vnode cache is a hashtable for vnodes */
/* chfs_vnocache_hash_init - initializes the hashtable */
struct chfs_vnode_cache **
chfs_vnocache_hash_init(void)
{
@ -42,10 +45,8 @@ chfs_vnocache_hash_init(void)
sizeof(struct chfs_vnode_cache *), KM_SLEEP);
}
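/*
 * A hedged sketch of the chained hashtable described above: each bucket
 * holds a singly linked list of caches and lookups are keyed by the inode
 * number.  The modulo hash and the sketch_ names are assumptions made for
 * illustration only.
 */
#include <stdint.h>
#include <stddef.h>

#define SKETCH_VNOCACHE_SIZE	128

struct sketch_vnode_cache {
	uint64_t vno;				/* inode number */
	struct sketch_vnode_cache *next;	/* next entry in the bucket */
};

static struct sketch_vnode_cache *
sketch_vnocache_get(struct sketch_vnode_cache **hash, uint64_t vno)
{
	struct sketch_vnode_cache *vc;

	for (vc = hash[vno % SKETCH_VNOCACHE_SIZE]; vc != NULL; vc = vc->next)
		if (vc->vno == vno)
			return vc;
	return NULL;				/* not cached yet */
}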
/**
* chfs_get_vnode_cache - get a vnode_cache from the vnocache_hash
* @chmp: fs super block info
* @ino: inode for search
/*
* chfs_vnode_cache_get - get a vnode_cache from the hashtable
* Returns the vnode_cache.
*/
struct chfs_vnode_cache *
@ -72,11 +73,7 @@ chfs_vnode_cache_get(struct chfs_mount *chmp, ino_t vno)
return ret;
}
/**
* chfs_add_vnode_cache - add a vnode_cache to the vnocache_hash
* @chmp: fs super block info
* @new: new vnode_cache
*/
/* chfs_vnode_cache_add - add a vnode_cache to the hashtable */
void
chfs_vnode_cache_add(struct chfs_mount *chmp,
struct chfs_vnode_cache* new)
@ -98,11 +95,7 @@ chfs_vnode_cache_add(struct chfs_mount *chmp,
*prev = new;
}
/**
* chfs_del_vnode_cache - del a vnode_cache from the vnocache_hash
* @chmp: fs super block info
* @old: old vnode_cache
*/
/* chfs_vnode_cache_remove - removes a vnode_cache from the hashtable */
void
chfs_vnode_cache_remove(struct chfs_mount *chmp,
struct chfs_vnode_cache* old)
@ -126,16 +119,14 @@ chfs_vnode_cache_remove(struct chfs_mount *chmp,
}
}
/**
* chfs_free_vnode_caches - free the vnocache_hash
* @chmp: fs super block info
*/
/* chfs_vnocache_hash_destroy - destroys the vnode cache */
void
chfs_vnocache_hash_destroy(struct chfs_vnode_cache **hash)
{
struct chfs_vnode_cache *this, *next;
int i;
/* free every hash bucket's chain */
for (i = 0; i < VNODECACHE_SIZE; i++) {
this = hash[i];
while (this) {
@ -147,4 +138,3 @@ chfs_vnocache_hash_destroy(struct chfs_vnode_cache **hash)
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_vnops.c,v 1.10 2012/08/23 11:29:51 ttoth Exp $ */
/* $NetBSD: chfs_vnops.c,v 1.11 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -70,22 +70,22 @@ chfs_lookup(void *v)
*vpp = NULL;
// Check accessibility of requested node as a first step.
/* Check accessibility of requested node as a first step. */
error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
if (error != 0) {
goto out;
}
// If requesting the last path component on a read-only file system
// with a write operation, deny it.
/* If requesting the last path component on a read-only file system
* with a write operation, deny it. */
if ((cnp->cn_flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY)
&& (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
error = EROFS;
goto out;
}
// Avoid doing a linear scan of the directory if the requested
// directory/name couple is already in the cache.
/* Avoid doing a linear scan of the directory if the requested
* directory/name couple is already in the cache. */
error = cache_lookup(dvp, vpp, cnp);
if (error >= 0) {
goto out;
@ -101,7 +101,7 @@ chfs_lookup(void *v)
chvc = chfs_vnode_cache_get(chmp, ip->ino);
mutex_exit(&chmp->chm_lock_vnocache);
// We cannot be requesting the parent directory of the root node.
/* We cannot be requesting the parent directory of the root node. */
KASSERT(IMPLIES(ip->ch_type == CHT_DIR && chvc->pvno == chvc->vno,
!(cnp->cn_flags & ISDOTDOT)));
@ -118,10 +118,10 @@ chfs_lookup(void *v)
if (fd == NULL) {
dbg("fd null\n");
// The entry was not found in the directory.
// This is OK if we are creating or renaming an
// entry and are working on the last component of
// the path name.
/* The entry was not found in the directory.
* This is OK if we are creating or renaming an
* entry and are working on the last component of
* the path name. */
if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_nameiop == CREATE
|| cnp->cn_nameiop == RENAME)) {
error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
@ -136,10 +136,10 @@ chfs_lookup(void *v)
error = ENOENT;
}
} else {
// If we are not at the last path component and
// found a non-directory or non-link entry (which
// may itself be pointing to a directory), raise
// an error.
/* If we are not at the last path component and
* found a non-directory or non-link entry (which
* may itself be pointing to a directory), raise
* an error. */
if ((fd->type != CHT_DIR && fd->type != CHT_LNK) && !(cnp->cn_flags
& ISLASTCN)) {
error = ENOTDIR;
@ -151,19 +151,17 @@ chfs_lookup(void *v)
error = VFS_VGET(dvp->v_mount, fd->vno, vpp);
}
}
// Store the result of this lookup in the cache. Avoid this if the
// request was for creation, as it does not improve timings on
// emprical tests.
/* Store the result of this lookup in the cache. Avoid this if the
* request was for creation, as it does not improve timings on
 * empirical tests. */
if (cnp->cn_nameiop != CREATE && (cnp->cn_flags & ISDOTDOT) == 0) {
cache_enter(dvp, *vpp, cnp);
}
out:
// If there were no errors, *vpp cannot be null and it must be
// locked.
/* If there were no errors, *vpp cannot be null and it must be
* locked. */
KASSERT(IFF(error == 0, *vpp != NULL && VOP_ISLOCKED(*vpp)));
// dvp must always be locked.
KASSERT(VOP_ISLOCKED(dvp));
return error;
@ -227,6 +225,7 @@ chfs_mknod(void *v)
ump = VFSTOUFS(dvp->v_mount);
chmp = ump->um_chfs;
/* Check type of node. */
if (vap->va_type != VBLK && vap->va_type != VCHR && vap->va_type != VFIFO)
return EINVAL;
@ -250,6 +249,7 @@ chfs_mknod(void *v)
}
}
/* Create a new node. */
err = chfs_makeinode(mode, dvp, &vp, cnp, vap->va_type);
ip = VTOI(vp);
@ -267,6 +267,7 @@ chfs_mknod(void *v)
if (err)
return err;
/* Device is written out as a data node. */
len = sizeof(dev_t);
chfs_set_vnode_size(vp, len);
bp = getiobuf(vp, true);
@ -286,6 +287,7 @@ chfs_mknod(void *v)
return err;
}
/* Add data node to the inode. */
err = chfs_add_full_dnode_to_inode(chmp, ip, fd);
if (err) {
mutex_exit(&chmp->chm_lock_mountfields);
@ -324,7 +326,7 @@ chfs_open(void *v)
goto out;
}
// If the file is marked append-only, deny write requests.
/* If the file is marked append-only, deny write requests. */
if (ip->flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
error = EPERM;
else
@ -350,7 +352,6 @@ chfs_close(void *v)
ip = VTOI(vp);
if (ip->chvc->nlink > 0) {
//ip->chvc->nlink = 0;
chfs_update(vp, NULL, NULL, UPDATE_CLOSE);
}
@ -467,6 +468,7 @@ chfs_setattr(void *v)
return EINVAL;
}
/* set flags */
if (error == 0 && (vap->va_flags != VNOVAL)) {
error = chfs_chflags(vp, vap->va_flags, cred);
return error;
@ -477,30 +479,28 @@ chfs_setattr(void *v)
return error;
}
/* set size */
if (error == 0 && (vap->va_size != VNOVAL)) {
error = chfs_chsize(vp, vap->va_size, cred);
if (error)
return error;
}
/* set owner */
if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL)) {
error = chfs_chown(vp, vap->va_uid, vap->va_gid, cred);
if (error)
return error;
}
/* set mode */
if (error == 0 && (vap->va_mode != VNOVAL)) {
error = chfs_chmod(vp, vap->va_mode, cred);
if (error)
return error;
}
#if 0
/* why do we need that? */
if (ip->flags & (IMMUTABLE | APPEND))
return EPERM;
#endif
/* set time */
if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_TIMES, vp,
NULL, genfs_can_chtimes(vp, vap->va_vaflags, ip->uid, cred));
@ -516,6 +516,7 @@ chfs_setattr(void *v)
return error;
}
/* Write it out. */
mutex_enter(&chmp->chm_lock_mountfields);
error = chfs_write_flash_vnode(chmp, ip, ALLOC_NORMAL);
mutex_exit(&chmp->chm_lock_mountfields);
@ -600,6 +601,7 @@ chfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred)
#define blkroundup(chmp, size) \
(((size) + (chmp)->chm_fs_qbmask) & (chmp)->chm_fs_bmask)
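/*
 * Worked example of the rounding blkroundup() performs, using the constants
 * set up in chfs_mountfs() (bsize 4096, qbmask 4095, bmask -4096): sizes are
 * rounded up to the next 4096-byte block boundary.
 */
#include <stdio.h>

int
main(void)
{
	long qbmask = 4095, bmask = -4096;	/* -4096 == ~4095 */
	long size = 5000;

	/* (5000 + 4095) & ~4095 == 8192: rounded up to a full block */
	printf("%ld\n", (size + qbmask) & bmask);
	return 0;
}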
/* from ffs read */
int
chfs_read(void *v)
{
@ -732,13 +734,11 @@ out:
if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
ip->iflag |= IN_ACCESS;
if ((ap->a_ioflag & IO_SYNC) == IO_SYNC) {
//error = UFS_WAPBL_BEGIN(vp->v_mount);
if (error) {
fstrans_done(vp->v_mount);
return error;
}
error = chfs_update(vp, NULL, NULL, UPDATE_WAIT);
//UFS_WAPBL_END(vp->v_mount);
}
}
@ -751,7 +751,7 @@ out:
/* --------------------------------------------------------------------- */
/*from ffs write*/
/* from ffs write */
int
chfs_write(void *v)
{
@ -781,11 +781,8 @@ chfs_write(void *v)
uio = ap->a_uio;
vp = ap->a_vp;
ip = VTOI(vp);
//dbg("file size (vp): %llu\n", (unsigned long long)vp->v_size);
//dbg("file size (ip): %llu\n", (unsigned long long)ip->i_size);
ump = ip->ump;
//dbg("uio->resid: %d\n", uio->uio_resid);
dbg("write\n");
KASSERT(vp->v_size == ip->size);
@ -834,7 +831,6 @@ chfs_write(void *v)
if (uio->uio_resid == 0)
return (0);
//mutex_enter(&ip->inode_lock);
fstrans_start(vp->v_mount, FSTRANS_SHARED);
flags = ioflag & IO_SYNC ? B_SYNC : 0;
@ -844,15 +840,6 @@ chfs_write(void *v)
osize = ip->size;
error = 0;
/*if ((ioflag & IO_JOURNALLOCKED) == 0) {
error = UFS_WAPBL_BEGIN(vp->v_mount);
if (error) {
fstrans_done(vp->v_mount);
return error;
}
}*/
preallocoff = round_page(blkroundup(chmp,
MAX(osize, uio->uio_offset)));
aflag = ioflag & IO_SYNC ? B_SYNC : 0;
@ -1013,8 +1000,6 @@ out:
chfs_set_vnode_size(vp, vp->v_size);
//dbg("end file size (vp): %llu\n", (unsigned long long)vp->v_size);
//dbg("end file size (ip): %llu\n", (unsigned long long)ip->i_size);
KASSERT(vp->v_size == ip->size);
fstrans_done(vp->v_mount);
@ -1022,8 +1007,6 @@ out:
error = chfs_write_flash_vnode(chmp, ip, ALLOC_NORMAL);
mutex_exit(&chmp->chm_lock_mountfields);
//mutex_exit(&ip->inode_lock);
//dbg("end\n");
return (error);
}
@ -1033,7 +1016,6 @@ out:
int
chfs_fsync(void *v)
{
//dbg("fsync\n");
struct vop_fsync_args /* {
struct vnode *a_vp;
kauth_cred_t a_cred;
@ -1047,8 +1029,6 @@ chfs_fsync(void *v)
return ENODEV;
}
vflushbuf(vp, ap->a_flags);
//struct chfs_inode *ip = VTOI(vp);
//chfs_set_vnode_size(vp, ip->write_size);
return 0;
}
@ -1142,7 +1122,7 @@ chfs_rename(void *v)
struct chfs_inode *oldparent, *old;
struct chfs_inode *newparent;
struct chfs_dirent *fd;//, *oldfd;
struct chfs_dirent *fd;
struct chfs_inode *ip;
int error = 0;
dbg("rename\n");
@ -1157,18 +1137,12 @@ chfs_rename(void *v)
dbg("tvp not null\n");
ip = VTOI(tvp);
if (tvp->v_type == VDIR) {
//TODO: lock
// fd = ip->dents;
// while (fd) {
TAILQ_FOREACH(fd, &ip->dents, fds) {
if (fd->vno) {
//TODO: unlock
error = ENOTEMPTY;
goto out_unlocked;
}
// fd = fd->next;
}
//TODO: unlock
}
error = chfs_do_unlink(ip,
newparent, tcnp->cn_nameptr, tcnp->cn_namelen);
@ -1177,21 +1151,15 @@ chfs_rename(void *v)
VFS_VGET(tdvp->v_mount, old->ino, &tvp);
ip = VTOI(tvp);
// for (oldfd = oldparent->dents;
// oldfd->vno != old->ino;
// oldfd = oldfd->next);
/* link new */
error = chfs_do_link(ip,
newparent, tcnp->cn_nameptr, tcnp->cn_namelen, ip->ch_type);
/* remove old */
error = chfs_do_unlink(old,
oldparent, fcnp->cn_nameptr, fcnp->cn_namelen);
//out:
// if (fchnode != tchnode)
// VOP_UNLOCK(fdvp, 0);
out_unlocked:
// Release target nodes.
/* Release target nodes. */
if (tdvp == tvp)
vrele(tdvp);
else
@ -1199,7 +1167,7 @@ out_unlocked:
if (tvp != NULL)
vput(tvp);
// Release source nodes.
/* Release source nodes. */
vrele(fdvp);
vrele(fvp);
@ -1254,7 +1222,6 @@ chfs_rmdir(void *v)
KASSERT(ip->chvc->vno != ip->chvc->pvno);
// for (fd = ip->dents; fd; fd = fd->next) {
TAILQ_FOREACH(fd, &ip->dents, fds) {
if (fd->vno) {
error = ENOTEMPTY;
@ -1304,6 +1271,7 @@ chfs_symlink(void *v)
ip = VTOI(vp);
/* TODO max symlink len instead of "100" */
if (len < 100) {
/* symlink path stored as a data node */
ip->target = kmem_alloc(len, KM_SLEEP);
memcpy(ip->target, target, len);
chfs_set_vnode_size(vp, len);
@ -1319,12 +1287,14 @@ chfs_symlink(void *v)
mutex_enter(&chmp->chm_lock_mountfields);
/* write out the data node */
err = chfs_write_flash_dnode(chmp, vp, bp, fd);
if (err) {
mutex_exit(&chmp->chm_lock_mountfields);
goto out;
}
/* add it to the inode */
err = chfs_add_full_dnode_to_inode(chmp, ip, fd);
if (err) {
mutex_exit(&chmp->chm_lock_mountfields);
@ -1384,6 +1354,7 @@ chfs_readdir(void *v)
*/
offset = uio->uio_offset;
/* Add this entry. */
if (offset == CHFS_OFFSET_DOT) {
error = chfs_filldir(uio, ip->ino, ".", 1, CHT_DIR);
if (error == -1) {
@ -1395,6 +1366,7 @@ chfs_readdir(void *v)
offset = CHFS_OFFSET_DOTDOT;
}
/* Add parent entry. */
if (offset == CHFS_OFFSET_DOTDOT) {
ump = VFSTOUFS(vp->v_mount);
chmp = ump->um_chfs;
@ -1410,6 +1382,7 @@ chfs_readdir(void *v)
goto outok;
}
/* Does the directory have any entries? */
if (TAILQ_EMPTY(&ip->dents)) {
offset = CHFS_OFFSET_EOF;
} else {
@ -1418,6 +1391,7 @@ chfs_readdir(void *v)
}
if (offset != CHFS_OFFSET_EOF) {
/* Child entries. */
skip = offset - CHFS_OFFSET_FIRST;
TAILQ_FOREACH(fd, &ip->dents, fds) {
@ -1493,6 +1467,7 @@ chfs_inactive(void *v)
KASSERT(VOP_ISLOCKED(vp));
/* Reclaim only if there is no link to the node. */
if (ip->ino) {
chvc = ip->chvc;
if (chvc->nlink)
@ -1517,8 +1492,6 @@ chfs_reclaim(void *v)
struct chfs_mount *chmp = ip->chmp;
struct chfs_dirent *fd;
//dbg("reclaim() | ino: %llu\n", (unsigned long long)ip->ino);
//mutex_enter(&ip->inode_lock);
mutex_enter(&chmp->chm_lock_mountfields);
mutex_enter(&chmp->chm_lock_vnocache);
@ -1527,16 +1500,16 @@ chfs_reclaim(void *v)
chfs_update(vp, NULL, NULL, UPDATE_CLOSE);
/* Clean fragments. */
chfs_kill_fragtree(chmp, &ip->fragtree);
/* Clean dirents. */
fd = TAILQ_FIRST(&ip->dents);
while (fd) {
TAILQ_REMOVE(&ip->dents, fd, fds);
chfs_free_dirent(fd);
fd = TAILQ_FIRST(&ip->dents);
}
//mutex_exit(&ip->inode_lock);
//mutex_destroy(&ip->inode_lock);
cache_purge(vp);
if (ip->devvp) {
@ -1559,15 +1532,6 @@ chfs_reclaim(void *v)
int
chfs_advlock(void *v)
{
//struct vnode *vp = ((struct vop_advlock_args *) v)->a_vp;
dbg("advlock()\n");
/*
struct chfs_node *node;
node = VP_TO_CHFS_NODE(vp);
return lf_advlock(v, &node->chn_lockf, node->chn_size);
*/
return 0;
}
@ -1588,11 +1552,6 @@ chfs_strategy(void *v)
int read = (bp->b_flags & B_READ) ? 1 : 0;
int err = 0;
/* dbg("bp dump:\n");
dbg(" ->b_bcount: %d\n", bp->b_bcount);
dbg(" ->b_resid: %d\n", bp->b_resid);
dbg(" ->b_blkno: %llu\n", (unsigned long long)bp->b_blkno);
dbg(" ->b_error: %d\n", bp->b_error);*/
if (read) {
err = chfs_read_data(chmp, vp, bp);
} else {
@ -1608,16 +1567,11 @@ chfs_strategy(void *v)
ip = VTOI(vp);
err = chfs_add_full_dnode_to_inode(chmp, ip, fd);
/*if (err) {
mutex_exit(&chmp->chm_lock_mountfields);
goto out;
}*/
mutex_exit(&chmp->chm_lock_mountfields);
}
out:
biodone(bp);
//dbg("end\n");
return err;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_wbuf.c,v 1.4 2012/01/16 12:28:47 ahoka Exp $ */
/* $NetBSD: chfs_wbuf.c,v 1.5 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -45,15 +45,14 @@
#define PAGE_DIV(x) ( rounddown((x), chmp->chm_wbuf_pagesize) )
#define PAGE_MOD(x) ( (x) % (chmp->chm_wbuf_pagesize) )
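/*
 * Worked example of PAGE_DIV()/PAGE_MOD(): they split an absolute offset
 * into the start of its write buffer page and the offset inside that page.
 * The 2048-byte page size is only an assumption for the illustration; the
 * real value comes from the flash driver at mount time (chm_wbuf_pagesize).
 */
#include <stdio.h>

#define SKETCH_PAGESIZE		2048u
#define SKETCH_PAGE_DIV(x)	((x) - ((x) % SKETCH_PAGESIZE))	/* rounddown */
#define SKETCH_PAGE_MOD(x)	((x) % SKETCH_PAGESIZE)

int
main(void)
{
	unsigned to = 5000;

	/* prints "page start: 4096, offset in page: 904" */
	printf("page start: %u, offset in page: %u\n",
	    SKETCH_PAGE_DIV(to), SKETCH_PAGE_MOD(to));
	return 0;
}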
/* writebuffer options */
enum {
WBUF_NOPAD,
WBUF_SETPAD
};
/**
/*
* chfs_flush_wbuf - write wbuf to the flash
* @chmp: super block info
* @pad: padding (WBUF_NOPAD / WBUF_SETPAD)
* Returns zero in case of success.
*/
static int
@ -69,11 +68,13 @@ chfs_flush_wbuf(struct chfs_mount *chmp, int pad)
KASSERT(rw_write_held(&chmp->chm_lock_wbuf));
KASSERT(pad == WBUF_SETPAD || pad == WBUF_NOPAD);
/* check padding option */
if (pad == WBUF_SETPAD) {
chmp->chm_wbuf_len = PAD(chmp->chm_wbuf_len);
memset(chmp->chm_wbuf + chmp->chm_wbuf_len, 0,
chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len);
/* add a padding node */
padnode = (void *)(chmp->chm_wbuf + chmp->chm_wbuf_len);
padnode->magic = htole16(CHFS_FS_MAGIC_BITMASK);
padnode->type = htole16(CHFS_NODETYPE_PADDING);
@ -88,18 +89,21 @@ chfs_flush_wbuf(struct chfs_mount *chmp, int pad)
CHFS_OBSOLETE_NODE_MASK;
chmp->chm_wbuf_len = chmp->chm_wbuf_pagesize;
/* change sizes after padding node */
chfs_change_size_free(chmp, chmp->chm_nextblock,
-padnode->length);
chfs_change_size_wasted(chmp, chmp->chm_nextblock,
padnode->length);
}
/* write out the buffer */
ret = chfs_write_leb(chmp, chmp->chm_nextblock->lnr, chmp->chm_wbuf,
chmp->chm_wbuf_ofs, chmp->chm_wbuf_len, &retlen);
if (ret) {
return ret;
}
/* reset the buffer */
memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
chmp->chm_wbuf_ofs += chmp->chm_wbuf_pagesize;
chmp->chm_wbuf_len = 0;
@ -108,35 +112,31 @@ chfs_flush_wbuf(struct chfs_mount *chmp, int pad)
}
/**
* chfs_fill_wbuf - write to wbuf
* @chmp: super block info
* @buf: buffer
* @len: buffer length
/*
* chfs_fill_wbuf - write data to wbuf
 * Returns the number of bytes actually copied into the wbuf.
*/
static size_t
chfs_fill_wbuf(struct chfs_mount *chmp, const u_char *buf, size_t len)
{
/* nothing buffered yet and the data fills at least a whole page: let the caller write it out directly */
if (len && !chmp->chm_wbuf_len && (len >= chmp->chm_wbuf_pagesize)) {
return 0;
}
/* clamp to the space left in the wbuf */
if (len > (chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len)) {
len = chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len;
}
/* write into the wbuf */
memcpy(chmp->chm_wbuf + chmp->chm_wbuf_len, buf, len);
/* update the actual length of writebuffer */
chmp->chm_wbuf_len += (int) len;
return len;
}
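/*
 * A user-space sketch of the fill / flush-when-full protocol the callers
 * below drive chfs_fill_wbuf() with.  It omits the alignment handling and
 * the direct full-page writes of the real chfs_write_wbuf(); the sketch_
 * names are illustrative only.
 */
#include <string.h>
#include <stddef.h>

#define SKETCH_PAGESIZE	2048u

struct sketch_wbuf {
	unsigned char	buf[SKETCH_PAGESIZE];
	size_t		len;		/* bytes currently buffered */
};

/* copy as much of (data, len) as fits; return the amount copied */
static size_t
sketch_fill(struct sketch_wbuf *wb, const unsigned char *data, size_t len)
{
	size_t room = SKETCH_PAGESIZE - wb->len;

	if (len > room)
		len = room;
	memcpy(wb->buf + wb->len, data, len);
	wb->len += len;
	return len;
}

static void
sketch_write(struct sketch_wbuf *wb, const unsigned char *data, size_t len)
{
	while (len > 0) {
		size_t done = sketch_fill(wb, data, len);

		data += done;
		len -= done;
		if (wb->len == SKETCH_PAGESIZE)
			wb->len = 0;	/* a full page would be flushed here */
	}
}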
/**
/*
* chfs_write_wbuf - write to wbuf and then the flash
* @chmp: super block info
* @invecs: io vectors
* @count: num of vectors
* @to: offset of target
 * @retlen: written bytes
* Returns zero in case of success.
*/
int
@ -155,16 +155,12 @@ chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
//dbg("1. wbuf ofs: %zu, len: %zu\n", chmp->chm_wbuf_ofs, chmp->chm_wbuf_len);
if (chmp->chm_wbuf_ofs == 0xffffffff) {
chmp->chm_wbuf_ofs = PAGE_DIV(to);
chmp->chm_wbuf_len = PAGE_MOD(to);
memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
}
//dbg("2. wbuf ofs: %zu, len: %zu\n", chmp->chm_wbuf_ofs, chmp->chm_wbuf_len);
if (EB_ADDRESS(to) != EB_ADDRESS(chmp->chm_wbuf_ofs)) {
if (chmp->chm_wbuf_len) {
ret = chfs_flush_wbuf(chmp, WBUF_SETPAD);
@ -175,8 +171,6 @@ chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
chmp->chm_wbuf_len = PAGE_MOD(to);
}
//dbg("3. wbuf ofs: %zu, len: %zu\n", chmp->chm_wbuf_ofs, chmp->chm_wbuf_len);
if (to != PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len)) {
dbg("to: %llu != %zu\n", (unsigned long long)to,
PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len));
@ -187,7 +181,7 @@ chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
/* adjust alignment offset */
if (chmp->chm_wbuf_len != PAGE_MOD(to)) {
chmp->chm_wbuf_len = PAGE_MOD(to);
/* take care of alignement to next page*/
/* take care of alignment to next page */
if (!chmp->chm_wbuf_len) {
chmp->chm_wbuf_len += chmp->chm_wbuf_pagesize;
ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
@ -200,8 +194,7 @@ chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
int vlen = invecs[invec].iov_len;
u_char* v = invecs[invec].iov_base;
//dbg("invec:%d len:%d\n", invec, vlen);
/* fill the whole wbuf */
wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
@ -209,19 +202,24 @@ chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
goto outerr;
}
}
vlen -= wbuf_retlen;
outvec_to += wbuf_retlen;
v += wbuf_retlen;
donelen += wbuf_retlen;
/* if more residual data remains than fits in a wbuf page,
 * write full pages out directly until the rest fits in the wbuf */
if (vlen >= chmp->chm_wbuf_pagesize) {
ret = chfs_write_leb(chmp, lnr, v, outvec_to, PAGE_DIV(vlen), &wbuf_retlen);
//dbg("fd->write: %zu\n", wbuf_retlen);
vlen -= wbuf_retlen;
outvec_to += wbuf_retlen;
chmp->chm_wbuf_ofs = outvec_to;
v += wbuf_retlen;
donelen += wbuf_retlen;
}
/* write the residual data to the wbuf */
wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
@ -229,12 +227,6 @@ chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
goto outerr;
}
// if we write the last vector, we flush with padding
/*if (invec == count-1) {
ret = chfs_flush_wbuf(chmp, WBUF_SETPAD);
if (ret)
goto outerr;
}*/
outvec_to += wbuf_retlen;
donelen += wbuf_retlen;
}
@ -247,9 +239,13 @@ outerr:
return ret;
}
/*
 * chfs_flush_pending_wbuf - write wbuf to the flash
 * Used when we must flush wbuf right now.
 * If the wbuf has free space, it is padded to a full page before being written out.
*/
int chfs_flush_pending_wbuf(struct chfs_mount *chmp)
{
//dbg("flush pending wbuf\n");
int err;
KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
mutex_enter(&chmp->chm_lock_sizes);

View File

@ -1,4 +1,4 @@
/* $NetBSD: chfs_write.c,v 1.4 2012/08/10 09:26:58 ttoth Exp $ */
/* $NetBSD: chfs_write.c,v 1.5 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -33,18 +33,14 @@
* SUCH DAMAGE.
*/
/*
* chfs_write.c
*
* Created on: 2010.02.17.
* Author: dtengeri
*/
#include <sys/param.h>
#include <sys/buf.h>
#include "chfs.h"
/* chfs_write_flash_vnode - writes out a vnode's information to flash */
int
chfs_write_flash_vnode(struct chfs_mount *chmp,
struct chfs_inode *ip, int prio)
@ -58,6 +54,7 @@ chfs_write_flash_vnode(struct chfs_mount *chmp,
size_t size, retlen;
int err = 0, retries = 0;
/* root vnode is in-memory only */
if (ip->ino == CHFS_ROOTINO)
return 0;
@ -67,9 +64,8 @@ chfs_write_flash_vnode(struct chfs_mount *chmp,
chvc = ip->chvc;
/* setting up flash_vnode members */
/* setting up flash_vnode's fields */
size = sizeof(*fvnode);
//dbg("size: %zu | PADDED: %zu\n", size, CHFS_PAD(size));
fvnode->magic = htole16(CHFS_FS_MAGIC_BITMASK);
fvnode->type = htole16(CHFS_NODETYPE_VNODE);
fvnode->length = htole32(CHFS_PAD(size));
@ -86,10 +82,10 @@ chfs_write_flash_vnode(struct chfs_mount *chmp,
fvnode->uid = htole32(ip->uid);
fvnode->node_crc = htole32(crc32(0, (uint8_t *)fvnode, size - 4));
/* write out flash_vnode */
retry:
/* setting up the next eraseblock where we will write */
if (prio == ALLOC_GC) {
/* the GC calls this function */
/* GC called this function */
err = chfs_reserve_space_gc(chmp, CHFS_PAD(size));
if (err)
goto out;
@ -105,6 +101,7 @@ retry:
goto out;
}
/* allocating a new node reference */
nref = chfs_alloc_node_ref(chmp->chm_nextblock);
if (!nref) {
err = ENOMEM;
@ -113,12 +110,16 @@ retry:
mutex_enter(&chmp->chm_lock_sizes);
/* calculating offset and sizes */
nref->nref_offset = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
chfs_change_size_free(chmp, chmp->chm_nextblock, -CHFS_PAD(size));
vec.iov_base = fvnode;
vec.iov_len = CHFS_PAD(size);
/* write it into the writebuffer */
err = chfs_write_wbuf(chmp, &vec, 1, nref->nref_offset, &retlen);
if (err || retlen != CHFS_PAD(size)) {
/* there was an error during write */
chfs_err("error while writing out flash vnode to the media\n");
chfs_err("err: %d | size: %zu | retlen : %zu\n",
err, CHFS_PAD(size), retlen);
@ -130,15 +131,18 @@ retry:
goto out;
}
/* try again */
retries++;
mutex_exit(&chmp->chm_lock_sizes);
goto retry;
}
//Everything went well
/* everything went well */
chfs_change_size_used(chmp,
&chmp->chm_blocks[nref->nref_lnr], CHFS_PAD(size));
mutex_exit(&chmp->chm_lock_sizes);
/* add the new nref to vnode cache */
mutex_enter(&chmp->chm_lock_vnocache);
chfs_add_vnode_ref_to_vc(chmp, chvc, nref);
mutex_exit(&chmp->chm_lock_vnocache);
@ -148,6 +152,7 @@ out:
return err;
}
/* chfs_write_flash_dirent - writes out a directory entry to flash */
int
chfs_write_flash_dirent(struct chfs_mount *chmp, struct chfs_inode *pdir,
struct chfs_inode *ip, struct chfs_dirent *fd,
@ -165,6 +170,7 @@ chfs_write_flash_dirent(struct chfs_mount *chmp, struct chfs_inode *pdir,
KASSERT(fd->vno != CHFS_ROOTINO);
/* setting up flash_dirent's fields */
fdirent = chfs_alloc_flash_dirent();
if (!fdirent)
return ENOMEM;
@ -174,10 +180,7 @@ chfs_write_flash_dirent(struct chfs_mount *chmp, struct chfs_inode *pdir,
name = kmem_zalloc(namelen, KM_SLEEP);
memcpy(name, fd->name, fd->nsize);
//dbg("namelen: %zu | nsize: %hhu\n", namelen, fd->nsize);
//dbg("size: %zu | PADDED: %zu\n", size, CHFS_PAD(size));
fdirent->magic = htole16(CHFS_FS_MAGIC_BITMASK);
fdirent->type = htole16(CHFS_NODETYPE_DIRENT);
fdirent->length = htole32(CHFS_PAD(size));
@ -192,12 +195,14 @@ chfs_write_flash_dirent(struct chfs_mount *chmp, struct chfs_inode *pdir,
fdirent->name_crc = crc32(0, (uint8_t *)&(fd->name), fd->nsize);
fdirent->node_crc = crc32(0, (uint8_t *)fdirent, sizeof(*fdirent) - 4);
/* the entry's name is written out right after the dirent node */
vec[0].iov_base = fdirent;
vec[0].iov_len = sizeof(*fdirent);
vec[1].iov_base = name;
vec[1].iov_len = namelen;
retry:
/* setting up the next eraseblock where we will write */
if (prio == ALLOC_GC) {
/* the GC calls this function */
err = chfs_reserve_space_gc(chmp, CHFS_PAD(size));
@ -215,6 +220,7 @@ retry:
goto out;
}
/* allocating a new node reference */
nref = chfs_alloc_node_ref(chmp->chm_nextblock);
if (!nref) {
err = ENOMEM;
@ -226,8 +232,10 @@ retry:
nref->nref_offset = chmp->chm_ebh->eb_size - chmp->chm_nextblock->free_size;
chfs_change_size_free(chmp, chmp->chm_nextblock, -CHFS_PAD(size));
/* write it into the writebuffer */
err = chfs_write_wbuf(chmp, vec, 2, nref->nref_offset, &retlen);
if (err || retlen != CHFS_PAD(size)) {
/* there was an error during write */
chfs_err("error while writing out flash dirent node to the media\n");
chfs_err("err: %d | size: %zu | retlen : %zu\n",
err, CHFS_PAD(size), retlen);
@ -239,18 +247,20 @@ retry:
goto out;
}
/* try again */
retries++;
mutex_exit(&chmp->chm_lock_sizes);
goto retry;
}
// Everything went well
/* everything went well */
chfs_change_size_used(chmp,
&chmp->chm_blocks[nref->nref_lnr], CHFS_PAD(size));
mutex_exit(&chmp->chm_lock_sizes);
KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);
/* add the new nref to the directory chain of vnode cache */
fd->nref = nref;
if (prio != ALLOC_DELETION) {
mutex_enter(&chmp->chm_lock_vnocache);
@ -263,12 +273,7 @@ out:
return err;
}
/**
* chfs_write_flash_dnode - write out a data node to flash
* @chmp: chfs mount structure
* @vp: vnode where the data belongs to
* @bp: buffer contains data
*/
/* chfs_write_flash_dnode - writes out a data node to flash */
int
chfs_write_flash_dnode(struct chfs_mount *chmp, struct vnode *vp,
struct buf *bp, struct chfs_full_dnode *fd)
@ -293,11 +298,6 @@ chfs_write_flash_dnode(struct chfs_mount *chmp, struct vnode *vp,
/* initialize flash data node */
ofs = bp->b_blkno * PAGE_SIZE;
//dbg("vp->v_size: %ju, bp->b_blkno: %ju, bp-b_data: %p,"
// " bp->b_resid: %ju\n",
// (uintmax_t )vp->v_size, (uintmax_t )bp->b_blkno,
// bp->b_data, (uintmax_t )bp->b_resid);
//dbg("[XXX]vp->v_size - ofs: %llu\n", (vp->v_size - ofs));
len = MIN((vp->v_size - ofs), bp->b_resid);
size = sizeof(*dnode) + len;
@ -317,13 +317,15 @@ chfs_write_flash_dnode(struct chfs_mount *chmp, struct vnode *vp,
dbg("dnode @%llu %ub v%llu\n", (unsigned long long)dnode->offset,
dnode->data_length, (unsigned long long)dnode->version);
/* pad data if needed */
if (CHFS_PAD(size) - sizeof(*dnode)) {
tmpbuf = kmem_zalloc(CHFS_PAD(size)
- sizeof(*dnode), KM_SLEEP);
memcpy(tmpbuf, bp->b_data, len);
}
/* creating iovecs for wbuf */
/* creating iovecs for writebuffer
* data is written out right after the data node */
vec[0].iov_base = dnode;
vec[0].iov_len = sizeof(*dnode);
vec[1].iov_base = tmpbuf;
@ -333,17 +335,16 @@ chfs_write_flash_dnode(struct chfs_mount *chmp, struct vnode *vp,
fd->size = len;
retry:
/* Reserve space for data node. This will set up the next eraseblock
 * where we will write.
*/
chfs_gc_trigger(chmp);
err = chfs_reserve_space_normal(chmp,
CHFS_PAD(size), ALLOC_NORMAL);
if (err)
goto out;
/* allocating a new node reference */
nref = chfs_alloc_node_ref(chmp->chm_nextblock);
if (!nref) {
err = ENOMEM;
@ -360,11 +361,10 @@ retry:
chfs_change_size_free(chmp,
chmp->chm_nextblock, -CHFS_PAD(size));
//dbg("vno: %llu nref lnr: %u offset: %u\n",
// dnode->vno, nref->nref_lnr, nref->nref_offset);
/* write it into the writebuffer */
err = chfs_write_wbuf(chmp, vec, 2, nref->nref_offset, &retlen);
if (err || retlen != CHFS_PAD(size)) {
/* there was an error during write */
chfs_err("error while writing out flash data node to the media\n");
chfs_err("err: %d | size: %zu | retlen : %zu\n",
err, size, retlen);
@ -376,11 +376,12 @@ retry:
goto out;
}
/* try again */
retries++;
mutex_exit(&chmp->chm_lock_sizes);
goto retry;
}
/* Everything went well */
/* everything went well */
ip->write_size += fd->size;
chfs_change_size_used(chmp,
&chmp->chm_blocks[nref->nref_lnr], CHFS_PAD(size));
@ -392,6 +393,7 @@ retry:
chfs_remove_and_obsolete(chmp, ip->chvc, fd->nref, &ip->chvc->dnode);
}
/* add the new nref to the data node chain of vnode cache */
KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);
fd->nref = nref;
chfs_add_node_to_list(chmp, ip->chvc, nref, &ip->chvc->dnode);
@ -405,13 +407,8 @@ out:
return err;
}
/**
/*
 * chfs_do_link - creates a new directory entry (link) for an existing node
* @old: old node
* @oldfd: dirent of old node
* @parent: parent of new node
* @name: name of new node
* @namelen: length of name
* This function writes the dirent of the new node to the media.
*/
int
@ -422,10 +419,8 @@ chfs_do_link(struct chfs_inode *ip, struct chfs_inode *parent, const char *name,
struct ufsmount *ump = VFSTOUFS(vp->v_mount);
struct chfs_mount *chmp = ump->um_chfs;
struct chfs_dirent *newfd = NULL;
// struct chfs_dirent *fd = NULL;
//dbg("link vno: %llu\n", ip->ino);
/* setting up the new directory entry */
newfd = chfs_alloc_dirent(namelen + 1);
newfd->vno = ip->ino;
@ -433,7 +428,6 @@ chfs_do_link(struct chfs_inode *ip, struct chfs_inode *parent, const char *name,
newfd->nsize = namelen;
memcpy(newfd->name, name, namelen);
newfd->name[newfd->nsize] = 0;
// newfd->next = NULL;
ip->chvc->nlink++;
parent->chvc->nlink++;
@ -442,10 +436,12 @@ chfs_do_link(struct chfs_inode *ip, struct chfs_inode *parent, const char *name,
mutex_enter(&chmp->chm_lock_mountfields);
/* update vnode information */
error = chfs_write_flash_vnode(chmp, ip, ALLOC_NORMAL);
if (error)
return error;
/* write out the new dirent */
error = chfs_write_flash_dirent(chmp,
parent, ip, newfd, ip->ino, ALLOC_NORMAL);
/* TODO: what should we do if error isn't zero? */
@ -454,28 +450,15 @@ chfs_do_link(struct chfs_inode *ip, struct chfs_inode *parent, const char *name,
/* add fd to the fd list */
TAILQ_INSERT_TAIL(&parent->dents, newfd, fds);
#if 0
fd = parent->dents;
if (!fd) {
parent->dents = newfd;
} else {
while (fd->next)
fd = fd->next;
fd->next = newfd;
}
#endif
return error;
}
/**
/*
* chfs_do_unlink - delete a node
* @ip: node what we'd like to delete
* @parent: parent of the node
* @name: name of the node
* @namelen: length of name
* This function set the nlink and vno of the node zero and write its dirent to the media.
 * This function sets the nlink and vno of the node to zero and
 * writes its dirent to the media.
*/
int
chfs_do_unlink(struct chfs_inode *ip,
@ -488,8 +471,6 @@ chfs_do_unlink(struct chfs_inode *ip,
struct chfs_mount *chmp = ump->um_chfs;
struct chfs_node_ref *nref;
//dbg("unlink vno: %llu\n", ip->ino);
vflushbuf(vp, 0);
mutex_enter(&chmp->chm_lock_mountfields);
@ -500,8 +481,10 @@ chfs_do_unlink(struct chfs_inode *ip,
fd->nsize == namelen &&
!memcmp(fd->name, name, fd->nsize)) {
/* remove every fragment of the file */
chfs_kill_fragtree(chmp, &ip->fragtree);
/* decrease number of links to the file */
if (fd->type == CHT_DIR && ip->chvc->nlink == 2)
ip->chvc->nlink = 0;
else
@ -509,6 +492,7 @@ chfs_do_unlink(struct chfs_inode *ip,
fd->type = CHT_BLANK;
/* remove from parent's directory entries */
TAILQ_REMOVE(&parent->dents, fd, fds);
mutex_enter(&chmp->chm_lock_vnocache);
@ -523,26 +507,27 @@ chfs_do_unlink(struct chfs_inode *ip,
dbg("FD->NREF vno: %llu, lnr: %u, ofs: %u\n",
fd->vno, fd->nref->nref_lnr, fd->nref->nref_offset);
// set nref_next field
/* set nref_next field */
chfs_add_node_to_list(chmp, parent->chvc, fd->nref,
&parent->chvc->dirents);
// remove from the list
/* remove from the list */
chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
&parent->chvc->dirents);
// clean dnode list
/* clean dnode list */
while (ip->chvc->dnode != (struct chfs_node_ref *)ip->chvc) {
nref = ip->chvc->dnode;
chfs_remove_frags_of_node(chmp, &ip->fragtree, nref);
chfs_remove_and_obsolete(chmp, ip->chvc, nref, &ip->chvc->dnode);
}
// clean v list
/* clean vnode information (list) */
while (ip->chvc->v != (struct chfs_node_ref *)ip->chvc) {
nref = ip->chvc->v;
chfs_remove_and_obsolete(chmp, ip->chvc, nref, &ip->chvc->v);
}
/* decrease number of links to parent */
parent->chvc->nlink--;
mutex_exit(&chmp->chm_lock_vnocache);

View File

@ -1,4 +1,4 @@
/* $NetBSD: ebh.h,v 1.2 2012/04/13 14:50:35 ttoth Exp $ */
/* $NetBSD: ebh.h,v 1.3 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -32,13 +32,6 @@
* SUCH DAMAGE.
*/
/*
* ebh.h
*
* Created on: 2009.11.03.
* Author: dtengeri
*/
#ifndef EBH_H_
#define EBH_H_

View File

@ -1,4 +1,4 @@
/* $NetBSD: ebh_misc.h,v 1.1 2011/11/24 15:51:32 ahoka Exp $ */
/* $NetBSD: ebh_misc.h,v 1.2 2012/10/19 12:44:39 ttoth Exp $ */
/*-
* Copyright (c) 2010 Department of Software Engineering,
@ -33,20 +33,15 @@
#ifndef EBH_MISC_H_
#define EBH_MISC_H_
/******************************************************************************/
/* EBH specific functions */
/******************************************************************************/
/* EBH specific functions */
#define CHFS_GET_MEMBER_POS(type, member) \
((unsigned long)(&((type *)0)->member))
#define CHFS_GET_LID(lid) (le32toh(lid) & CHFS_LID_DIRTY_BIT_MASK)
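/*
 * CHFS_GET_MEMBER_POS() is the classic hand-rolled offsetof().  A standalone
 * check against the standard macro, using a local stand-in (sketch_hdr)
 * whose leading layout matches the on-flash node header (2 + 2 + 4 bytes
 * before hdr_crc):
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct sketch_hdr {
	uint16_t magic;
	uint16_t type;
	uint32_t length;
	uint32_t hdr_crc;
} __attribute__((__packed__));

#define SKETCH_GET_MEMBER_POS(type, member) \
	((unsigned long)(&((type *)0)->member))

int
main(void)
{
	/* prints "8 8" */
	printf("%lu %zu\n",
	    SKETCH_GET_MEMBER_POS(struct sketch_hdr, hdr_crc),
	    offsetof(struct sketch_hdr, hdr_crc));
	return 0;
}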
/**
/*
* EBH_TREE_DESTROY - destroys an RB-tree and frees the memory of its elements.
* @name - the RB-tree structure's name
* @head - pointer to the RB-tree's head
* @type - type of the elements
*/
#define EBH_TREE_DESTROY(name, head, type) \
{ \
@ -70,11 +65,8 @@
} \
}
/**
/*
* EBH_QUEUE_DESTROY - destroys a TAILQ and frees the memory of its elements.
* @head: pointer to the head of the queue
* @type: type of the elements
* @entry: name of TAILQ_ENTRY
*/
#define EBH_QUEUE_DESTROY(head, type, entry) \
{ \

View File

@ -40,23 +40,20 @@
typedef uint16_t le16;
typedef uint32_t le32;
typedef uint64_t le64;
#endif
/*****************************************************************************/
/* File system specific structures */
/*****************************************************************************/
#endif /* _LE_TYPES */
/* node types */
enum {
CHFS_NODETYPE_VNODE = 1,
CHFS_NODETYPE_DATA,
CHFS_NODETYPE_DIRENT,
CHFS_NODETYPE_PADDING,
CHFS_NODETYPE_VNODE = 1, /* vnode information */
CHFS_NODETYPE_DATA, /* data node */
CHFS_NODETYPE_DIRENT, /* directory entry */
CHFS_NODETYPE_PADDING, /* padding node */
};
//#define CHFS_NODE_HDR_SIZE 12 /* magic + type + length + hdr_crc */
#define CHFS_NODE_HDR_SIZE sizeof(struct chfs_flash_node_hdr)
/* Max size we have to read to get all info.
/*
* Max size we have to read to get all info.
* It is max size of chfs_flash_dirent_node with max name length.
*/
#define CHFS_MAX_NODE_SIZE 299
@ -64,137 +61,83 @@ enum {
/* This will identify CHFS nodes */
#define CHFS_FS_MAGIC_BITMASK 0x4AF1
/**
* struct chfs_flash_node_hdr - node header, its members are same for
* all nodes, used at scan
* @magic: filesystem magic
* @type: node type
* @length: length of node
* @hdr_crc: crc of the first 3 members
/*
* struct chfs_flash_node_hdr -
* node header; its members are the same for all nodes and are used at scan
*/
struct chfs_flash_node_hdr
{
le16 magic;
le16 type;
le32 length;
le32 hdr_crc;
le16 magic; /* filesystem magic */
le16 type; /* node type */
le32 length; /* length of node */
le32 hdr_crc; /* crc of the first 3 fields */
} __packed;
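/*
 * Sketch of the header check done when scanning the media.  hdr_crc covers
 * magic, type and length (everything before hdr_crc).  Hedged: the
 * crc32(seed, buf, len) helper and the exact check order are assumptions,
 * not taken from this header.
 */
static bool
example_hdr_ok(const struct chfs_flash_node_hdr *hdr)
{
	/* CRC over the first 3 fields only */
	uint32_t crc = crc32(0, (const uint8_t *)hdr,
	    CHFS_NODE_HDR_SIZE - sizeof(hdr->hdr_crc));

	return (le16toh(hdr->magic) == CHFS_FS_MAGIC_BITMASK &&
	    le32toh(hdr->hdr_crc) == crc);
}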
/**
* struct chfs_flash_vnode - vnode informations stored on flash
* @magic: filesystem magic
* @type: node type (CHFS_NODETYPE_VNODE)
* @length: length of node
* @hdr_crc: crc of the first 3 members
* @vno: vnode identifier id
* @version: vnode's version number
* @uid: owner of the file
* @gid: group of file
* @mode: permissions for vnode
* @dn_size: size of written out data nodes
* @atime: last access times
* @mtime: last modification time
* @ctime: change time
* @dsize: size of the node's data
* @node_crc: crc of full node
*/
/* struct chfs_flash_vnode - vnode information stored on flash */
struct chfs_flash_vnode
{
le16 magic; /*0 */
le16 type; /*2 */
le32 length; /*4 */
le32 hdr_crc; /*8 */
le64 vno; /*12*/
le64 version; /*20*/
le32 uid; /*28*/
le32 gid; /*32*/
le32 mode; /*36*/
le32 dn_size; /*40*/
le32 atime; /*44*/
le32 mtime; /*48*/
le32 ctime; /*52*/
le32 dsize; /*56*/
le32 node_crc; /*60*/
le16 magic; /* filesystem magic */
le16 type; /* node type (should be CHFS_NODETYPE_VNODE) */
le32 length; /* length of node */
le32 hdr_crc; /* crc of the first 3 fields */
le64 vno; /* vnode number */
le64 version; /* version of node */
le32 uid; /* owner of file */
le32 gid; /* group of file */
le32 mode; /* permission of vnode */
le32 dn_size; /* size of written data */
le32 atime; /* last access time */
le32 mtime; /* last modification time */
le32 ctime; /* change time */
le32 dsize; /* NOT USED, backward compatibility */
le32 node_crc; /* crc of all the previous fields */
} __packed;
/**
* struct chfs_flash_data_node - node informations of data stored on flash
* @magic: filesystem magic
* @type: node type (CHFS_NODETYPE_DATA)
* @length: length of node with data
* @hdr_crc: crc of the first 3 members
* @vno: vnode identifier id
* @version: vnode's version number
* @offset: offset in the file where write begins
* @data_length: length of data
* @data_crc: crc of data
* @node_crc: crc of full node
* @data: array of data
*/
/* struct chfs_flash_data_node - data stored on flash */
struct chfs_flash_data_node
{
le16 magic;
le16 type;
le32 length;
le32 hdr_crc;
le64 vno;
le64 version;
le64 offset;
le32 data_length;
le32 data_crc;
le32 node_crc;
uint8_t data[0];
le16 magic; /* filesystem magic */
le16 type; /* node type (should be CHFS_NODETYPE_DATA) */
le32 length; /* length of node with data */
le32 hdr_crc; /* crc of the first 3 fields */
le64 vno; /* vnode number */
le64 version; /* version of node */
le64 offset; /* offset in the file */
le32 data_length; /* length of data */
le32 data_crc; /* crc of data */
le32 node_crc; /* crc of full node */
uint8_t data[0]; /* data */
} __packed;
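/*
 * Sketch: data[0] is a flexible array member, so the payload follows the
 * fixed fields directly and a whole data node occupies the fixed header plus
 * data_length bytes.  Hedged: how the writer pads this on flash is not shown
 * in this header, and the helper name below is hypothetical.
 */
static size_t
example_data_node_size(const struct chfs_flash_data_node *dn)
{
	return sizeof(*dn) + le32toh(dn->data_length);
}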
/**
* struct chfs_flash_dirent_node - vnode informations stored on flash
* @magic: filesystem magic
* @type: node type (CHFS_NODETYPE_DIRENT)
* @length: length of node
* @hdr_crc: crc of the first 3 members
* @vno: vnode identifier id
* @pvno: vnode identifier id of parent vnode
* @version: vnode's version number
* @mctime:
* @nsize: length of name
* @dtype: file type
* @unused: just for padding
* @name_crc: crc of name
* @node_crc: crc of full node
* @name: name of the directory entry
/*
* struct chfs_flash_dirent_node -
* directory entry information stored on flash
*/
struct chfs_flash_dirent_node
{
le16 magic;
le16 type;
le32 length;
le32 hdr_crc;
le64 vno;
le64 pvno;
le64 version;
le32 mctime;
uint8_t nsize;
uint8_t dtype;
uint8_t unused[2];
le32 name_crc;
le32 node_crc;
uint8_t name[0];
le16 magic; /* filesystem magic */
le16 type; /* node type (should be CHFS_NODETYPE_DIRENT) */
le32 length; /* length of node with name */
le32 hdr_crc; /* crc of the first 3 fields */
le64 vno; /* vnode number */
le64 pvno; /* parent's vnode number */
le64 version; /* version of node */
le32 mctime; /* modification/creation time */
uint8_t nsize; /* length of name */
uint8_t dtype; /* file type */
uint8_t unused[2]; /* just for padding */
le32 name_crc; /* crc of name */
le32 node_crc; /* crc of full node */
uint8_t name[0]; /* name of directory entry */
} __packed;
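/*
 * Sketch: the name follows the fixed fields (name[0] flexible member) and is
 * nsize bytes long.  Hedged assumption: the on-flash name is not
 * NUL-terminated, so a terminator is added while copying it out.  The helper
 * name is hypothetical.
 */
static void
example_copy_dirent_name(const struct chfs_flash_dirent_node *d,
    char *buf, size_t buflen)
{
	size_t n = d->nsize;

	if (n > buflen - 1)
		n = buflen - 1;
	memcpy(buf, d->name, n);
	buf[n] = '\0';
}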
/**
* struct chfs_flash_padding_node - node informations of data stored on
* flash
* @magic: filesystem magic
* @type: node type (CHFS_NODETYPE_PADDING)
* @length: length of node
* @hdr_crc: crc of the first 3 members
*/
/* struct chfs_flash_padding_node - placeholder node on flash */
struct chfs_flash_padding_node
{
le16 magic;
le16 type;
le32 length;
le32 hdr_crc;
le16 magic; /* filesystem magic */
le16 type; /* node type (should be CHFS_NODETYPE_PADDING) */
le32 length; /* length of node */
le32 hdr_crc; /* crc of the first 3 fields */
} __packed;
#endif /* __CHFS_MEDIA_H__ */