diff --git a/external/cddl/osnet/dist/cmd/zdb/zdb.c b/external/cddl/osnet/dist/cmd/zdb/zdb.c new file mode 100644 index 000000000000..48a84d693e44 --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zdb/zdb.c @@ -0,0 +1,2430 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#undef ZFS_MAXNAMELEN +#undef verify +#include + +const char cmdname[] = "zdb"; +uint8_t dump_opt[256]; + +typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size); + +extern void dump_intent_log(zilog_t *); +uint64_t *zopt_object = NULL; +int zopt_objects = 0; +libzfs_handle_t *g_zfs; +boolean_t zdb_sig_user_data = B_TRUE; +int zdb_sig_cksumalg = ZIO_CHECKSUM_SHA256; + +/* + * These libumem hooks provide a reasonable set of defaults for the allocator's + * debugging facilities. + */ +const char * +_umem_debug_init() +{ + return ("default,verbose"); /* $UMEM_DEBUG setting */ +} + +const char * +_umem_logging_init(void) +{ + return ("fail,contents"); /* $UMEM_LOGGING setting */ +} + +static void +usage(void) +{ + (void) fprintf(stderr, + "Usage: %s [-udibcsvL] [-U cachefile_path] " + "[-S user:cksumalg] " + "dataset [object...]\n" + " %s -C [pool]\n" + " %s -l dev\n" + " %s -R pool:vdev:offset:size:flags\n" + " %s [-p path_to_vdev_dir]\n" + " %s -e pool | GUID | devid ...\n", + cmdname, cmdname, cmdname, cmdname, cmdname, cmdname); + + (void) fprintf(stderr, " -u uberblock\n"); + (void) fprintf(stderr, " -d datasets\n"); + (void) fprintf(stderr, " -C cached pool configuration\n"); + (void) fprintf(stderr, " -i intent logs\n"); + (void) fprintf(stderr, " -b block statistics\n"); + (void) fprintf(stderr, " -c checksum all data blocks\n"); + (void) fprintf(stderr, " -s report stats on zdb's I/O\n"); + (void) fprintf(stderr, " -S : -- " + "dump blkptr signatures\n"); + (void) fprintf(stderr, " -v verbose (applies to all others)\n"); + (void) fprintf(stderr, " -l dump label contents\n"); + (void) fprintf(stderr, " -L disable leak tracking (do not " + "load spacemaps)\n"); + (void) fprintf(stderr, " -U cachefile_path -- use alternate " + "cachefile\n"); + (void) fprintf(stderr, " -R read and display block from a " + "device\n"); + (void) fprintf(stderr, " -e Pool is exported/destroyed/" + "has altroot\n"); + (void) fprintf(stderr, " -p (use with -e)\n"); + (void) fprintf(stderr, "Specify an option more than once (e.g. 
-bb) " + "to make only that option verbose\n"); + (void) fprintf(stderr, "Default is to dump everything non-verbosely\n"); + exit(1); +} + +static void +fatal(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + (void) fprintf(stderr, "%s: ", cmdname); + (void) vfprintf(stderr, fmt, ap); + va_end(ap); + (void) fprintf(stderr, "\n"); + + abort(); +} + +static void +dump_nvlist(nvlist_t *list, int indent) +{ + nvpair_t *elem = NULL; + + while ((elem = nvlist_next_nvpair(list, elem)) != NULL) { + switch (nvpair_type(elem)) { + case DATA_TYPE_STRING: + { + char *value; + + VERIFY(nvpair_value_string(elem, &value) == 0); + (void) printf("%*s%s='%s'\n", indent, "", + nvpair_name(elem), value); + } + break; + + case DATA_TYPE_UINT64: + { + uint64_t value; + + VERIFY(nvpair_value_uint64(elem, &value) == 0); + (void) printf("%*s%s=%llu\n", indent, "", + nvpair_name(elem), (u_longlong_t)value); + } + break; + + case DATA_TYPE_NVLIST: + { + nvlist_t *value; + + VERIFY(nvpair_value_nvlist(elem, &value) == 0); + (void) printf("%*s%s\n", indent, "", + nvpair_name(elem)); + dump_nvlist(value, indent + 4); + } + break; + + case DATA_TYPE_NVLIST_ARRAY: + { + nvlist_t **value; + uint_t c, count; + + VERIFY(nvpair_value_nvlist_array(elem, &value, + &count) == 0); + + for (c = 0; c < count; c++) { + (void) printf("%*s%s[%u]\n", indent, "", + nvpair_name(elem), c); + dump_nvlist(value[c], indent + 8); + } + } + break; + + default: + + (void) printf("bad config type %d for %s\n", + nvpair_type(elem), nvpair_name(elem)); + } + } +} + +/* ARGSUSED */ +static void +dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size) +{ + nvlist_t *nv; + size_t nvsize = *(uint64_t *)data; + char *packed = umem_alloc(nvsize, UMEM_NOFAIL); + + VERIFY(0 == dmu_read(os, object, 0, nvsize, packed)); + + VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0); + + umem_free(packed, nvsize); + + dump_nvlist(nv, 8); + + nvlist_free(nv); +} + +const char dump_zap_stars[] = "****************************************"; +const int dump_zap_width = sizeof (dump_zap_stars) - 1; + +static void +dump_zap_histogram(uint64_t histo[ZAP_HISTOGRAM_SIZE]) +{ + int i; + int minidx = ZAP_HISTOGRAM_SIZE - 1; + int maxidx = 0; + uint64_t max = 0; + + for (i = 0; i < ZAP_HISTOGRAM_SIZE; i++) { + if (histo[i] > max) + max = histo[i]; + if (histo[i] > 0 && i > maxidx) + maxidx = i; + if (histo[i] > 0 && i < minidx) + minidx = i; + } + + if (max < dump_zap_width) + max = dump_zap_width; + + for (i = minidx; i <= maxidx; i++) + (void) printf("\t\t\t%u: %6llu %s\n", i, (u_longlong_t)histo[i], + &dump_zap_stars[(max - histo[i]) * dump_zap_width / max]); +} + +static void +dump_zap_stats(objset_t *os, uint64_t object) +{ + int error; + zap_stats_t zs; + + error = zap_get_stats(os, object, &zs); + if (error) + return; + + if (zs.zs_ptrtbl_len == 0) { + ASSERT(zs.zs_num_blocks == 1); + (void) printf("\tmicrozap: %llu bytes, %llu entries\n", + (u_longlong_t)zs.zs_blocksize, + (u_longlong_t)zs.zs_num_entries); + return; + } + + (void) printf("\tFat ZAP stats:\n"); + + (void) printf("\t\tPointer table:\n"); + (void) printf("\t\t\t%llu elements\n", + (u_longlong_t)zs.zs_ptrtbl_len); + (void) printf("\t\t\tzt_blk: %llu\n", + (u_longlong_t)zs.zs_ptrtbl_zt_blk); + (void) printf("\t\t\tzt_numblks: %llu\n", + (u_longlong_t)zs.zs_ptrtbl_zt_numblks); + (void) printf("\t\t\tzt_shift: %llu\n", + (u_longlong_t)zs.zs_ptrtbl_zt_shift); + (void) printf("\t\t\tzt_blks_copied: %llu\n", + (u_longlong_t)zs.zs_ptrtbl_blks_copied); + (void) 
printf("\t\t\tzt_nextblk: %llu\n", + (u_longlong_t)zs.zs_ptrtbl_nextblk); + + (void) printf("\t\tZAP entries: %llu\n", + (u_longlong_t)zs.zs_num_entries); + (void) printf("\t\tLeaf blocks: %llu\n", + (u_longlong_t)zs.zs_num_leafs); + (void) printf("\t\tTotal blocks: %llu\n", + (u_longlong_t)zs.zs_num_blocks); + (void) printf("\t\tzap_block_type: 0x%llx\n", + (u_longlong_t)zs.zs_block_type); + (void) printf("\t\tzap_magic: 0x%llx\n", + (u_longlong_t)zs.zs_magic); + (void) printf("\t\tzap_salt: 0x%llx\n", + (u_longlong_t)zs.zs_salt); + + (void) printf("\t\tLeafs with 2^n pointers:\n"); + dump_zap_histogram(zs.zs_leafs_with_2n_pointers); + + (void) printf("\t\tBlocks with n*5 entries:\n"); + dump_zap_histogram(zs.zs_blocks_with_n5_entries); + + (void) printf("\t\tBlocks n/10 full:\n"); + dump_zap_histogram(zs.zs_blocks_n_tenths_full); + + (void) printf("\t\tEntries with n chunks:\n"); + dump_zap_histogram(zs.zs_entries_using_n_chunks); + + (void) printf("\t\tBuckets with n entries:\n"); + dump_zap_histogram(zs.zs_buckets_with_n_entries); +} + +/*ARGSUSED*/ +static void +dump_none(objset_t *os, uint64_t object, void *data, size_t size) +{ +} + +/*ARGSUSED*/ +void +dump_uint8(objset_t *os, uint64_t object, void *data, size_t size) +{ +} + +/*ARGSUSED*/ +static void +dump_uint64(objset_t *os, uint64_t object, void *data, size_t size) +{ +} + +/*ARGSUSED*/ +static void +dump_zap(objset_t *os, uint64_t object, void *data, size_t size) +{ + zap_cursor_t zc; + zap_attribute_t attr; + void *prop; + int i; + + dump_zap_stats(os, object); + (void) printf("\n"); + + for (zap_cursor_init(&zc, os, object); + zap_cursor_retrieve(&zc, &attr) == 0; + zap_cursor_advance(&zc)) { + (void) printf("\t\t%s = ", attr.za_name); + if (attr.za_num_integers == 0) { + (void) printf("\n"); + continue; + } + prop = umem_zalloc(attr.za_num_integers * + attr.za_integer_length, UMEM_NOFAIL); + (void) zap_lookup(os, object, attr.za_name, + attr.za_integer_length, attr.za_num_integers, prop); + if (attr.za_integer_length == 1) { + (void) printf("%s", (char *)prop); + } else { + for (i = 0; i < attr.za_num_integers; i++) { + switch (attr.za_integer_length) { + case 2: + (void) printf("%u ", + ((uint16_t *)prop)[i]); + break; + case 4: + (void) printf("%u ", + ((uint32_t *)prop)[i]); + break; + case 8: + (void) printf("%lld ", + (u_longlong_t)((int64_t *)prop)[i]); + break; + } + } + } + (void) printf("\n"); + umem_free(prop, attr.za_num_integers * attr.za_integer_length); + } + zap_cursor_fini(&zc); +} + +/*ARGSUSED*/ +static void +dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size) +{ + zap_cursor_t zc; + zap_attribute_t attr; + const char *typenames[] = { + /* 0 */ "not specified", + /* 1 */ "FIFO", + /* 2 */ "Character Device", + /* 3 */ "3 (invalid)", + /* 4 */ "Directory", + /* 5 */ "5 (invalid)", + /* 6 */ "Block Device", + /* 7 */ "7 (invalid)", + /* 8 */ "Regular File", + /* 9 */ "9 (invalid)", + /* 10 */ "Symbolic Link", + /* 11 */ "11 (invalid)", + /* 12 */ "Socket", + /* 13 */ "Door", + /* 14 */ "Event Port", + /* 15 */ "15 (invalid)", + }; + + dump_zap_stats(os, object); + (void) printf("\n"); + + for (zap_cursor_init(&zc, os, object); + zap_cursor_retrieve(&zc, &attr) == 0; + zap_cursor_advance(&zc)) { + (void) printf("\t\t%s = %lld (type: %s)\n", + attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer), + typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]); + } + zap_cursor_fini(&zc); +} + +static void +dump_spacemap(objset_t *os, space_map_obj_t *smo, space_map_t *sm) +{ + uint64_t alloc, offset, 
entry; + uint8_t mapshift = sm->sm_shift; + uint64_t mapstart = sm->sm_start; + char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID", + "INVALID", "INVALID", "INVALID", "INVALID" }; + + if (smo->smo_object == 0) + return; + + /* + * Print out the freelist entries in both encoded and decoded form. + */ + alloc = 0; + for (offset = 0; offset < smo->smo_objsize; offset += sizeof (entry)) { + VERIFY(0 == dmu_read(os, smo->smo_object, offset, + sizeof (entry), &entry)); + if (SM_DEBUG_DECODE(entry)) { + (void) printf("\t\t[%4llu] %s: txg %llu, pass %llu\n", + (u_longlong_t)(offset / sizeof (entry)), + ddata[SM_DEBUG_ACTION_DECODE(entry)], + (u_longlong_t)SM_DEBUG_TXG_DECODE(entry), + (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(entry)); + } else { + (void) printf("\t\t[%4llu] %c range:" + " %08llx-%08llx size: %06llx\n", + (u_longlong_t)(offset / sizeof (entry)), + SM_TYPE_DECODE(entry) == SM_ALLOC ? 'A' : 'F', + (u_longlong_t)((SM_OFFSET_DECODE(entry) << + mapshift) + mapstart), + (u_longlong_t)((SM_OFFSET_DECODE(entry) << + mapshift) + mapstart + (SM_RUN_DECODE(entry) << + mapshift)), + (u_longlong_t)(SM_RUN_DECODE(entry) << mapshift)); + if (SM_TYPE_DECODE(entry) == SM_ALLOC) + alloc += SM_RUN_DECODE(entry) << mapshift; + else + alloc -= SM_RUN_DECODE(entry) << mapshift; + } + } + if (alloc != smo->smo_alloc) { + (void) printf("space_map_object alloc (%llu) INCONSISTENT " + "with space map summary (%llu)\n", + (u_longlong_t)smo->smo_alloc, (u_longlong_t)alloc); + } +} + +static void +dump_metaslab(metaslab_t *msp) +{ + char freebuf[5]; + space_map_obj_t *smo = &msp->ms_smo; + vdev_t *vd = msp->ms_group->mg_vd; + spa_t *spa = vd->vdev_spa; + + nicenum(msp->ms_map.sm_size - smo->smo_alloc, freebuf); + + if (dump_opt['d'] <= 5) { + (void) printf("\t%10llx %10llu %5s\n", + (u_longlong_t)msp->ms_map.sm_start, + (u_longlong_t)smo->smo_object, + freebuf); + return; + } + + (void) printf( + "\tvdev %llu offset %08llx spacemap %4llu free %5s\n", + (u_longlong_t)vd->vdev_id, (u_longlong_t)msp->ms_map.sm_start, + (u_longlong_t)smo->smo_object, freebuf); + + ASSERT(msp->ms_map.sm_size == (1ULL << vd->vdev_ms_shift)); + + dump_spacemap(spa->spa_meta_objset, smo, &msp->ms_map); +} + +static void +dump_metaslabs(spa_t *spa) +{ + vdev_t *rvd = spa->spa_root_vdev; + vdev_t *vd; + int c, m; + + (void) printf("\nMetaslabs:\n"); + + for (c = 0; c < rvd->vdev_children; c++) { + vd = rvd->vdev_child[c]; + + (void) printf("\n vdev %llu\n\n", (u_longlong_t)vd->vdev_id); + + if (dump_opt['d'] <= 5) { + (void) printf("\t%10s %10s %5s\n", + "offset", "spacemap", "free"); + (void) printf("\t%10s %10s %5s\n", + "------", "--------", "----"); + } + for (m = 0; m < vd->vdev_ms_count; m++) + dump_metaslab(vd->vdev_ms[m]); + (void) printf("\n"); + } +} + +static void +dump_dtl(vdev_t *vd, int indent) +{ + avl_tree_t *t = &vd->vdev_dtl_map.sm_root; + space_seg_t *ss; + vdev_t *pvd; + int c; + + if (indent == 0) + (void) printf("\nDirty time logs:\n\n"); + + (void) printf("\t%*s%s\n", indent, "", + vd->vdev_path ? vd->vdev_path : + vd->vdev_parent ? vd->vdev_ops->vdev_op_type : + spa_name(vd->vdev_spa)); + + for (ss = avl_first(t); ss; ss = AVL_NEXT(t, ss)) { + /* + * Everything in this DTL must appear in all parent DTL unions. 
+ */ + for (pvd = vd; pvd; pvd = pvd->vdev_parent) + ASSERT(vdev_dtl_contains(&pvd->vdev_dtl_map, + ss->ss_start, ss->ss_end - ss->ss_start)); + (void) printf("\t%*soutage [%llu,%llu] length %llu\n", + indent, "", + (u_longlong_t)ss->ss_start, + (u_longlong_t)ss->ss_end - 1, + (u_longlong_t)(ss->ss_end - ss->ss_start)); + } + + (void) printf("\n"); + + if (dump_opt['d'] > 5 && vd->vdev_children == 0) { + dump_spacemap(vd->vdev_spa->spa_meta_objset, &vd->vdev_dtl, + &vd->vdev_dtl_map); + (void) printf("\n"); + } + + for (c = 0; c < vd->vdev_children; c++) + dump_dtl(vd->vdev_child[c], indent + 4); +} + +/*ARGSUSED*/ +static void +dump_dnode(objset_t *os, uint64_t object, void *data, size_t size) +{ +} + +static uint64_t +blkid2offset(const dnode_phys_t *dnp, int level, uint64_t blkid) +{ + if (level < 0) + return (blkid); + + return ((blkid << (level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) * + dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); +} + +static void +sprintf_blkptr_compact(char *blkbuf, blkptr_t *bp, int alldvas) +{ + dva_t *dva = bp->blk_dva; + int ndvas = alldvas ? BP_GET_NDVAS(bp) : 1; + int i; + + blkbuf[0] = '\0'; + + for (i = 0; i < ndvas; i++) + (void) sprintf(blkbuf + strlen(blkbuf), "%llu:%llx:%llx ", + (u_longlong_t)DVA_GET_VDEV(&dva[i]), + (u_longlong_t)DVA_GET_OFFSET(&dva[i]), + (u_longlong_t)DVA_GET_ASIZE(&dva[i])); + + (void) sprintf(blkbuf + strlen(blkbuf), "%llxL/%llxP F=%llu B=%llu", + (u_longlong_t)BP_GET_LSIZE(bp), + (u_longlong_t)BP_GET_PSIZE(bp), + (u_longlong_t)bp->blk_fill, + (u_longlong_t)bp->blk_birth); +} + +static void +print_indirect(blkptr_t *bp, const zbookmark_t *zb, + const dnode_phys_t *dnp) +{ + char blkbuf[BP_SPRINTF_LEN]; + int l; + + ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type); + ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level); + + (void) printf("%16llx ", + (u_longlong_t)blkid2offset(dnp, zb->zb_level, zb->zb_blkid)); + + ASSERT(zb->zb_level >= 0); + + for (l = dnp->dn_nlevels - 1; l >= -1; l--) { + if (l == zb->zb_level) { + (void) printf("L%llx", (u_longlong_t)zb->zb_level); + } else { + (void) printf(" "); + } + } + + sprintf_blkptr_compact(blkbuf, bp, dump_opt['d'] > 5 ? 
1 : 0); + (void) printf("%s\n", blkbuf); +} + +#define SET_BOOKMARK(zb, objset, object, level, blkid) \ +{ \ + (zb)->zb_objset = objset; \ + (zb)->zb_object = object; \ + (zb)->zb_level = level; \ + (zb)->zb_blkid = blkid; \ +} + +static int +visit_indirect(spa_t *spa, const dnode_phys_t *dnp, + blkptr_t *bp, const zbookmark_t *zb) +{ + int err; + + if (bp->blk_birth == 0) + return (0); + + print_indirect(bp, zb, dnp); + + if (BP_GET_LEVEL(bp) > 0) { + uint32_t flags = ARC_WAIT; + int i; + blkptr_t *cbp; + int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; + arc_buf_t *buf; + uint64_t fill = 0; + + err = arc_read_nolock(NULL, spa, bp, arc_getbuf_func, &buf, + ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); + if (err) + return (err); + + /* recursively visit blocks below this */ + cbp = buf->b_data; + for (i = 0; i < epb; i++, cbp++) { + zbookmark_t czb; + + SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, + zb->zb_level - 1, + zb->zb_blkid * epb + i); + err = visit_indirect(spa, dnp, cbp, &czb); + if (err) + break; + fill += cbp->blk_fill; + } + ASSERT3U(fill, ==, bp->blk_fill); + (void) arc_buf_remove_ref(buf, &buf); + } + + return (err); +} + +/*ARGSUSED*/ +static void +dump_indirect(dnode_t *dn) +{ + dnode_phys_t *dnp = dn->dn_phys; + int j; + zbookmark_t czb; + + (void) printf("Indirect blocks:\n"); + + SET_BOOKMARK(&czb, dmu_objset_id(&dn->dn_objset->os), + dn->dn_object, dnp->dn_nlevels - 1, 0); + for (j = 0; j < dnp->dn_nblkptr; j++) { + czb.zb_blkid = j; + (void) visit_indirect(dmu_objset_spa(&dn->dn_objset->os), dnp, + &dnp->dn_blkptr[j], &czb); + } + + (void) printf("\n"); +} + +/*ARGSUSED*/ +static void +dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size) +{ + dsl_dir_phys_t *dd = data; + time_t crtime; + char nice[6]; + + if (dd == NULL) + return; + + ASSERT3U(size, >=, sizeof (dsl_dir_phys_t)); + + crtime = dd->dd_creation_time; + (void) printf("\t\tcreation_time = %s", ctime(&crtime)); + (void) printf("\t\thead_dataset_obj = %llu\n", + (u_longlong_t)dd->dd_head_dataset_obj); + (void) printf("\t\tparent_dir_obj = %llu\n", + (u_longlong_t)dd->dd_parent_obj); + (void) printf("\t\torigin_obj = %llu\n", + (u_longlong_t)dd->dd_origin_obj); + (void) printf("\t\tchild_dir_zapobj = %llu\n", + (u_longlong_t)dd->dd_child_dir_zapobj); + nicenum(dd->dd_used_bytes, nice); + (void) printf("\t\tused_bytes = %s\n", nice); + nicenum(dd->dd_compressed_bytes, nice); + (void) printf("\t\tcompressed_bytes = %s\n", nice); + nicenum(dd->dd_uncompressed_bytes, nice); + (void) printf("\t\tuncompressed_bytes = %s\n", nice); + nicenum(dd->dd_quota, nice); + (void) printf("\t\tquota = %s\n", nice); + nicenum(dd->dd_reserved, nice); + (void) printf("\t\treserved = %s\n", nice); + (void) printf("\t\tprops_zapobj = %llu\n", + (u_longlong_t)dd->dd_props_zapobj); + (void) printf("\t\tdeleg_zapobj = %llu\n", + (u_longlong_t)dd->dd_deleg_zapobj); + (void) printf("\t\tflags = %llx\n", + (u_longlong_t)dd->dd_flags); + +#define DO(which) \ + nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice); \ + (void) printf("\t\tused_breakdown[" #which "] = %s\n", nice) + DO(HEAD); + DO(SNAP); + DO(CHILD); + DO(CHILD_RSRV); + DO(REFRSRV); +#undef DO +} + +/*ARGSUSED*/ +static void +dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size) +{ + dsl_dataset_phys_t *ds = data; + time_t crtime; + char used[6], compressed[6], uncompressed[6], unique[6]; + char blkbuf[BP_SPRINTF_LEN]; + + if (ds == NULL) + return; + + ASSERT(size == sizeof (*ds)); + crtime = ds->ds_creation_time; + 
nicenum(ds->ds_used_bytes, used); + nicenum(ds->ds_compressed_bytes, compressed); + nicenum(ds->ds_uncompressed_bytes, uncompressed); + nicenum(ds->ds_unique_bytes, unique); + sprintf_blkptr(blkbuf, BP_SPRINTF_LEN, &ds->ds_bp); + + (void) printf("\t\tdir_obj = %llu\n", + (u_longlong_t)ds->ds_dir_obj); + (void) printf("\t\tprev_snap_obj = %llu\n", + (u_longlong_t)ds->ds_prev_snap_obj); + (void) printf("\t\tprev_snap_txg = %llu\n", + (u_longlong_t)ds->ds_prev_snap_txg); + (void) printf("\t\tnext_snap_obj = %llu\n", + (u_longlong_t)ds->ds_next_snap_obj); + (void) printf("\t\tsnapnames_zapobj = %llu\n", + (u_longlong_t)ds->ds_snapnames_zapobj); + (void) printf("\t\tnum_children = %llu\n", + (u_longlong_t)ds->ds_num_children); + (void) printf("\t\tcreation_time = %s", ctime(&crtime)); + (void) printf("\t\tcreation_txg = %llu\n", + (u_longlong_t)ds->ds_creation_txg); + (void) printf("\t\tdeadlist_obj = %llu\n", + (u_longlong_t)ds->ds_deadlist_obj); + (void) printf("\t\tused_bytes = %s\n", used); + (void) printf("\t\tcompressed_bytes = %s\n", compressed); + (void) printf("\t\tuncompressed_bytes = %s\n", uncompressed); + (void) printf("\t\tunique = %s\n", unique); + (void) printf("\t\tfsid_guid = %llu\n", + (u_longlong_t)ds->ds_fsid_guid); + (void) printf("\t\tguid = %llu\n", + (u_longlong_t)ds->ds_guid); + (void) printf("\t\tflags = %llx\n", + (u_longlong_t)ds->ds_flags); + (void) printf("\t\tnext_clones_obj = %llu\n", + (u_longlong_t)ds->ds_next_clones_obj); + (void) printf("\t\tprops_obj = %llu\n", + (u_longlong_t)ds->ds_props_obj); + (void) printf("\t\tbp = %s\n", blkbuf); +} + +static void +dump_bplist(objset_t *mos, uint64_t object, char *name) +{ + bplist_t bpl = { 0 }; + blkptr_t blk, *bp = &blk; + uint64_t itor = 0; + char bytes[6]; + char comp[6]; + char uncomp[6]; + + if (dump_opt['d'] < 3) + return; + + mutex_init(&bpl.bpl_lock, NULL, MUTEX_DEFAULT, NULL); + VERIFY(0 == bplist_open(&bpl, mos, object)); + if (bplist_empty(&bpl)) { + bplist_close(&bpl); + mutex_destroy(&bpl.bpl_lock); + return; + } + + nicenum(bpl.bpl_phys->bpl_bytes, bytes); + if (bpl.bpl_dbuf->db_size == sizeof (bplist_phys_t)) { + nicenum(bpl.bpl_phys->bpl_comp, comp); + nicenum(bpl.bpl_phys->bpl_uncomp, uncomp); + (void) printf("\n %s: %llu entries, %s (%s/%s comp)\n", + name, (u_longlong_t)bpl.bpl_phys->bpl_entries, + bytes, comp, uncomp); + } else { + (void) printf("\n %s: %llu entries, %s\n", + name, (u_longlong_t)bpl.bpl_phys->bpl_entries, bytes); + } + + if (dump_opt['d'] < 5) { + bplist_close(&bpl); + mutex_destroy(&bpl.bpl_lock); + return; + } + + (void) printf("\n"); + + while (bplist_iterate(&bpl, &itor, bp) == 0) { + char blkbuf[BP_SPRINTF_LEN]; + + ASSERT(bp->blk_birth != 0); + sprintf_blkptr_compact(blkbuf, bp, dump_opt['d'] > 5 ? 1 : 0); + (void) printf("\tItem %3llu: %s\n", + (u_longlong_t)itor - 1, blkbuf); + } + + bplist_close(&bpl); + mutex_destroy(&bpl.bpl_lock); +} + +static avl_tree_t idx_tree; +static avl_tree_t domain_tree; +static boolean_t fuid_table_loaded; + +static void +fuid_table_destroy() +{ + if (fuid_table_loaded) { + zfs_fuid_table_destroy(&idx_tree, &domain_tree); + fuid_table_loaded = B_FALSE; + } +} + +/* + * print uid or gid information. + * For normal POSIX id just the id is printed in decimal format. + * For CIFS files with FUID the fuid is printed in hex followed by + * the doman-rid string. 
+ */ +static void +print_idstr(uint64_t id, const char *id_type) +{ + if (FUID_INDEX(id)) { + char *domain; + + domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id)); + (void) printf("\t%s %llx [%s-%d]\n", id_type, + (u_longlong_t)id, domain, (int)FUID_RID(id)); + } else { + (void) printf("\t%s %llu\n", id_type, (u_longlong_t)id); + } + +} + +static void +dump_uidgid(objset_t *os, znode_phys_t *zp) +{ + uint32_t uid_idx, gid_idx; + + uid_idx = FUID_INDEX(zp->zp_uid); + gid_idx = FUID_INDEX(zp->zp_gid); + + /* Load domain table, if not already loaded */ + if (!fuid_table_loaded && (uid_idx || gid_idx)) { + uint64_t fuid_obj; + + /* first find the fuid object. It lives in the master node */ + VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, + 8, 1, &fuid_obj) == 0); + (void) zfs_fuid_table_load(os, fuid_obj, + &idx_tree, &domain_tree); + fuid_table_loaded = B_TRUE; + } + + print_idstr(zp->zp_uid, "uid"); + print_idstr(zp->zp_gid, "gid"); +} + +/*ARGSUSED*/ +static void +dump_znode(objset_t *os, uint64_t object, void *data, size_t size) +{ + znode_phys_t *zp = data; + time_t z_crtime, z_atime, z_mtime, z_ctime; + char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */ + int error; + + ASSERT(size >= sizeof (znode_phys_t)); + + error = zfs_obj_to_path(os, object, path, sizeof (path)); + if (error != 0) { + (void) snprintf(path, sizeof (path), "\?\?\?<object#%llu>", + (u_longlong_t)object); + } + + if (dump_opt['d'] < 3) { + (void) printf("\t%s\n", path); + return; + } + + z_crtime = (time_t)zp->zp_crtime[0]; + z_atime = (time_t)zp->zp_atime[0]; + z_mtime = (time_t)zp->zp_mtime[0]; + z_ctime = (time_t)zp->zp_ctime[0]; + + (void) printf("\tpath %s\n", path); + dump_uidgid(os, zp); + (void) printf("\tatime %s", ctime(&z_atime)); + (void) printf("\tmtime %s", ctime(&z_mtime)); + (void) printf("\tctime %s", ctime(&z_ctime)); + (void) printf("\tcrtime %s", ctime(&z_crtime)); + (void) printf("\tgen %llu\n", (u_longlong_t)zp->zp_gen); + (void) printf("\tmode %llo\n", (u_longlong_t)zp->zp_mode); + (void) printf("\tsize %llu\n", (u_longlong_t)zp->zp_size); + (void) printf("\tparent %llu\n", (u_longlong_t)zp->zp_parent); + (void) printf("\tlinks %llu\n", (u_longlong_t)zp->zp_links); + (void) printf("\txattr %llu\n", (u_longlong_t)zp->zp_xattr); + (void) printf("\trdev 0x%016llx\n", (u_longlong_t)zp->zp_rdev); +} + +/*ARGSUSED*/ +static void +dump_acl(objset_t *os, uint64_t object, void *data, size_t size) +{ +} + +/*ARGSUSED*/ +static void +dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size) +{ +} + +static object_viewer_t *object_viewer[DMU_OT_NUMTYPES] = { + dump_none, /* unallocated */ + dump_zap, /* object directory */ + dump_uint64, /* object array */ + dump_none, /* packed nvlist */ + dump_packed_nvlist, /* packed nvlist size */ + dump_none, /* bplist */ + dump_none, /* bplist header */ + dump_none, /* SPA space map header */ + dump_none, /* SPA space map */ + dump_none, /* ZIL intent log */ + dump_dnode, /* DMU dnode */ + dump_dmu_objset, /* DMU objset */ + dump_dsl_dir, /* DSL directory */ + dump_zap, /* DSL directory child map */ + dump_zap, /* DSL dataset snap map */ + dump_zap, /* DSL props */ + dump_dsl_dataset, /* DSL dataset */ + dump_znode, /* ZFS znode */ + dump_acl, /* ZFS V0 ACL */ + dump_uint8, /* ZFS plain file */ + dump_zpldir, /* ZFS directory */ + dump_zap, /* ZFS master node */ + dump_zap, /* ZFS delete queue */ + dump_uint8, /* zvol object */ + dump_zap, /* zvol prop */ + dump_uint8, /* other uint8[] */ + dump_uint64, /* other uint64[] */ + dump_zap, 
/* other ZAP */ + dump_zap, /* persistent error log */ + dump_uint8, /* SPA history */ + dump_uint64, /* SPA history offsets */ + dump_zap, /* Pool properties */ + dump_zap, /* DSL permissions */ + dump_acl, /* ZFS ACL */ + dump_uint8, /* ZFS SYSACL */ + dump_none, /* FUID nvlist */ + dump_packed_nvlist, /* FUID nvlist size */ + dump_zap, /* DSL dataset next clones */ + dump_zap, /* DSL scrub queue */ +}; + +static void +dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header) +{ + dmu_buf_t *db = NULL; + dmu_object_info_t doi; + dnode_t *dn; + void *bonus = NULL; + size_t bsize = 0; + char iblk[6], dblk[6], lsize[6], asize[6], bonus_size[6], segsize[6]; + char aux[50]; + int error; + + if (*print_header) { + (void) printf("\n Object lvl iblk dblk lsize" + " asize type\n"); + *print_header = 0; + } + + if (object == 0) { + dn = os->os->os_meta_dnode; + } else { + error = dmu_bonus_hold(os, object, FTAG, &db); + if (error) + fatal("dmu_bonus_hold(%llu) failed, errno %u", + object, error); + bonus = db->db_data; + bsize = db->db_size; + dn = ((dmu_buf_impl_t *)db)->db_dnode; + } + dmu_object_info_from_dnode(dn, &doi); + + nicenum(doi.doi_metadata_block_size, iblk); + nicenum(doi.doi_data_block_size, dblk); + nicenum(doi.doi_data_block_size * (doi.doi_max_block_offset + 1), + lsize); + nicenum(doi.doi_physical_blks << 9, asize); + nicenum(doi.doi_bonus_size, bonus_size); + + aux[0] = '\0'; + + if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) { + (void) snprintf(aux + strlen(aux), sizeof (aux), " (K=%s)", + zio_checksum_table[doi.doi_checksum].ci_name); + } + + if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) { + (void) snprintf(aux + strlen(aux), sizeof (aux), " (Z=%s)", + zio_compress_table[doi.doi_compress].ci_name); + } + + (void) printf("%10lld %3u %5s %5s %5s %5s %s%s\n", + (u_longlong_t)object, doi.doi_indirection, iblk, dblk, lsize, + asize, dmu_ot[doi.doi_type].ot_name, aux); + + if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) { + (void) printf("%10s %3s %5s %5s %5s %5s %s\n", + "", "", "", "", bonus_size, "bonus", + dmu_ot[doi.doi_bonus_type].ot_name); + } + + if (verbosity >= 4) { + object_viewer[doi.doi_bonus_type](os, object, bonus, bsize); + object_viewer[doi.doi_type](os, object, NULL, 0); + *print_header = 1; + } + + if (verbosity >= 5) + dump_indirect(dn); + + if (verbosity >= 5) { + /* + * Report the list of segments that comprise the object. 
+ */ + uint64_t start = 0; + uint64_t end; + uint64_t blkfill = 1; + int minlvl = 1; + + if (dn->dn_type == DMU_OT_DNODE) { + minlvl = 0; + blkfill = DNODES_PER_BLOCK; + } + + for (;;) { + error = dnode_next_offset(dn, + 0, &start, minlvl, blkfill, 0); + if (error) + break; + end = start; + error = dnode_next_offset(dn, + DNODE_FIND_HOLE, &end, minlvl, blkfill, 0); + nicenum(end - start, segsize); + (void) printf("\t\tsegment [%016llx, %016llx)" + " size %5s\n", (u_longlong_t)start, + (u_longlong_t)end, segsize); + if (error) + break; + start = end; + } + } + + if (db != NULL) + dmu_buf_rele(db, FTAG); +} + +static char *objset_types[DMU_OST_NUMTYPES] = { + "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" }; + +static void +dump_dir(objset_t *os) +{ + dmu_objset_stats_t dds; + uint64_t object, object_count; + uint64_t refdbytes, usedobjs, scratch; + char numbuf[8]; + char blkbuf[BP_SPRINTF_LEN]; + char osname[MAXNAMELEN]; + char *type = "UNKNOWN"; + int verbosity = dump_opt['d']; + int print_header = 1; + int i, error; + + dmu_objset_fast_stat(os, &dds); + + if (dds.dds_type < DMU_OST_NUMTYPES) + type = objset_types[dds.dds_type]; + + if (dds.dds_type == DMU_OST_META) { + dds.dds_creation_txg = TXG_INITIAL; + usedobjs = os->os->os_rootbp->blk_fill; + refdbytes = os->os->os_spa->spa_dsl_pool-> + dp_mos_dir->dd_phys->dd_used_bytes; + } else { + dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch); + } + + ASSERT3U(usedobjs, ==, os->os->os_rootbp->blk_fill); + + nicenum(refdbytes, numbuf); + + if (verbosity >= 4) { + (void) strcpy(blkbuf, ", rootbp "); + sprintf_blkptr(blkbuf + strlen(blkbuf), + BP_SPRINTF_LEN - strlen(blkbuf), os->os->os_rootbp); + } else { + blkbuf[0] = '\0'; + } + + dmu_objset_name(os, osname); + + (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, " + "%s, %llu objects%s\n", + osname, type, (u_longlong_t)dmu_objset_id(os), + (u_longlong_t)dds.dds_creation_txg, + numbuf, (u_longlong_t)usedobjs, blkbuf); + + dump_intent_log(dmu_objset_zil(os)); + + if (dmu_objset_ds(os) != NULL) + dump_bplist(dmu_objset_pool(os)->dp_meta_objset, + dmu_objset_ds(os)->ds_phys->ds_deadlist_obj, "Deadlist"); + + if (verbosity < 2) + return; + + if (os->os->os_rootbp->blk_birth == 0) + return; + + if (zopt_objects != 0) { + for (i = 0; i < zopt_objects; i++) + dump_object(os, zopt_object[i], verbosity, + &print_header); + (void) printf("\n"); + return; + } + + dump_object(os, 0, verbosity, &print_header); + object_count = 1; + + object = 0; + while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) { + dump_object(os, object, verbosity, &print_header); + object_count++; + } + + ASSERT3U(object_count, ==, usedobjs); + + (void) printf("\n"); + + if (error != ESRCH) + fatal("dmu_object_next() = %d", error); +} + +static void +dump_uberblock(uberblock_t *ub) +{ + time_t timestamp = ub->ub_timestamp; + + (void) printf("Uberblock\n\n"); + (void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic); + (void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version); + (void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg); + (void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum); + (void) printf("\ttimestamp = %llu UTC = %s", + (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp))); + if (dump_opt['u'] >= 3) { + char blkbuf[BP_SPRINTF_LEN]; + sprintf_blkptr(blkbuf, BP_SPRINTF_LEN, &ub->ub_rootbp); + (void) printf("\trootbp = %s\n", blkbuf); + } + (void) printf("\n"); +} + +static void +dump_config(const char *pool) +{ + spa_t *spa = NULL; + + 
mutex_enter(&spa_namespace_lock); + while ((spa = spa_next(spa)) != NULL) { + if (pool == NULL) + (void) printf("%s\n", spa_name(spa)); + if (pool == NULL || strcmp(pool, spa_name(spa)) == 0) + dump_nvlist(spa->spa_config, 4); + } + mutex_exit(&spa_namespace_lock); +} + +static void +dump_cachefile(const char *cachefile) +{ + int fd; + struct stat64 statbuf; + char *buf; + nvlist_t *config; + + if ((fd = open64(cachefile, O_RDONLY)) < 0) { + (void) printf("cannot open '%s': %s\n", cachefile, + strerror(errno)); + exit(1); + } + + if (fstat64(fd, &statbuf) != 0) { + (void) printf("failed to stat '%s': %s\n", cachefile, + strerror(errno)); + exit(1); + } + + if ((buf = malloc(statbuf.st_size)) == NULL) { + (void) fprintf(stderr, "failed to allocate %llu bytes\n", + (u_longlong_t)statbuf.st_size); + exit(1); + } + + if (read(fd, buf, statbuf.st_size) != statbuf.st_size) { + (void) fprintf(stderr, "failed to read %llu bytes\n", + (u_longlong_t)statbuf.st_size); + exit(1); + } + + (void) close(fd); + + if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) { + (void) fprintf(stderr, "failed to unpack nvlist\n"); + exit(1); + } + + free(buf); + + dump_nvlist(config, 0); + + nvlist_free(config); +} + +static void +dump_label(const char *dev) +{ + int fd; + vdev_label_t label; + char *buf = label.vl_vdev_phys.vp_nvlist; + size_t buflen = sizeof (label.vl_vdev_phys.vp_nvlist); + struct stat64 statbuf; + uint64_t psize; + int l; + + if ((fd = open64(dev, O_RDONLY)) < 0) { + (void) printf("cannot open '%s': %s\n", dev, strerror(errno)); + exit(1); + } + + if (fstat64(fd, &statbuf) != 0) { + (void) printf("failed to stat '%s': %s\n", dev, + strerror(errno)); + exit(1); + } + + psize = statbuf.st_size; + psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t)); + + for (l = 0; l < VDEV_LABELS; l++) { + + nvlist_t *config = NULL; + + (void) printf("--------------------------------------------\n"); + (void) printf("LABEL %d\n", l); + (void) printf("--------------------------------------------\n"); + + if (pread64(fd, &label, sizeof (label), + vdev_label_offset(psize, l, 0)) != sizeof (label)) { + (void) printf("failed to read label %d\n", l); + continue; + } + + if (nvlist_unpack(buf, buflen, &config, 0) != 0) { + (void) printf("failed to unpack label %d\n", l); + continue; + } + dump_nvlist(config, 4); + nvlist_free(config); + } +} + +/*ARGSUSED*/ +static int +dump_one_dir(char *dsname, void *arg) +{ + int error; + objset_t *os; + + error = dmu_objset_open(dsname, DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &os); + if (error) { + (void) printf("Could not open %s\n", dsname); + return (0); + } + dump_dir(os); + dmu_objset_close(os); + fuid_table_destroy(); + return (0); +} + +static void +zdb_leak(space_map_t *sm, uint64_t start, uint64_t size) +{ + vdev_t *vd = sm->sm_ppd; + + (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n", + (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size); +} + +/* ARGSUSED */ +static void +zdb_space_map_load(space_map_t *sm) +{ +} + +static void +zdb_space_map_unload(space_map_t *sm) +{ + space_map_vacate(sm, zdb_leak, sm); +} + +/* ARGSUSED */ +static void +zdb_space_map_claim(space_map_t *sm, uint64_t start, uint64_t size) +{ +} + +static space_map_ops_t zdb_space_map_ops = { + zdb_space_map_load, + zdb_space_map_unload, + NULL, /* alloc */ + zdb_space_map_claim, + NULL /* free */ +}; + +static void +zdb_leak_init(spa_t *spa) +{ + vdev_t *rvd = spa->spa_root_vdev; + + for (int c = 0; c < rvd->vdev_children; c++) { + vdev_t *vd = 
rvd->vdev_child[c]; + for (int m = 0; m < vd->vdev_ms_count; m++) { + metaslab_t *msp = vd->vdev_ms[m]; + mutex_enter(&msp->ms_lock); + VERIFY(space_map_load(&msp->ms_map, &zdb_space_map_ops, + SM_ALLOC, &msp->ms_smo, spa->spa_meta_objset) == 0); + msp->ms_map.sm_ppd = vd; + mutex_exit(&msp->ms_lock); + } + } +} + +static void +zdb_leak_fini(spa_t *spa) +{ + vdev_t *rvd = spa->spa_root_vdev; + + for (int c = 0; c < rvd->vdev_children; c++) { + vdev_t *vd = rvd->vdev_child[c]; + for (int m = 0; m < vd->vdev_ms_count; m++) { + metaslab_t *msp = vd->vdev_ms[m]; + mutex_enter(&msp->ms_lock); + space_map_unload(&msp->ms_map); + mutex_exit(&msp->ms_lock); + } + } +} + +/* + * Verify that the sum of the sizes of all blocks in the pool adds up + * to the SPA's sa_alloc total. + */ +typedef struct zdb_blkstats { + uint64_t zb_asize; + uint64_t zb_lsize; + uint64_t zb_psize; + uint64_t zb_count; +} zdb_blkstats_t; + +#define DMU_OT_DEFERRED DMU_OT_NONE +#define DMU_OT_TOTAL DMU_OT_NUMTYPES + +#define ZB_TOTAL DN_MAX_LEVELS + +typedef struct zdb_cb { + zdb_blkstats_t zcb_type[ZB_TOTAL + 1][DMU_OT_TOTAL + 1]; + uint64_t zcb_errors[256]; + int zcb_readfails; + int zcb_haderrors; +} zdb_cb_t; + +static void +zdb_count_block(spa_t *spa, zdb_cb_t *zcb, blkptr_t *bp, dmu_object_type_t type) +{ + for (int i = 0; i < 4; i++) { + int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL; + int t = (i & 1) ? type : DMU_OT_TOTAL; + zdb_blkstats_t *zb = &zcb->zcb_type[l][t]; + + zb->zb_asize += BP_GET_ASIZE(bp); + zb->zb_lsize += BP_GET_LSIZE(bp); + zb->zb_psize += BP_GET_PSIZE(bp); + zb->zb_count++; + } + + if (dump_opt['S']) { + boolean_t print_sig; + + print_sig = !zdb_sig_user_data || (BP_GET_LEVEL(bp) == 0 && + BP_GET_TYPE(bp) == DMU_OT_PLAIN_FILE_CONTENTS); + + if (BP_GET_CHECKSUM(bp) < zdb_sig_cksumalg) + print_sig = B_FALSE; + + if (print_sig) { + (void) printf("%llu\t%lld\t%lld\t%s\t%s\t%s\t" + "%llx:%llx:%llx:%llx\n", + (u_longlong_t)BP_GET_LEVEL(bp), + (longlong_t)BP_GET_PSIZE(bp), + (longlong_t)BP_GET_NDVAS(bp), + dmu_ot[BP_GET_TYPE(bp)].ot_name, + zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name, + zio_compress_table[BP_GET_COMPRESS(bp)].ci_name, + (u_longlong_t)bp->blk_cksum.zc_word[0], + (u_longlong_t)bp->blk_cksum.zc_word[1], + (u_longlong_t)bp->blk_cksum.zc_word[2], + (u_longlong_t)bp->blk_cksum.zc_word[3]); + } + } + + if (!dump_opt['L']) + VERIFY(zio_wait(zio_claim(NULL, spa, spa_first_txg(spa), bp, + NULL, NULL, ZIO_FLAG_MUSTSUCCEED)) == 0); +} + +static int +zdb_blkptr_cb(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb, + const dnode_phys_t *dnp, void *arg) +{ + zdb_cb_t *zcb = arg; + char blkbuf[BP_SPRINTF_LEN]; + + if (bp == NULL) + return (0); + + zdb_count_block(spa, zcb, bp, BP_GET_TYPE(bp)); + + if (dump_opt['c'] || dump_opt['S']) { + int ioerr, size; + void *data; + + size = BP_GET_LSIZE(bp); + data = malloc(size); + ioerr = zio_wait(zio_read(NULL, spa, bp, data, size, + NULL, NULL, ZIO_PRIORITY_ASYNC_READ, + ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB, zb)); + free(data); + + /* We expect io errors on intent log */ + if (ioerr && BP_GET_TYPE(bp) != DMU_OT_INTENT_LOG) { + zcb->zcb_haderrors = 1; + zcb->zcb_errors[ioerr]++; + + if (dump_opt['b'] >= 2) + sprintf_blkptr(blkbuf, BP_SPRINTF_LEN, bp); + else + blkbuf[0] = '\0'; + + if (!dump_opt['S']) { + (void) printf("zdb_blkptr_cb: " + "Got error %d reading " + "<%llu, %llu, %lld, %llx> %s -- skipping\n", + ioerr, + (u_longlong_t)zb->zb_objset, + (u_longlong_t)zb->zb_object, + (u_longlong_t)zb->zb_level, + (u_longlong_t)zb->zb_blkid, + blkbuf); + } + } + } + + 
zcb->zcb_readfails = 0; + + if (dump_opt['b'] >= 4) { + sprintf_blkptr(blkbuf, BP_SPRINTF_LEN, bp); + (void) printf("objset %llu object %llu offset 0x%llx %s\n", + (u_longlong_t)zb->zb_objset, + (u_longlong_t)zb->zb_object, + (u_longlong_t)blkid2offset(dnp, zb->zb_level, zb->zb_blkid), + blkbuf); + } + + return (0); +} + +static int +dump_block_stats(spa_t *spa) +{ + zdb_cb_t zcb = { 0 }; + zdb_blkstats_t *zb, *tzb; + uint64_t alloc, space, logalloc; + vdev_t *rvd = spa->spa_root_vdev; + int leaks = 0; + int c, e; + + if (!dump_opt['S']) { + (void) printf("\nTraversing all blocks %s%s%s%s...\n", + (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "", + dump_opt['c'] ? "checksums " : "", + (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "", + !dump_opt['L'] ? "nothing leaked " : ""); + } + + /* + * Load all space maps as SM_ALLOC maps, then traverse the pool + * claiming each block we discover. If the pool is perfectly + * consistent, the space maps will be empty when we're done. + * Anything left over is a leak; any block we can't claim (because + * it's not part of any space map) is a double allocation, + * reference to a freed block, or an unclaimed log block. + */ + if (!dump_opt['L']) + zdb_leak_init(spa); + + /* + * If there's a deferred-free bplist, process that first. + */ + if (spa->spa_sync_bplist_obj != 0) { + bplist_t *bpl = &spa->spa_sync_bplist; + blkptr_t blk; + uint64_t itor = 0; + + VERIFY(0 == bplist_open(bpl, spa->spa_meta_objset, + spa->spa_sync_bplist_obj)); + + while (bplist_iterate(bpl, &itor, &blk) == 0) { + if (dump_opt['b'] >= 4) { + char blkbuf[BP_SPRINTF_LEN]; + sprintf_blkptr(blkbuf, BP_SPRINTF_LEN, &blk); + (void) printf("[%s] %s\n", + "deferred free", blkbuf); + } + zdb_count_block(spa, &zcb, &blk, DMU_OT_DEFERRED); + } + + bplist_close(bpl); + } + + zcb.zcb_haderrors |= traverse_pool(spa, zdb_blkptr_cb, &zcb); + + if (zcb.zcb_haderrors && !dump_opt['S']) { + (void) printf("\nError counts:\n\n"); + (void) printf("\t%5s %s\n", "errno", "count"); + for (e = 0; e < 256; e++) { + if (zcb.zcb_errors[e] != 0) { + (void) printf("\t%5d %llu\n", + e, (u_longlong_t)zcb.zcb_errors[e]); + } + } + } + + /* + * Report any leaked segments. + */ + if (!dump_opt['L']) + zdb_leak_fini(spa); + + /* + * If we're interested in printing out the blkptr signatures, + * return now as we don't print out anything else (including + * errors and leaks). + */ + if (dump_opt['S']) + return (zcb.zcb_haderrors ? 3 : 0); + + alloc = spa_get_alloc(spa); + space = spa_get_space(spa); + + /* + * Log blocks allocated from a separate log device don't count + * as part of the normal pool space; factor them in here. + */ + logalloc = 0; + + for (c = 0; c < rvd->vdev_children; c++) + if (rvd->vdev_child[c]->vdev_islog) + logalloc += rvd->vdev_child[c]->vdev_stat.vs_alloc; + + tzb = &zcb.zcb_type[ZB_TOTAL][DMU_OT_TOTAL]; + + if (tzb->zb_asize == alloc + logalloc) { + if (!dump_opt['L']) + (void) printf("\n\tNo leaks (block sum matches space" + " maps exactly)\n"); + } else { + (void) printf("block traversal size %llu != alloc %llu " + "(%s %lld)\n", + (u_longlong_t)tzb->zb_asize, + (u_longlong_t)alloc + logalloc, + (dump_opt['L']) ? 
"unreachable" : "leaked", + (longlong_t)(alloc + logalloc - tzb->zb_asize)); + leaks = 1; + } + + if (tzb->zb_count == 0) + return (2); + + (void) printf("\n"); + (void) printf("\tbp count: %10llu\n", + (u_longlong_t)tzb->zb_count); + (void) printf("\tbp logical: %10llu\t avg: %6llu\n", + (u_longlong_t)tzb->zb_lsize, + (u_longlong_t)(tzb->zb_lsize / tzb->zb_count)); + (void) printf("\tbp physical: %10llu\t avg:" + " %6llu\tcompression: %6.2f\n", + (u_longlong_t)tzb->zb_psize, + (u_longlong_t)(tzb->zb_psize / tzb->zb_count), + (double)tzb->zb_lsize / tzb->zb_psize); + (void) printf("\tbp allocated: %10llu\t avg:" + " %6llu\tcompression: %6.2f\n", + (u_longlong_t)tzb->zb_asize, + (u_longlong_t)(tzb->zb_asize / tzb->zb_count), + (double)tzb->zb_lsize / tzb->zb_asize); + (void) printf("\tSPA allocated: %10llu\tused: %5.2f%%\n", + (u_longlong_t)alloc, 100.0 * alloc / space); + + if (dump_opt['b'] >= 2) { + int l, t, level; + (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE" + "\t avg\t comp\t%%Total\tType\n"); + + for (t = 0; t <= DMU_OT_NUMTYPES; t++) { + char csize[6], lsize[6], psize[6], asize[6], avg[6]; + char *typename; + + typename = t == DMU_OT_DEFERRED ? "deferred free" : + t == DMU_OT_TOTAL ? "Total" : dmu_ot[t].ot_name; + + if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) { + (void) printf("%6s\t%5s\t%5s\t%5s" + "\t%5s\t%5s\t%6s\t%s\n", + "-", + "-", + "-", + "-", + "-", + "-", + "-", + typename); + continue; + } + + for (l = ZB_TOTAL - 1; l >= -1; l--) { + level = (l == -1 ? ZB_TOTAL : l); + zb = &zcb.zcb_type[level][t]; + + if (zb->zb_asize == 0) + continue; + + if (dump_opt['b'] < 3 && level != ZB_TOTAL) + continue; + + if (level == 0 && zb->zb_asize == + zcb.zcb_type[ZB_TOTAL][t].zb_asize) + continue; + + nicenum(zb->zb_count, csize); + nicenum(zb->zb_lsize, lsize); + nicenum(zb->zb_psize, psize); + nicenum(zb->zb_asize, asize); + nicenum(zb->zb_asize / zb->zb_count, avg); + + (void) printf("%6s\t%5s\t%5s\t%5s\t%5s" + "\t%5.2f\t%6.2f\t", + csize, lsize, psize, asize, avg, + (double)zb->zb_lsize / zb->zb_psize, + 100.0 * zb->zb_asize / tzb->zb_asize); + + if (level == ZB_TOTAL) + (void) printf("%s\n", typename); + else + (void) printf(" L%d %s\n", + level, typename); + } + } + } + + (void) printf("\n"); + + if (leaks) + return (2); + + if (zcb.zcb_haderrors) + return (3); + + return (0); +} + +static void +dump_zpool(spa_t *spa) +{ + dsl_pool_t *dp = spa_get_dsl(spa); + int rc = 0; + + if (dump_opt['u']) + dump_uberblock(&spa->spa_uberblock); + + if (dump_opt['d'] || dump_opt['i']) { + dump_dir(dp->dp_meta_objset); + if (dump_opt['d'] >= 3) { + dump_bplist(dp->dp_meta_objset, + spa->spa_sync_bplist_obj, "Deferred frees"); + dump_dtl(spa->spa_root_vdev, 0); + dump_metaslabs(spa); + } + (void) dmu_objset_find(spa_name(spa), dump_one_dir, NULL, + DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); + } + + if (dump_opt['b'] || dump_opt['c'] || dump_opt['S']) + rc = dump_block_stats(spa); + + if (dump_opt['s']) + show_pool_stats(spa); + + if (rc != 0) + exit(rc); +} + +#define ZDB_FLAG_CHECKSUM 0x0001 +#define ZDB_FLAG_DECOMPRESS 0x0002 +#define ZDB_FLAG_BSWAP 0x0004 +#define ZDB_FLAG_GBH 0x0008 +#define ZDB_FLAG_INDIRECT 0x0010 +#define ZDB_FLAG_PHYS 0x0020 +#define ZDB_FLAG_RAW 0x0040 +#define ZDB_FLAG_PRINT_BLKPTR 0x0080 + +int flagbits[256]; + +static void +zdb_print_blkptr(blkptr_t *bp, int flags) +{ + dva_t *dva = bp->blk_dva; + int d; + + if (flags & ZDB_FLAG_BSWAP) + byteswap_uint64_array((void *)bp, sizeof (blkptr_t)); + /* + * Super-ick warning: This code is also duplicated in + * 
cmd/mdb/common/modules/zfs/zfs.c . Yeah, I hate code + * replication, too. + */ + for (d = 0; d < BP_GET_NDVAS(bp); d++) { + (void) printf("\tDVA[%d]: vdev_id %lld / %llx\n", d, + (longlong_t)DVA_GET_VDEV(&dva[d]), + (longlong_t)DVA_GET_OFFSET(&dva[d])); + (void) printf("\tDVA[%d]: GANG: %-5s GRID: %04llx\t" + "ASIZE: %llx\n", d, + DVA_GET_GANG(&dva[d]) ? "TRUE" : "FALSE", + (longlong_t)DVA_GET_GRID(&dva[d]), + (longlong_t)DVA_GET_ASIZE(&dva[d])); + (void) printf("\tDVA[%d]: :%llu:%llx:%llx:%s%s%s%s\n", d, + (u_longlong_t)DVA_GET_VDEV(&dva[d]), + (longlong_t)DVA_GET_OFFSET(&dva[d]), + (longlong_t)BP_GET_PSIZE(bp), + BP_SHOULD_BYTESWAP(bp) ? "e" : "", + !DVA_GET_GANG(&dva[d]) && BP_GET_LEVEL(bp) != 0 ? + "d" : "", + DVA_GET_GANG(&dva[d]) ? "g" : "", + BP_GET_COMPRESS(bp) != 0 ? "d" : ""); + } + (void) printf("\tLSIZE: %-16llx\t\tPSIZE: %llx\n", + (longlong_t)BP_GET_LSIZE(bp), (longlong_t)BP_GET_PSIZE(bp)); + (void) printf("\tENDIAN: %6s\t\t\t\t\tTYPE: %s\n", + BP_GET_BYTEORDER(bp) ? "LITTLE" : "BIG", + dmu_ot[BP_GET_TYPE(bp)].ot_name); + (void) printf("\tBIRTH: %-16llx LEVEL: %-2llu\tFILL: %llx\n", + (u_longlong_t)bp->blk_birth, (u_longlong_t)BP_GET_LEVEL(bp), + (u_longlong_t)bp->blk_fill); + (void) printf("\tCKFUNC: %-16s\t\tCOMP: %s\n", + zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name, + zio_compress_table[BP_GET_COMPRESS(bp)].ci_name); + (void) printf("\tCKSUM: %llx:%llx:%llx:%llx\n", + (u_longlong_t)bp->blk_cksum.zc_word[0], + (u_longlong_t)bp->blk_cksum.zc_word[1], + (u_longlong_t)bp->blk_cksum.zc_word[2], + (u_longlong_t)bp->blk_cksum.zc_word[3]); +} + +static void +zdb_dump_indirect(blkptr_t *bp, int nbps, int flags) +{ + int i; + + for (i = 0; i < nbps; i++) + zdb_print_blkptr(&bp[i], flags); +} + +static void +zdb_dump_gbh(void *buf, int flags) +{ + zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags); +} + +static void +zdb_dump_block_raw(void *buf, uint64_t size, int flags) +{ + if (flags & ZDB_FLAG_BSWAP) + byteswap_uint64_array(buf, size); + (void) write(2, buf, size); +} + +static void +zdb_dump_block(char *label, void *buf, uint64_t size, int flags) +{ + uint64_t *d = (uint64_t *)buf; + int nwords = size / sizeof (uint64_t); + int do_bswap = !!(flags & ZDB_FLAG_BSWAP); + int i, j; + char *hdr, *c; + + + if (do_bswap) + hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8"; + else + hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f"; + + (void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr); + + for (i = 0; i < nwords; i += 2) { + (void) printf("%06llx: %016llx %016llx ", + (u_longlong_t)(i * sizeof (uint64_t)), + (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]), + (u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1])); + + c = (char *)&d[i]; + for (j = 0; j < 2 * sizeof (uint64_t); j++) + (void) printf("%c", isprint(c[j]) ? c[j] : '.'); + (void) printf("\n"); + } +} + +/* + * There are two acceptable formats: + * leaf_name - For example: c1t0d0 or /tmp/ztest.0a + * child[.child]* - For example: 0.1.1 + * + * The second form can be used to specify arbitrary vdevs anywhere + * in the heirarchy. For example, in a pool with a mirror of + * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 . + */ +static vdev_t * +zdb_vdev_lookup(vdev_t *vdev, char *path) +{ + char *s, *p, *q; + int i; + + if (vdev == NULL) + return (NULL); + + /* First, assume the x.x.x.x format */ + i = (int)strtoul(path, &s, 10); + if (s == path || (s && *s != '.' 
&& *s != '\0')) + goto name; + if (i < 0 || i >= vdev->vdev_children) + return (NULL); + + vdev = vdev->vdev_child[i]; + if (*s == '\0') + return (vdev); + return (zdb_vdev_lookup(vdev, s+1)); + +name: + for (i = 0; i < vdev->vdev_children; i++) { + vdev_t *vc = vdev->vdev_child[i]; + + if (vc->vdev_path == NULL) { + vc = zdb_vdev_lookup(vc, path); + if (vc == NULL) + continue; + else + return (vc); + } + + p = strrchr(vc->vdev_path, '/'); + p = p ? p + 1 : vc->vdev_path; + q = &vc->vdev_path[strlen(vc->vdev_path) - 2]; + + if (strcmp(vc->vdev_path, path) == 0) + return (vc); + if (strcmp(p, path) == 0) + return (vc); + if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0) + return (vc); + } + + return (NULL); +} + +/* + * Read a block from a pool and print it out. The syntax of the + * block descriptor is: + * + * pool:vdev_specifier:offset:size[:flags] + * + * pool - The name of the pool you wish to read from + * vdev_specifier - Which vdev (see comment for zdb_vdev_lookup) + * offset - offset, in hex, in bytes + * size - Amount of data to read, in hex, in bytes + * flags - A string of characters specifying options + * b: Decode a blkptr at given offset within block + * *c: Calculate and display checksums + * *d: Decompress data before dumping + * e: Byteswap data before dumping + * *g: Display data as a gang block header + * *i: Display as an indirect block + * p: Do I/O to physical offset + * r: Dump raw data to stdout + * + * * = not yet implemented + */ +static void +zdb_read_block(char *thing, spa_t **spap) +{ + spa_t *spa = *spap; + int flags = 0; + uint64_t offset = 0, size = 0, blkptr_offset = 0; + zio_t *zio; + vdev_t *vd; + void *buf; + char *s, *p, *dup, *pool, *vdev, *flagstr; + int i, error, zio_flags; + + dup = strdup(thing); + s = strtok(dup, ":"); + pool = s ? s : ""; + s = strtok(NULL, ":"); + vdev = s ? s : ""; + s = strtok(NULL, ":"); + offset = strtoull(s ? s : "", NULL, 16); + s = strtok(NULL, ":"); + size = strtoull(s ? s : "", NULL, 16); + s = strtok(NULL, ":"); + flagstr = s ? 
s : ""; + + s = NULL; + if (size == 0) + s = "size must not be zero"; + if (!IS_P2ALIGNED(size, DEV_BSIZE)) + s = "size must be a multiple of sector size"; + if (!IS_P2ALIGNED(offset, DEV_BSIZE)) + s = "offset must be a multiple of sector size"; + if (s) { + (void) printf("Invalid block specifier: %s - %s\n", thing, s); + free(dup); + return; + } + + for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) { + for (i = 0; flagstr[i]; i++) { + int bit = flagbits[(uchar_t)flagstr[i]]; + + if (bit == 0) { + (void) printf("***Invalid flag: %c\n", + flagstr[i]); + continue; + } + flags |= bit; + + /* If it's not something with an argument, keep going */ + if ((bit & (ZDB_FLAG_CHECKSUM | ZDB_FLAG_DECOMPRESS | + ZDB_FLAG_PRINT_BLKPTR)) == 0) + continue; + + p = &flagstr[i + 1]; + if (bit == ZDB_FLAG_PRINT_BLKPTR) + blkptr_offset = strtoull(p, &p, 16); + if (*p != ':' && *p != '\0') { + (void) printf("***Invalid flag arg: '%s'\n", s); + free(dup); + return; + } + } + } + + if (spa == NULL || strcmp(spa_name(spa), pool) != 0) { + if (spa) + spa_close(spa, (void *)zdb_read_block); + error = spa_open(pool, spap, (void *)zdb_read_block); + if (error) + fatal("Failed to open pool '%s': %s", + pool, strerror(error)); + spa = *spap; + } + + vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev); + if (vd == NULL) { + (void) printf("***Invalid vdev: %s\n", vdev); + free(dup); + return; + } else { + if (vd->vdev_path) + (void) printf("Found vdev: %s\n", vd->vdev_path); + else + (void) printf("Found vdev type: %s\n", + vd->vdev_ops->vdev_op_type); + } + + buf = umem_alloc(size, UMEM_NOFAIL); + + zio_flags = ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE | + ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY; + + spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); + zio = zio_root(spa, NULL, NULL, 0); + /* XXX todo - cons up a BP so RAID-Z will be happy */ + zio_nowait(zio_vdev_child_io(zio, NULL, vd, offset, buf, size, + ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ, zio_flags, NULL, NULL)); + error = zio_wait(zio); + spa_config_exit(spa, SCL_STATE, FTAG); + + if (error) { + (void) printf("Read of %s failed, error: %d\n", thing, error); + goto out; + } + + if (flags & ZDB_FLAG_PRINT_BLKPTR) + zdb_print_blkptr((blkptr_t *)(void *) + ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags); + else if (flags & ZDB_FLAG_RAW) + zdb_dump_block_raw(buf, size, flags); + else if (flags & ZDB_FLAG_INDIRECT) + zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t), + flags); + else if (flags & ZDB_FLAG_GBH) + zdb_dump_gbh(buf, flags); + else + zdb_dump_block(thing, buf, size, flags); + +out: + umem_free(buf, size); + free(dup); +} + +static boolean_t +nvlist_string_match(nvlist_t *config, char *name, char *tgt) +{ + char *s; + + if (nvlist_lookup_string(config, name, &s) != 0) + return (B_FALSE); + + return (strcmp(s, tgt) == 0); +} + +static boolean_t +nvlist_uint64_match(nvlist_t *config, char *name, uint64_t tgt) +{ + uint64_t val; + + if (nvlist_lookup_uint64(config, name, &val) != 0) + return (B_FALSE); + + return (val == tgt); +} + +static boolean_t +vdev_child_guid_match(nvlist_t *vdev, uint64_t guid) +{ + nvlist_t **child; + uint_t c, children; + + verify(nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0); + for (c = 0; c < children; ++c) + if (nvlist_uint64_match(child[c], ZPOOL_CONFIG_GUID, guid)) + return (B_TRUE); + return (B_FALSE); +} + +static boolean_t +vdev_child_string_match(nvlist_t *vdev, char *tgt) +{ + nvlist_t **child; + uint_t c, children; + + verify(nvlist_lookup_nvlist_array(vdev, 
ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0); + for (c = 0; c < children; ++c) { + if (nvlist_string_match(child[c], ZPOOL_CONFIG_PATH, tgt) || + nvlist_string_match(child[c], ZPOOL_CONFIG_DEVID, tgt)) + return (B_TRUE); + } + return (B_FALSE); +} + +static boolean_t +vdev_guid_match(nvlist_t *config, uint64_t guid) +{ + nvlist_t *nvroot; + + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + + return (nvlist_uint64_match(nvroot, ZPOOL_CONFIG_GUID, guid) || + vdev_child_guid_match(nvroot, guid)); +} + +static boolean_t +vdev_string_match(nvlist_t *config, char *tgt) +{ + nvlist_t *nvroot; + + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + + return (vdev_child_string_match(nvroot, tgt)); +} + +static boolean_t +pool_match(nvlist_t *config, char *tgt) +{ + uint64_t guid = strtoull(tgt, NULL, 0); + + if (guid != 0) { + return ( + nvlist_uint64_match(config, ZPOOL_CONFIG_POOL_GUID, guid) || + vdev_guid_match(config, guid)); + } else { + return ( + nvlist_string_match(config, ZPOOL_CONFIG_POOL_NAME, tgt) || + vdev_string_match(config, tgt)); + } +} + +static int +find_exported_zpool(char *pool_id, nvlist_t **configp, char *vdev_dir) +{ + nvlist_t *pools; + int error = ENOENT; + nvlist_t *match = NULL; + + if (vdev_dir != NULL) + pools = zpool_find_import_activeok(g_zfs, 1, &vdev_dir); + else + pools = zpool_find_import_activeok(g_zfs, 0, NULL); + + if (pools != NULL) { + nvpair_t *elem = NULL; + + while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { + verify(nvpair_value_nvlist(elem, configp) == 0); + if (pool_match(*configp, pool_id)) { + if (match != NULL) { + (void) fatal( + "More than one matching pool - " + "specify guid/devid/device path."); + } else { + match = *configp; + error = 0; + } + } + } + } + + *configp = error ? 
NULL : match; + + return (error); +} + +int +main(int argc, char **argv) +{ + int i, c; + struct rlimit rl = { 1024, 1024 }; + spa_t *spa; + objset_t *os = NULL; + char *endstr; + int dump_all = 1; + int verbose = 0; + int error; + int exported = 0; + char *vdev_dir = NULL; + + (void) setrlimit(RLIMIT_NOFILE, &rl); + (void) enable_extended_FILE_stdio(-1, -1); + + dprintf_setup(&argc, argv); + + while ((c = getopt(argc, argv, "udibcsvCLS:U:lRep:")) != -1) { + switch (c) { + case 'u': + case 'd': + case 'i': + case 'b': + case 'c': + case 's': + case 'C': + case 'l': + case 'R': + dump_opt[c]++; + dump_all = 0; + break; + case 'L': + dump_opt[c]++; + break; + case 'v': + verbose++; + break; + case 'U': + spa_config_path = optarg; + break; + case 'e': + exported = 1; + break; + case 'p': + vdev_dir = optarg; + break; + case 'S': + dump_opt[c]++; + dump_all = 0; + zdb_sig_user_data = (strncmp(optarg, "user:", 5) == 0); + if (!zdb_sig_user_data && strncmp(optarg, "all:", 4)) + usage(); + endstr = strchr(optarg, ':') + 1; + if (strcmp(endstr, "fletcher2") == 0) + zdb_sig_cksumalg = ZIO_CHECKSUM_FLETCHER_2; + else if (strcmp(endstr, "fletcher4") == 0) + zdb_sig_cksumalg = ZIO_CHECKSUM_FLETCHER_4; + else if (strcmp(endstr, "sha256") == 0) + zdb_sig_cksumalg = ZIO_CHECKSUM_SHA256; + else if (strcmp(endstr, "all") == 0) + zdb_sig_cksumalg = ZIO_CHECKSUM_FLETCHER_2; + else + usage(); + break; + default: + usage(); + break; + } + } + + if (vdev_dir != NULL && exported == 0) { + (void) fprintf(stderr, "-p option requires use of -e\n"); + usage(); + } + + kernel_init(FREAD); + g_zfs = libzfs_init(); + ASSERT(g_zfs != NULL); + + for (c = 0; c < 256; c++) { + if (dump_all && c != 'l' && c != 'R') + dump_opt[c] = 1; + if (dump_opt[c]) + dump_opt[c] += verbose; + } + + argc -= optind; + argv += optind; + + if (argc < 1) { + if (dump_opt['C']) { + dump_cachefile(spa_config_path); + return (0); + } + usage(); + } + + if (dump_opt['l']) { + dump_label(argv[0]); + return (0); + } + + if (dump_opt['R']) { + flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR; + flagbits['c'] = ZDB_FLAG_CHECKSUM; + flagbits['d'] = ZDB_FLAG_DECOMPRESS; + flagbits['e'] = ZDB_FLAG_BSWAP; + flagbits['g'] = ZDB_FLAG_GBH; + flagbits['i'] = ZDB_FLAG_INDIRECT; + flagbits['p'] = ZDB_FLAG_PHYS; + flagbits['r'] = ZDB_FLAG_RAW; + + spa = NULL; + while (argv[0]) { + zdb_read_block(argv[0], &spa); + argv++; + argc--; + } + if (spa) + spa_close(spa, (void *)zdb_read_block); + return (0); + } + + if (dump_opt['C']) + dump_config(argv[0]); + + error = 0; + if (exported) { + /* + * Check to see if the name refers to an exported zpool + */ + char *slash; + nvlist_t *exported_conf = NULL; + + if ((slash = strchr(argv[0], '/')) != NULL) + *slash = '\0'; + + error = find_exported_zpool(argv[0], &exported_conf, vdev_dir); + if (error == 0) { + nvlist_t *nvl = NULL; + + if (vdev_dir != NULL) { + if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) + error = ENOMEM; + else if (nvlist_add_string(nvl, + zpool_prop_to_name(ZPOOL_PROP_ALTROOT), + vdev_dir) != 0) + error = ENOMEM; + } + + if (error == 0) + error = spa_import_faulted(argv[0], + exported_conf, nvl); + + nvlist_free(nvl); + } + + if (slash != NULL) + *slash = '/'; + } + + if (error == 0) { + if (strchr(argv[0], '/') != NULL) { + error = dmu_objset_open(argv[0], DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &os); + } else { + error = spa_open(argv[0], &spa, FTAG); + } + } + + if (error) + fatal("can't open %s: %s", argv[0], strerror(error)); + + argv++; + if (--argc > 0) { + zopt_objects = argc; + zopt_object = 
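The -S handling above splits its argument at the first ':' into a scope ("user" or "all") and a checksum-algorithm name. A self-contained sketch of that parse; the reduced string table below is an assumption for the example and not the real ZIO_CHECKSUM_* enumeration.

#include <stdio.h>
#include <string.h>

/*
 * Split a "user:sha256" / "all:fletcher4" style argument: the prefix
 * selects the scope, the suffix names the checksum.  Returns 0 on
 * success, -1 on a malformed argument.
 */
static int
parse_sig_arg(const char *arg, int *user_data, const char **alg)
{
	static const char *algs[] =
	    { "fletcher2", "fletcher4", "sha256", "all", NULL };
	const char *colon = strchr(arg, ':');
	int i;

	if (colon == NULL)
		return (-1);

	if (strncmp(arg, "user:", 5) == 0)
		*user_data = 1;
	else if (strncmp(arg, "all:", 4) == 0)
		*user_data = 0;
	else
		return (-1);

	for (i = 0; algs[i] != NULL; i++) {
		if (strcmp(colon + 1, algs[i]) == 0) {
			*alg = algs[i];
			return (0);
		}
	}
	return (-1);
}

int
main(void)
{
	int user;
	const char *alg;

	if (parse_sig_arg("user:sha256", &user, &alg) == 0)
		(void) printf("user-data=%d algorithm=%s\n", user, alg);
	return (0);
}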
calloc(zopt_objects, sizeof (uint64_t)); + for (i = 0; i < zopt_objects; i++) { + errno = 0; + zopt_object[i] = strtoull(argv[i], NULL, 0); + if (zopt_object[i] == 0 && errno != 0) + fatal("bad object number %s: %s", + argv[i], strerror(errno)); + } + } + + if (os != NULL) { + dump_dir(os); + dmu_objset_close(os); + } else { + dump_zpool(spa); + spa_close(spa, FTAG); + } + + fuid_table_destroy(); + + libzfs_fini(g_zfs); + kernel_fini(); + + return (0); +} diff --git a/external/cddl/osnet/dist/cmd/zdb/zdb_il.c b/external/cddl/osnet/dist/cmd/zdb/zdb_il.c new file mode 100644 index 000000000000..02d35a050332 --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zdb/zdb_il.c @@ -0,0 +1,363 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * Print intent log header and statistics. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern uint8_t dump_opt[256]; + +static void +print_log_bp(const blkptr_t *bp, const char *prefix) +{ + char blkbuf[BP_SPRINTF_LEN]; + + sprintf_blkptr(blkbuf, BP_SPRINTF_LEN, bp); + (void) printf("%s%s\n", prefix, blkbuf); +} + +/* ARGSUSED */ +static void +zil_prt_rec_create(zilog_t *zilog, int txtype, lr_create_t *lr) +{ + time_t crtime = lr->lr_crtime[0]; + char *name = (char *)(lr + 1); + char *link = name + strlen(name) + 1; + + if (txtype == TX_SYMLINK) + (void) printf("\t\t\t%s -> %s\n", name, link); + else + (void) printf("\t\t\t%s\n", name); + + (void) printf("\t\t\t%s", ctime(&crtime)); + (void) printf("\t\t\tdoid %llu, foid %llu, mode %llo\n", + (u_longlong_t)lr->lr_doid, (u_longlong_t)lr->lr_foid, + (longlong_t)lr->lr_mode); + (void) printf("\t\t\tuid %llu, gid %llu, gen %llu, rdev 0x%llx\n", + (u_longlong_t)lr->lr_uid, (u_longlong_t)lr->lr_gid, + (u_longlong_t)lr->lr_gen, (u_longlong_t)lr->lr_rdev); +} + +/* ARGSUSED */ +static void +zil_prt_rec_remove(zilog_t *zilog, int txtype, lr_remove_t *lr) +{ + (void) printf("\t\t\tdoid %llu, name %s\n", + (u_longlong_t)lr->lr_doid, (char *)(lr + 1)); +} + +/* ARGSUSED */ +static void +zil_prt_rec_link(zilog_t *zilog, int txtype, lr_link_t *lr) +{ + (void) printf("\t\t\tdoid %llu, link_obj %llu, name %s\n", + (u_longlong_t)lr->lr_doid, (u_longlong_t)lr->lr_link_obj, + (char *)(lr + 1)); +} + +/* ARGSUSED */ +static void +zil_prt_rec_rename(zilog_t *zilog, int txtype, lr_rename_t *lr) +{ + char *snm = (char *)(lr + 1); + char *tnm = snm + strlen(snm) + 1; + + (void) printf("\t\t\tsdoid %llu, tdoid %llu\n", + (u_longlong_t)lr->lr_sdoid, (u_longlong_t)lr->lr_tdoid); + (void) printf("\t\t\tsrc %s tgt %s\n", snm, tnm); +} + +/* ARGSUSED 
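The ZIL record printers above locate variable-length payload (names, symlink targets) directly after the fixed log-record structure via (char *)(lr + 1), with a second string starting just past the first NUL terminator. A self-contained sketch of that layout, using an invented record type rather than the real lr_create_t.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented record type; the real code uses lr_create_t and friends. */
struct fake_create_rec {
	unsigned long long doid;	/* directory object id */
	unsigned long long foid;	/* file object id */
	/* NUL-terminated name, then link target, follow the struct */
};

int
main(void)
{
	const char *name = "file", *target = "/some/where";
	size_t len = sizeof (struct fake_create_rec) +
	    strlen(name) + 1 + strlen(target) + 1;
	struct fake_create_rec *rec = calloc(1, len);
	char *p, *n, *l;

	if (rec == NULL)
		return (1);

	/* Writer side: lay the strings out right after the struct. */
	rec->doid = 4;
	rec->foid = 8;
	p = (char *)(rec + 1);
	(void) strcpy(p, name);
	(void) strcpy(p + strlen(name) + 1, target);

	/* Reader side: the same arithmetic as the symlink printer. */
	n = (char *)(rec + 1);
	l = n + strlen(n) + 1;
	(void) printf("doid %llu foid %llu: %s -> %s\n",
	    rec->doid, rec->foid, n, l);

	free(rec);
	return (0);
}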
*/ +static void +zil_prt_rec_write(zilog_t *zilog, int txtype, lr_write_t *lr) +{ + char *data, *dlimit; + blkptr_t *bp = &lr->lr_blkptr; + char buf[SPA_MAXBLOCKSIZE]; + int verbose = MAX(dump_opt['d'], dump_opt['i']); + int error; + + (void) printf("\t\t\tfoid %llu, offset 0x%llx," + " length 0x%llx, blkoff 0x%llx\n", + (u_longlong_t)lr->lr_foid, (longlong_t)lr->lr_offset, + (u_longlong_t)lr->lr_length, (u_longlong_t)lr->lr_blkoff); + + if (verbose < 5) + return; + + if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) { + (void) printf("\t\t\thas blkptr, %s\n", + bp->blk_birth >= spa_first_txg(zilog->zl_spa) ? + "will claim" : "won't claim"); + print_log_bp(bp, "\t\t\t"); + if (bp->blk_birth == 0) { + bzero(buf, sizeof (buf)); + } else { + zbookmark_t zb; + + ASSERT3U(bp->blk_cksum.zc_word[ZIL_ZC_OBJSET], ==, + dmu_objset_id(zilog->zl_os)); + + zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET]; + zb.zb_object = 0; + zb.zb_level = -1; + zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ]; + + error = zio_wait(zio_read(NULL, zilog->zl_spa, + bp, buf, BP_GET_LSIZE(bp), NULL, NULL, + ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb)); + if (error) + return; + } + data = buf + lr->lr_blkoff; + } else { + data = (char *)(lr + 1); + } + + dlimit = data + MIN(lr->lr_length, + (verbose < 6 ? 20 : SPA_MAXBLOCKSIZE)); + + (void) printf("\t\t\t"); + while (data < dlimit) { + if (isprint(*data)) + (void) printf("%c ", *data); + else + (void) printf("%2X", *data); + data++; + } + (void) printf("\n"); +} + +/* ARGSUSED */ +static void +zil_prt_rec_truncate(zilog_t *zilog, int txtype, lr_truncate_t *lr) +{ + (void) printf("\t\t\tfoid %llu, offset 0x%llx, length 0x%llx\n", + (u_longlong_t)lr->lr_foid, (longlong_t)lr->lr_offset, + (u_longlong_t)lr->lr_length); +} + +/* ARGSUSED */ +static void +zil_prt_rec_setattr(zilog_t *zilog, int txtype, lr_setattr_t *lr) +{ + time_t atime = (time_t)lr->lr_atime[0]; + time_t mtime = (time_t)lr->lr_mtime[0]; + + (void) printf("\t\t\tfoid %llu, mask 0x%llx\n", + (u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_mask); + + if (lr->lr_mask & AT_MODE) { + (void) printf("\t\t\tAT_MODE %llo\n", + (longlong_t)lr->lr_mode); + } + + if (lr->lr_mask & AT_UID) { + (void) printf("\t\t\tAT_UID %llu\n", + (u_longlong_t)lr->lr_uid); + } + + if (lr->lr_mask & AT_GID) { + (void) printf("\t\t\tAT_GID %llu\n", + (u_longlong_t)lr->lr_gid); + } + + if (lr->lr_mask & AT_SIZE) { + (void) printf("\t\t\tAT_SIZE %llu\n", + (u_longlong_t)lr->lr_size); + } + + if (lr->lr_mask & AT_ATIME) { + (void) printf("\t\t\tAT_ATIME %llu.%09llu %s", + (u_longlong_t)lr->lr_atime[0], + (u_longlong_t)lr->lr_atime[1], + ctime(&atime)); + } + + if (lr->lr_mask & AT_MTIME) { + (void) printf("\t\t\tAT_MTIME %llu.%09llu %s", + (u_longlong_t)lr->lr_mtime[0], + (u_longlong_t)lr->lr_mtime[1], + ctime(&mtime)); + } +} + +/* ARGSUSED */ +static void +zil_prt_rec_acl(zilog_t *zilog, int txtype, lr_acl_t *lr) +{ + (void) printf("\t\t\tfoid %llu, aclcnt %llu\n", + (u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_aclcnt); +} + +typedef void (*zil_prt_rec_func_t)(); +typedef struct zil_rec_info { + zil_prt_rec_func_t zri_print; + char *zri_name; + uint64_t zri_count; +} zil_rec_info_t; + +static zil_rec_info_t zil_rec_info[TX_MAX_TYPE] = { + { NULL, "Total " }, + { zil_prt_rec_create, "TX_CREATE " }, + { zil_prt_rec_create, "TX_MKDIR " }, + { zil_prt_rec_create, "TX_MKXATTR " }, + { zil_prt_rec_create, "TX_SYMLINK " }, + { zil_prt_rec_remove, "TX_REMOVE " }, + { zil_prt_rec_remove, "TX_RMDIR " }, + { zil_prt_rec_link, "TX_LINK " }, + 
{ zil_prt_rec_rename, "TX_RENAME " }, + { zil_prt_rec_write, "TX_WRITE " }, + { zil_prt_rec_truncate, "TX_TRUNCATE " }, + { zil_prt_rec_setattr, "TX_SETATTR " }, + { zil_prt_rec_acl, "TX_ACL_V0 " }, + { zil_prt_rec_acl, "TX_ACL_ACL " }, + { zil_prt_rec_create, "TX_CREATE_ACL " }, + { zil_prt_rec_create, "TX_CREATE_ATTR " }, + { zil_prt_rec_create, "TX_CREATE_ACL_ATTR " }, + { zil_prt_rec_create, "TX_MKDIR_ACL " }, + { zil_prt_rec_create, "TX_MKDIR_ATTR " }, + { zil_prt_rec_create, "TX_MKDIR_ACL_ATTR " }, +}; + +/* ARGSUSED */ +static void +print_log_record(zilog_t *zilog, lr_t *lr, void *arg, uint64_t claim_txg) +{ + int txtype; + int verbose = MAX(dump_opt['d'], dump_opt['i']); + + /* reduce size of txtype to strip off TX_CI bit */ + txtype = lr->lrc_txtype; + + ASSERT(txtype != 0 && (uint_t)txtype < TX_MAX_TYPE); + ASSERT(lr->lrc_txg); + + (void) printf("\t\t%s%s len %6llu, txg %llu, seq %llu\n", + (lr->lrc_txtype & TX_CI) ? "CI-" : "", + zil_rec_info[txtype].zri_name, + (u_longlong_t)lr->lrc_reclen, + (u_longlong_t)lr->lrc_txg, + (u_longlong_t)lr->lrc_seq); + + if (txtype && verbose >= 3) + zil_rec_info[txtype].zri_print(zilog, txtype, lr); + + zil_rec_info[txtype].zri_count++; + zil_rec_info[0].zri_count++; +} + +/* ARGSUSED */ +static void +print_log_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) +{ + char blkbuf[BP_SPRINTF_LEN]; + int verbose = MAX(dump_opt['d'], dump_opt['i']); + char *claim; + + if (verbose <= 3) + return; + + if (verbose >= 5) { + (void) strcpy(blkbuf, ", "); + sprintf_blkptr(blkbuf + strlen(blkbuf), + BP_SPRINTF_LEN - strlen(blkbuf), bp); + } else { + blkbuf[0] = '\0'; + } + + if (claim_txg != 0) + claim = "already claimed"; + else if (bp->blk_birth >= spa_first_txg(zilog->zl_spa)) + claim = "will claim"; + else + claim = "won't claim"; + + (void) printf("\tBlock seqno %llu, %s%s\n", + (u_longlong_t)bp->blk_cksum.zc_word[ZIL_ZC_SEQ], claim, blkbuf); +} + +static void +print_log_stats(int verbose) +{ + int i, w, p10; + + if (verbose > 3) + (void) printf("\n"); + + if (zil_rec_info[0].zri_count == 0) + return; + + for (w = 1, p10 = 10; zil_rec_info[0].zri_count >= p10; p10 *= 10) + w++; + + for (i = 0; i < TX_MAX_TYPE; i++) + if (zil_rec_info[i].zri_count || verbose >= 3) + (void) printf("\t\t%s %*llu\n", + zil_rec_info[i].zri_name, w, + (u_longlong_t)zil_rec_info[i].zri_count); + (void) printf("\n"); +} + +/* ARGSUSED */ +void +dump_intent_log(zilog_t *zilog) +{ + const zil_header_t *zh = zilog->zl_header; + int verbose = MAX(dump_opt['d'], dump_opt['i']); + int i; + + if (zh->zh_log.blk_birth == 0 || verbose < 2) + return; + + (void) printf("\n ZIL header: claim_txg %llu, seq %llu\n", + (u_longlong_t)zh->zh_claim_txg, (u_longlong_t)zh->zh_replay_seq); + + if (verbose >= 4) + print_log_bp(&zh->zh_log, "\n\tfirst block: "); + + for (i = 0; i < TX_MAX_TYPE; i++) + zil_rec_info[i].zri_count = 0; + + if (verbose >= 2) { + (void) printf("\n"); + (void) zil_parse(zilog, print_log_block, print_log_record, NULL, + zh->zh_claim_txg); + print_log_stats(verbose); + } +} diff --git a/external/cddl/osnet/dist/cmd/zfs/zfs_iter.c b/external/cddl/osnet/dist/cmd/zfs/zfs_iter.c new file mode 100644 index 000000000000..a22370a02795 --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zfs/zfs_iter.c @@ -0,0 +1,420 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
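The zil_rec_info[] table above pairs a print handler, a label, and a running count for each transaction type, letting print_log_record() dispatch and tally through a single index. A reduced sketch of the same table-driven pattern; the record types, labels, and handler below are invented for the example.

#include <stdio.h>

typedef void (*rec_print_fn)(int type);

static void
print_generic(int type)
{
	(void) printf("\t\tdetails of a type-%d record\n", type);
}

/*
 * Entry 0 keeps the running total; every other entry pairs a handler,
 * a label and a per-type counter.  Types and labels are invented.
 */
static struct {
	rec_print_fn	print;
	const char	*name;
	unsigned long	count;
} rec_info[] = {
	{ NULL,		 "Total  " },
	{ print_generic, "TX_FOO " },
	{ print_generic, "TX_BAR " },
};

static void
handle_record(int type, int verbose)
{
	(void) printf("\t%s\n", rec_info[type].name);
	if (verbose && rec_info[type].print != NULL)
		rec_info[type].print(type);
	rec_info[type].count++;		/* per-type tally */
	rec_info[0].count++;		/* grand total, as in entry 0 above */
}

int
main(void)
{
	handle_record(1, 1);
	handle_record(2, 0);
	(void) printf("%s %lu\n", rec_info[0].name, rec_info[0].count);
	return (0);
}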
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "zfs_util.h" +#include "zfs_iter.h" + +/* + * This is a private interface used to gather up all the datasets specified on + * the command line so that we can iterate over them in order. + * + * First, we iterate over all filesystems, gathering them together into an + * AVL tree. We report errors for any explicitly specified datasets + * that we couldn't open. + * + * When finished, we have an AVL tree of ZFS handles. We go through and execute + * the provided callback for each one, passing whatever data the user supplied. + */ + +typedef struct zfs_node { + zfs_handle_t *zn_handle; + uu_avl_node_t zn_avlnode; +} zfs_node_t; + +typedef struct callback_data { + uu_avl_t *cb_avl; + int cb_flags; + zfs_type_t cb_types; + zfs_sort_column_t *cb_sortcol; + zprop_list_t **cb_proplist; +} callback_data_t; + +uu_avl_pool_t *avl_pool; + +/* + * Include snaps if they were requested or if this a zfs list where types + * were not specified and the "listsnapshots" property is set on this pool. + */ +static int +zfs_include_snapshots(zfs_handle_t *zhp, callback_data_t *cb) +{ + zpool_handle_t *zph; + + if ((cb->cb_flags & ZFS_ITER_PROP_LISTSNAPS) == 0) + return (cb->cb_types & ZFS_TYPE_SNAPSHOT); + + zph = zfs_get_pool_handle(zhp); + return (zpool_get_prop_int(zph, ZPOOL_PROP_LISTSNAPS, NULL)); +} + +/* + * Called for each dataset. If the object is of an appropriate type, + * add it to the avl tree and recurse over any children as necessary. + */ +static int +zfs_callback(zfs_handle_t *zhp, void *data) +{ + callback_data_t *cb = data; + int dontclose = 0; + int include_snaps = zfs_include_snapshots(zhp, cb); + + if ((zfs_get_type(zhp) & cb->cb_types) || + ((zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT) && include_snaps)) { + uu_avl_index_t idx; + zfs_node_t *node = safe_malloc(sizeof (zfs_node_t)); + + node->zn_handle = zhp; + uu_avl_node_init(node, &node->zn_avlnode, avl_pool); + if (uu_avl_find(cb->cb_avl, node, cb->cb_sortcol, + &idx) == NULL) { + if (cb->cb_proplist && + zfs_expand_proplist(zhp, cb->cb_proplist) != 0) { + free(node); + return (-1); + } + uu_avl_insert(cb->cb_avl, node, idx); + dontclose = 1; + } else { + free(node); + } + } + + /* + * Recurse if necessary. 
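zfs_callback() above gathers handles into an AVL tree and skips entries already present, so each dataset is visited exactly once even when it is reached both as a command-line argument and as a child during recursion. A reduced sketch of that gather-then-visit pattern, with a sorted singly-linked list standing in for the libuutil AVL tree and invented dataset names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A sorted singly-linked list stands in for the uu_avl tree. */
struct node {
	char *name;
	struct node *next;
};

/* Insert in sorted order; return 0 (and do nothing) for duplicates. */
static int
gather(struct node **head, const char *name)
{
	struct node **pp, *n;

	for (pp = head; *pp != NULL; pp = &(*pp)->next) {
		int c = strcmp(name, (*pp)->name);

		if (c == 0)
			return (0);	/* already gathered once */
		if (c < 0)
			break;		/* keep the list sorted */
	}
	if ((n = malloc(sizeof (*n))) == NULL)
		abort();
	n->name = strdup(name);
	n->next = *pp;
	*pp = n;
	return (1);
}

int
main(void)
{
	struct node *head = NULL, *n;
	const char *args[] = { "tank/b", "tank/a", "tank/b" };
	int i;

	for (i = 0; i < 3; i++)
		(void) gather(&head, args[i]);

	/* Visit each unique dataset once, in sorted order. */
	for (n = head; n != NULL; n = n->next)
		(void) printf("visit %s\n", n->name);

	while (head != NULL) {
		n = head->next;
		free(head->name);
		free(head);
		head = n;
	}
	return (0);
}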
+ */ + if (cb->cb_flags & ZFS_ITER_RECURSE) { + if (zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) + (void) zfs_iter_filesystems(zhp, zfs_callback, data); + if ((zfs_get_type(zhp) != ZFS_TYPE_SNAPSHOT) && include_snaps) + (void) zfs_iter_snapshots(zhp, zfs_callback, data); + } + + if (!dontclose) + zfs_close(zhp); + + return (0); +} + +int +zfs_add_sort_column(zfs_sort_column_t **sc, const char *name, + boolean_t reverse) +{ + zfs_sort_column_t *col; + zfs_prop_t prop; + + if ((prop = zfs_name_to_prop(name)) == ZPROP_INVAL && + !zfs_prop_user(name)) + return (-1); + + col = safe_malloc(sizeof (zfs_sort_column_t)); + + col->sc_prop = prop; + col->sc_reverse = reverse; + if (prop == ZPROP_INVAL) { + col->sc_user_prop = safe_malloc(strlen(name) + 1); + (void) strcpy(col->sc_user_prop, name); + } + + if (*sc == NULL) { + col->sc_last = col; + *sc = col; + } else { + (*sc)->sc_last->sc_next = col; + (*sc)->sc_last = col; + } + + return (0); +} + +void +zfs_free_sort_columns(zfs_sort_column_t *sc) +{ + zfs_sort_column_t *col; + + while (sc != NULL) { + col = sc->sc_next; + free(sc->sc_user_prop); + free(sc); + sc = col; + } +} + +/* ARGSUSED */ +static int +zfs_compare(const void *larg, const void *rarg, void *unused) +{ + zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle; + zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle; + const char *lname = zfs_get_name(l); + const char *rname = zfs_get_name(r); + char *lat, *rat; + uint64_t lcreate, rcreate; + int ret; + + lat = (char *)strchr(lname, '@'); + rat = (char *)strchr(rname, '@'); + + if (lat != NULL) + *lat = '\0'; + if (rat != NULL) + *rat = '\0'; + + ret = strcmp(lname, rname); + if (ret == 0) { + /* + * If we're comparing a dataset to one of its snapshots, we + * always make the full dataset first. + */ + if (lat == NULL) { + ret = -1; + } else if (rat == NULL) { + ret = 1; + } else { + /* + * If we have two snapshots from the same dataset, then + * we want to sort them according to creation time. We + * use the hidden CREATETXG property to get an absolute + * ordering of snapshots. + */ + lcreate = zfs_prop_get_int(l, ZFS_PROP_CREATETXG); + rcreate = zfs_prop_get_int(r, ZFS_PROP_CREATETXG); + + if (lcreate < rcreate) + ret = -1; + else if (lcreate > rcreate) + ret = 1; + } + } + + if (lat != NULL) + *lat = '@'; + if (rat != NULL) + *rat = '@'; + + return (ret); +} + +/* + * Sort datasets by specified columns. + * + * o Numeric types sort in ascending order. + * o String types sort in alphabetical order. + * o Types inappropriate for a row sort that row to the literal + * bottom, regardless of the specified ordering. + * + * If no sort columns are specified, or two datasets compare equally + * across all specified columns, they are sorted alphabetically by name + * with snapshots grouped under their parents. + */ +static int +zfs_sort(const void *larg, const void *rarg, void *data) +{ + zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle; + zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle; + zfs_sort_column_t *sc = (zfs_sort_column_t *)data; + zfs_sort_column_t *psc; + + for (psc = sc; psc != NULL; psc = psc->sc_next) { + char lbuf[ZFS_MAXPROPLEN], rbuf[ZFS_MAXPROPLEN]; + char *lstr, *rstr; + uint64_t lnum, rnum; + boolean_t lvalid, rvalid; + int ret = 0; + + /* + * We group the checks below the generic code. If 'lstr' and + * 'rstr' are non-NULL, then we do a string based comparison. + * Otherwise, we compare 'lnum' and 'rnum'. 
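zfs_add_sort_column() above appends in constant time by keeping a tail pointer (sc_last) that is only meaningful on the list head. The same idiom in isolation; the structure and field names below are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented structure; only the head node's "last" pointer is maintained. */
struct col {
	char *name;
	struct col *next;
	struct col *last;
};

static void
append(struct col **head, const char *name)
{
	struct col *c = calloc(1, sizeof (*c));

	if (c == NULL)
		abort();
	c->name = strdup(name);
	if (*head == NULL) {
		c->last = c;
		*head = c;
	} else {
		/* O(1) append: jump straight to the tail via the head. */
		(*head)->last->next = c;
		(*head)->last = c;
	}
}

int
main(void)
{
	struct col *head = NULL, *c;

	append(&head, "name");
	append(&head, "used");
	append(&head, "creation");
	for (c = head; c != NULL; c = c->next)
		(void) printf("%s\n", c->name);
	return (0);
}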
+ */ + lstr = rstr = NULL; + if (psc->sc_prop == ZPROP_INVAL) { + nvlist_t *luser, *ruser; + nvlist_t *lval, *rval; + + luser = zfs_get_user_props(l); + ruser = zfs_get_user_props(r); + + lvalid = (nvlist_lookup_nvlist(luser, + psc->sc_user_prop, &lval) == 0); + rvalid = (nvlist_lookup_nvlist(ruser, + psc->sc_user_prop, &rval) == 0); + + if (lvalid) + verify(nvlist_lookup_string(lval, + ZPROP_VALUE, &lstr) == 0); + if (rvalid) + verify(nvlist_lookup_string(rval, + ZPROP_VALUE, &rstr) == 0); + + } else if (zfs_prop_is_string(psc->sc_prop)) { + lvalid = (zfs_prop_get(l, psc->sc_prop, lbuf, + sizeof (lbuf), NULL, NULL, 0, B_TRUE) == 0); + rvalid = (zfs_prop_get(r, psc->sc_prop, rbuf, + sizeof (rbuf), NULL, NULL, 0, B_TRUE) == 0); + + lstr = lbuf; + rstr = rbuf; + } else { + lvalid = zfs_prop_valid_for_type(psc->sc_prop, + zfs_get_type(l)); + rvalid = zfs_prop_valid_for_type(psc->sc_prop, + zfs_get_type(r)); + + if (lvalid) + (void) zfs_prop_get_numeric(l, psc->sc_prop, + &lnum, NULL, NULL, 0); + if (rvalid) + (void) zfs_prop_get_numeric(r, psc->sc_prop, + &rnum, NULL, NULL, 0); + } + + if (!lvalid && !rvalid) + continue; + else if (!lvalid) + return (1); + else if (!rvalid) + return (-1); + + if (lstr) + ret = strcmp(lstr, rstr); + else if (lnum < rnum) + ret = -1; + else if (lnum > rnum) + ret = 1; + + if (ret != 0) { + if (psc->sc_reverse == B_TRUE) + ret = (ret < 0) ? 1 : -1; + return (ret); + } + } + + return (zfs_compare(larg, rarg, NULL)); +} + +int +zfs_for_each(int argc, char **argv, int flags, zfs_type_t types, + zfs_sort_column_t *sortcol, zprop_list_t **proplist, + zfs_iter_f callback, void *data) +{ + callback_data_t cb; + int ret = 0; + zfs_node_t *node; + uu_avl_walk_t *walk; + + avl_pool = uu_avl_pool_create("zfs_pool", sizeof (zfs_node_t), + offsetof(zfs_node_t, zn_avlnode), zfs_sort, UU_DEFAULT); + + if (avl_pool == NULL) { + (void) fprintf(stderr, + gettext("internal error: out of memory\n")); + exit(1); + } + + cb.cb_sortcol = sortcol; + cb.cb_flags = flags; + cb.cb_proplist = proplist; + cb.cb_types = types; + if ((cb.cb_avl = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL) { + (void) fprintf(stderr, + gettext("internal error: out of memory\n")); + exit(1); + } + + if (argc == 0) { + /* + * If given no arguments, iterate over all datasets. + */ + cb.cb_flags |= ZFS_ITER_RECURSE; + ret = zfs_iter_root(g_zfs, zfs_callback, &cb); + } else { + int i; + zfs_handle_t *zhp; + zfs_type_t argtype; + + /* + * If we're recursive, then we always allow filesystems as + * arguments. If we also are interested in snapshots, then we + * can take volumes as well. + */ + argtype = types; + if (flags & ZFS_ITER_RECURSE) { + argtype |= ZFS_TYPE_FILESYSTEM; + if (types & ZFS_TYPE_SNAPSHOT) + argtype |= ZFS_TYPE_VOLUME; + } + + for (i = 0; i < argc; i++) { + if (flags & ZFS_ITER_ARGS_CAN_BE_PATHS) { + zhp = zfs_path_to_zhandle(g_zfs, argv[i], + argtype); + } else { + zhp = zfs_open(g_zfs, argv[i], argtype); + } + if (zhp != NULL) + ret |= zfs_callback(zhp, &cb); + else + ret = 1; + } + } + + /* + * At this point we've got our AVL tree full of zfs handles, so iterate + * over each one and execute the real user callback. + */ + for (node = uu_avl_first(cb.cb_avl); node != NULL; + node = uu_avl_next(cb.cb_avl, node)) + ret |= callback(node->zn_handle, data); + + /* + * Finally, clean up the AVL tree. 
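zfs_sort() above walks the requested columns in order: entries missing a value sort to the bottom regardless of direction, the first nonzero comparison decides (inverted for reversed columns), and a complete tie falls back to the name comparison. A compact standalone comparator of the same shape, with an invented data model in place of ZFS properties.

#include <stdio.h>
#include <string.h>

struct row {
	const char *name;
	long vals[2];		/* -1 means "not valid for this row" */
};

struct key {
	int idx;		/* which value to compare */
	int reverse;		/* descending order for this key */
};

static int
multi_cmp(const struct row *l, const struct row *r,
    const struct key *keys, int nkeys)
{
	int i;

	for (i = 0; i < nkeys; i++) {
		long lv = l->vals[keys[i].idx];
		long rv = r->vals[keys[i].idx];
		int ret = 0;

		if (lv < 0 && rv < 0)
			continue;	/* neither valid: try the next key */
		if (lv < 0)
			return (1);	/* rows lacking the key sort last */
		if (rv < 0)
			return (-1);
		if (lv < rv)
			ret = -1;
		else if (lv > rv)
			ret = 1;
		if (ret != 0)
			return (keys[i].reverse ? -ret : ret);
	}
	return (strcmp(l->name, r->name));	/* final tie-break by name */
}

int
main(void)
{
	struct row a = { "tank/a", { 10, -1 } };
	struct row b = { "tank/b", { 10, 7 } };
	struct key keys[] = { { 0, 0 }, { 1, 1 } };

	/* Key 0 ties; a lacks key 1, so it sorts after b. */
	(void) printf("%d\n", multi_cmp(&a, &b, keys, 2));
	return (0);
}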
+ */ + if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL) { + (void) fprintf(stderr, + gettext("internal error: out of memory")); + exit(1); + } + + while ((node = uu_avl_walk_next(walk)) != NULL) { + uu_avl_remove(cb.cb_avl, node); + zfs_close(node->zn_handle); + free(node); + } + + uu_avl_walk_end(walk); + uu_avl_destroy(cb.cb_avl); + uu_avl_pool_destroy(avl_pool); + + return (ret); +} diff --git a/external/cddl/osnet/dist/cmd/zfs/zfs_iter.h b/external/cddl/osnet/dist/cmd/zfs/zfs_iter.h new file mode 100644 index 000000000000..76a11085a1ef --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zfs/zfs_iter.h @@ -0,0 +1,54 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef ZFS_ITER_H +#define ZFS_ITER_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct zfs_sort_column { + struct zfs_sort_column *sc_next; + struct zfs_sort_column *sc_last; + zfs_prop_t sc_prop; + char *sc_user_prop; + boolean_t sc_reverse; +} zfs_sort_column_t; + +#define ZFS_ITER_RECURSE (1 << 0) +#define ZFS_ITER_ARGS_CAN_BE_PATHS (1 << 1) +#define ZFS_ITER_PROP_LISTSNAPS (1 << 2) + +int zfs_for_each(int, char **, int options, zfs_type_t, + zfs_sort_column_t *, zprop_list_t **, zfs_iter_f, void *); +int zfs_add_sort_column(zfs_sort_column_t **, const char *, boolean_t); +void zfs_free_sort_columns(zfs_sort_column_t *); + +#ifdef __cplusplus +} +#endif + +#endif /* ZFS_ITER_H */ diff --git a/external/cddl/osnet/dist/cmd/zfs/zfs_main.c b/external/cddl/osnet/dist/cmd/zfs/zfs_main.c new file mode 100644 index 000000000000..a343b5c563fb --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zfs/zfs_main.c @@ -0,0 +1,4229 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "zfs_iter.h" +#include "zfs_util.h" + +libzfs_handle_t *g_zfs; + +static FILE *mnttab_file; +static char history_str[HIS_MAX_RECORD_LEN]; + +static int zfs_do_clone(int argc, char **argv); +static int zfs_do_create(int argc, char **argv); +static int zfs_do_destroy(int argc, char **argv); +static int zfs_do_get(int argc, char **argv); +static int zfs_do_inherit(int argc, char **argv); +static int zfs_do_list(int argc, char **argv); +static int zfs_do_mount(int argc, char **argv); +static int zfs_do_rename(int argc, char **argv); +static int zfs_do_rollback(int argc, char **argv); +static int zfs_do_set(int argc, char **argv); +static int zfs_do_upgrade(int argc, char **argv); +static int zfs_do_snapshot(int argc, char **argv); +static int zfs_do_unmount(int argc, char **argv); +static int zfs_do_share(int argc, char **argv); +static int zfs_do_unshare(int argc, char **argv); +static int zfs_do_send(int argc, char **argv); +static int zfs_do_receive(int argc, char **argv); +static int zfs_do_promote(int argc, char **argv); +static int zfs_do_allow(int argc, char **argv); +static int zfs_do_unallow(int argc, char **argv); + +/* + * Enable a reasonable set of defaults for libumem debugging on DEBUG builds. + */ + +#ifdef DEBUG +const char * +_umem_debug_init(void) +{ + return ("default,verbose"); /* $UMEM_DEBUG setting */ +} + +const char * +_umem_logging_init(void) +{ + return ("fail,contents"); /* $UMEM_LOGGING setting */ +} +#endif + +typedef enum { + HELP_CLONE, + HELP_CREATE, + HELP_DESTROY, + HELP_GET, + HELP_INHERIT, + HELP_UPGRADE, + HELP_LIST, + HELP_MOUNT, + HELP_PROMOTE, + HELP_RECEIVE, + HELP_RENAME, + HELP_ROLLBACK, + HELP_SEND, + HELP_SET, + HELP_SHARE, + HELP_SNAPSHOT, + HELP_UNMOUNT, + HELP_UNSHARE, + HELP_ALLOW, + HELP_UNALLOW +} zfs_help_t; + +typedef struct zfs_command { + const char *name; + int (*func)(int argc, char **argv); + zfs_help_t usage; +} zfs_command_t; + +/* + * Master command table. Each ZFS command has a name, associated function, and + * usage message. The usage messages need to be internationalized, so we have + * to have a function to return the usage message based on a command index. + * + * These commands are organized according to how they are displayed in the usage + * message. An empty command (one with a NULL name) indicates an empty line in + * the generic usage message. 
+ */ +static zfs_command_t command_table[] = { + { "create", zfs_do_create, HELP_CREATE }, + { "destroy", zfs_do_destroy, HELP_DESTROY }, + { NULL }, + { "snapshot", zfs_do_snapshot, HELP_SNAPSHOT }, + { "rollback", zfs_do_rollback, HELP_ROLLBACK }, + { "clone", zfs_do_clone, HELP_CLONE }, + { "promote", zfs_do_promote, HELP_PROMOTE }, + { "rename", zfs_do_rename, HELP_RENAME }, + { NULL }, + { "list", zfs_do_list, HELP_LIST }, + { NULL }, + { "set", zfs_do_set, HELP_SET }, + { "get", zfs_do_get, HELP_GET }, + { "inherit", zfs_do_inherit, HELP_INHERIT }, + { "upgrade", zfs_do_upgrade, HELP_UPGRADE }, + { NULL }, + { "mount", zfs_do_mount, HELP_MOUNT }, + { "unmount", zfs_do_unmount, HELP_UNMOUNT }, + { "share", zfs_do_share, HELP_SHARE }, + { "unshare", zfs_do_unshare, HELP_UNSHARE }, + { NULL }, + { "send", zfs_do_send, HELP_SEND }, + { "receive", zfs_do_receive, HELP_RECEIVE }, + { NULL }, + { "allow", zfs_do_allow, HELP_ALLOW }, + { NULL }, + { "unallow", zfs_do_unallow, HELP_UNALLOW }, +}; + +#define NCOMMAND (sizeof (command_table) / sizeof (command_table[0])) + +zfs_command_t *current_command; + +static const char * +get_usage(zfs_help_t idx) +{ + switch (idx) { + case HELP_CLONE: + return (gettext("\tclone [-p] [-o property=value] ... " + " \n")); + case HELP_CREATE: + return (gettext("\tcreate [-p] [-o property=value] ... " + "\n" + "\tcreate [-ps] [-b blocksize] [-o property=value] ... " + "-V \n")); + case HELP_DESTROY: + return (gettext("\tdestroy [-rRf] " + "\n")); + case HELP_GET: + return (gettext("\tget [-rHp] [-o field[,...]] " + "[-s source[,...]]\n" + "\t <\"all\" | property[,...]> " + "[filesystem|volume|snapshot] ...\n")); + case HELP_INHERIT: + return (gettext("\tinherit [-r] " + " ...\n")); + case HELP_UPGRADE: + return (gettext("\tupgrade [-v]\n" + "\tupgrade [-r] [-V version] <-a | filesystem ...>\n")); + case HELP_LIST: + return (gettext("\tlist [-rH] [-o property[,...]] " + "[-t type[,...]] [-s property] ...\n" + "\t [-S property] ... " + "[filesystem|volume|snapshot] ...\n")); + case HELP_MOUNT: + return (gettext("\tmount\n" + "\tmount [-vO] [-o opts] <-a | filesystem>\n")); + case HELP_PROMOTE: + return (gettext("\tpromote \n")); + case HELP_RECEIVE: + return (gettext("\treceive [-vnF] \n" + "\treceive [-vnF] -d \n")); + case HELP_RENAME: + return (gettext("\trename " + "\n" + "\trename -p \n" + "\trename -r ")); + case HELP_ROLLBACK: + return (gettext("\trollback [-rRf] \n")); + case HELP_SEND: + return (gettext("\tsend [-R] [-[iI] snapshot] \n")); + case HELP_SET: + return (gettext("\tset " + " ...\n")); + case HELP_SHARE: + return (gettext("\tshare <-a | filesystem>\n")); + case HELP_SNAPSHOT: + return (gettext("\tsnapshot [-r] [-o property=value] ... " + "\n")); + case HELP_UNMOUNT: + return (gettext("\tunmount [-f] " + "<-a | filesystem|mountpoint>\n")); + case HELP_UNSHARE: + return (gettext("\tunshare [-f] " + "<-a | filesystem|mountpoint>\n")); + case HELP_ALLOW: + return (gettext("\tallow [-ldug] " + "<\"everyone\"|user|group>[,...] [,...]\n" + "\t \n" + "\tallow [-ld] -e [,...] " + "\n" + "\tallow -c [,...] \n" + "\tallow -s @setname [,...] " + "\n")); + case HELP_UNALLOW: + return (gettext("\tunallow [-rldug] " + "<\"everyone\"|user|group>[,...]\n" + "\t [[,...]] \n" + "\tunallow [-rld] -e [[,...]] " + "\n" + "\tunallow [-r] -c [[,...]] " + "\n" + "\tunallow [-r] -s @setname [[,...]] " + "\n")); + } + + abort(); + /* NOTREACHED */ +} + +/* + * Utility function to guarantee malloc() success. 
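The command_table[] above maps each subcommand name to a handler and a usage index, with NULL-named entries producing blank lines in the usage listing. A reduced, self-contained sketch of that dispatch pattern; the subcommands and handlers below are invented.

#include <stdio.h>
#include <string.h>

typedef struct cmd {
	const char *name;
	int (*func)(int argc, char **argv);
	const char *usage;
} cmd_t;

static int
do_hello(int argc, char **argv)
{
	(void) argc;
	(void) argv;
	(void) printf("hello\n");
	return (0);
}

static cmd_t table[] = {
	{ "hello", do_hello, "\thello\n" },
	{ NULL },			/* blank line in the usage listing */
	{ "noop", do_hello, "\tnoop\n" },
};
#define	NCMD	(sizeof (table) / sizeof (table[0]))

int
main(int argc, char **argv)
{
	unsigned int i;

	if (argc < 2) {			/* no subcommand: print usage */
		for (i = 0; i < NCMD; i++)
			(void) printf("%s",
			    table[i].name ? table[i].usage : "\n");
		return (2);
	}
	for (i = 0; i < NCMD; i++) {	/* dispatch on the subcommand name */
		if (table[i].name != NULL &&
		    strcmp(argv[1], table[i].name) == 0)
			return (table[i].func(argc - 1, argv + 1));
	}
	(void) fprintf(stderr, "unknown command '%s'\n", argv[1]);
	return (2);
}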
+ */ +void * +safe_malloc(size_t size) +{ + void *data; + + if ((data = calloc(1, size)) == NULL) { + (void) fprintf(stderr, "internal error: out of memory\n"); + exit(1); + } + + return (data); +} + +/* + * Callback routine that will print out information for each of + * the properties. + */ +static int +usage_prop_cb(int prop, void *cb) +{ + FILE *fp = cb; + + (void) fprintf(fp, "\t%-15s ", zfs_prop_to_name(prop)); + + if (zfs_prop_readonly(prop)) + (void) fprintf(fp, " NO "); + else + (void) fprintf(fp, "YES "); + + if (zfs_prop_inheritable(prop)) + (void) fprintf(fp, " YES "); + else + (void) fprintf(fp, " NO "); + + if (zfs_prop_values(prop) == NULL) + (void) fprintf(fp, "-\n"); + else + (void) fprintf(fp, "%s\n", zfs_prop_values(prop)); + + return (ZPROP_CONT); +} + +/* + * Display usage message. If we're inside a command, display only the usage for + * that command. Otherwise, iterate over the entire command table and display + * a complete usage message. + */ +static void +usage(boolean_t requested) +{ + int i; + boolean_t show_properties = B_FALSE; + boolean_t show_permissions = B_FALSE; + FILE *fp = requested ? stdout : stderr; + + if (current_command == NULL) { + + (void) fprintf(fp, gettext("usage: zfs command args ...\n")); + (void) fprintf(fp, + gettext("where 'command' is one of the following:\n\n")); + + for (i = 0; i < NCOMMAND; i++) { + if (command_table[i].name == NULL) + (void) fprintf(fp, "\n"); + else + (void) fprintf(fp, "%s", + get_usage(command_table[i].usage)); + } + + (void) fprintf(fp, gettext("\nEach dataset is of the form: " + "pool/[dataset/]*dataset[@name]\n")); + } else { + (void) fprintf(fp, gettext("usage:\n")); + (void) fprintf(fp, "%s", get_usage(current_command->usage)); + } + + if (current_command != NULL && + (strcmp(current_command->name, "set") == 0 || + strcmp(current_command->name, "get") == 0 || + strcmp(current_command->name, "inherit") == 0 || + strcmp(current_command->name, "list") == 0)) + show_properties = B_TRUE; + + if (current_command != NULL && + (strcmp(current_command->name, "allow") == 0 || + strcmp(current_command->name, "unallow") == 0)) + show_permissions = B_TRUE; + + if (show_properties) { + + (void) fprintf(fp, + gettext("\nThe following properties are supported:\n")); + + (void) fprintf(fp, "\n\t%-14s %s %s %s\n\n", + "PROPERTY", "EDIT", "INHERIT", "VALUES"); + + /* Iterate over all properties */ + (void) zprop_iter(usage_prop_cb, fp, B_FALSE, B_TRUE, + ZFS_TYPE_DATASET); + + (void) fprintf(fp, gettext("\nSizes are specified in bytes " + "with standard units such as K, M, G, etc.\n")); + (void) fprintf(fp, gettext("\nUser-defined properties can " + "be specified by using a name containing a colon (:).\n")); + + } else if (show_permissions) { + (void) fprintf(fp, + gettext("\nThe following permissions are supported:\n")); + + zfs_deleg_permissions(); + } else { + /* + * TRANSLATION NOTE: + * "zfs set|get" must not be localised this is the + * command name and arguments. + */ + + (void) fprintf(fp, + gettext("\nFor the property list, run: zfs set|get\n")); + + (void) fprintf(fp, + gettext("\nFor the delegated permission list, run:" + " zfs allow|unallow\n")); + } + + /* + * See comments at end of main(). + */ + if (getenv("ZFS_ABORT") != NULL) { + (void) printf("dumping core by request\n"); + abort(); + } + + exit(requested ? 
0 : 2); +} + +static int +parseprop(nvlist_t *props) +{ + char *propname = optarg; + char *propval, *strval; + + if ((propval = strchr(propname, '=')) == NULL) { + (void) fprintf(stderr, gettext("missing " + "'=' for -o option\n")); + return (-1); + } + *propval = '\0'; + propval++; + if (nvlist_lookup_string(props, propname, &strval) == 0) { + (void) fprintf(stderr, gettext("property '%s' " + "specified multiple times\n"), propname); + return (-1); + } + if (nvlist_add_string(props, propname, propval) != 0) { + (void) fprintf(stderr, gettext("internal " + "error: out of memory\n")); + return (-1); + } + return (0); + +} + +/* + * zfs clone [-p] [-o prop=value] ... + * + * Given an existing dataset, create a writable copy whose initial contents + * are the same as the source. The newly created dataset maintains a + * dependency on the original; the original cannot be destroyed so long as + * the clone exists. + * + * The '-p' flag creates all the non-existing ancestors of the target first. + */ +static int +zfs_do_clone(int argc, char **argv) +{ + zfs_handle_t *zhp = NULL; + boolean_t parents = B_FALSE; + nvlist_t *props; + int ret; + int c; + + if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { + (void) fprintf(stderr, gettext("internal error: " + "out of memory\n")); + return (1); + } + + /* check options */ + while ((c = getopt(argc, argv, "o:p")) != -1) { + switch (c) { + case 'o': + if (parseprop(props)) + return (1); + break; + case 'p': + parents = B_TRUE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + goto usage; + } + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing source dataset " + "argument\n")); + goto usage; + } + if (argc < 2) { + (void) fprintf(stderr, gettext("missing target dataset " + "argument\n")); + goto usage; + } + if (argc > 2) { + (void) fprintf(stderr, gettext("too many arguments\n")); + goto usage; + } + + /* open the source dataset */ + if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL) + return (1); + + if (parents && zfs_name_valid(argv[1], ZFS_TYPE_FILESYSTEM | + ZFS_TYPE_VOLUME)) { + /* + * Now create the ancestors of the target dataset. If the + * target already exists and '-p' option was used we should not + * complain. + */ + if (zfs_dataset_exists(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | + ZFS_TYPE_VOLUME)) + return (0); + if (zfs_create_ancestors(g_zfs, argv[1]) != 0) + return (1); + } + + /* pass to libzfs */ + ret = zfs_clone(zhp, argv[1], props); + + /* create the mountpoint if necessary */ + if (ret == 0) { + zfs_handle_t *clone; + + clone = zfs_open(g_zfs, argv[1], ZFS_TYPE_DATASET); + if (clone != NULL) { + if ((ret = zfs_mount(clone, NULL, 0)) == 0) + ret = zfs_share(clone); + zfs_close(clone); + } + } + + zfs_close(zhp); + nvlist_free(props); + + return (!!ret); + +usage: + if (zhp) + zfs_close(zhp); + nvlist_free(props); + usage(B_FALSE); + return (-1); +} + +/* + * zfs create [-p] [-o prop=value] ... fs + * zfs create [-ps] [-b blocksize] [-o prop=value] ... -V vol size + * + * Create a new dataset. This command can be used to create filesystems + * and volumes. Snapshot creation is handled by 'zfs snapshot'. + * For volumes, the user must specify a size to be used. + * + * The '-s' flag applies only to volumes, and indicates that we should not try + * to set the reservation for this volume. By default we set a reservation + * equal to the size for any volume. 
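parseprop() above cuts each -o argument at the first '=' and rejects duplicates before storing the pair in an nvlist. A standalone sketch of the same split, with a small fixed table standing in for the nvlist and invented property names.

#include <stdio.h>
#include <string.h>

#define	MAXPROPS	8

/* A small fixed table stands in for the nvlist used by the real code. */
static struct { char name[64]; char value[64]; } props[MAXPROPS];
static int nprops;

static int
add_prop(char *arg)
{
	char *eq = strchr(arg, '=');
	int i;

	if (eq == NULL) {
		(void) fprintf(stderr, "missing '=' in '%s'\n", arg);
		return (-1);
	}
	*eq = '\0';		/* split "name=value" in place */
	for (i = 0; i < nprops; i++) {
		if (strcmp(props[i].name, arg) == 0) {
			(void) fprintf(stderr,
			    "property '%s' specified multiple times\n", arg);
			return (-1);
		}
	}
	if (nprops == MAXPROPS)
		return (-1);
	(void) snprintf(props[nprops].name, sizeof (props[0].name),
	    "%s", arg);
	(void) snprintf(props[nprops].value, sizeof (props[0].value),
	    "%s", eq + 1);
	nprops++;
	return (0);
}

int
main(void)
{
	char a[] = "compression=on", b[] = "compression=off";

	(void) add_prop(a);
	(void) add_prop(b);	/* rejected: same property name */
	(void) printf("%s=%s\n", props[0].name, props[0].value);
	return (0);
}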
For pools with SPA_VERSION >= + * SPA_VERSION_REFRESERVATION, we set a refreservation instead. + * + * The '-p' flag creates all the non-existing ancestors of the target first. + */ +static int +zfs_do_create(int argc, char **argv) +{ + zfs_type_t type = ZFS_TYPE_FILESYSTEM; + zfs_handle_t *zhp = NULL; + uint64_t volsize; + int c; + boolean_t noreserve = B_FALSE; + boolean_t bflag = B_FALSE; + boolean_t parents = B_FALSE; + int ret = 1; + nvlist_t *props; + uint64_t intval; + int canmount; + + if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { + (void) fprintf(stderr, gettext("internal error: " + "out of memory\n")); + return (1); + } + + /* check options */ + while ((c = getopt(argc, argv, ":V:b:so:p")) != -1) { + switch (c) { + case 'V': + type = ZFS_TYPE_VOLUME; + if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) { + (void) fprintf(stderr, gettext("bad volume " + "size '%s': %s\n"), optarg, + libzfs_error_description(g_zfs)); + goto error; + } + + if (nvlist_add_uint64(props, + zfs_prop_to_name(ZFS_PROP_VOLSIZE), + intval) != 0) { + (void) fprintf(stderr, gettext("internal " + "error: out of memory\n")); + goto error; + } + volsize = intval; + break; + case 'p': + parents = B_TRUE; + break; + case 'b': + bflag = B_TRUE; + if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) { + (void) fprintf(stderr, gettext("bad volume " + "block size '%s': %s\n"), optarg, + libzfs_error_description(g_zfs)); + goto error; + } + + if (nvlist_add_uint64(props, + zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), + intval) != 0) { + (void) fprintf(stderr, gettext("internal " + "error: out of memory\n")); + goto error; + } + break; + case 'o': + if (parseprop(props)) + goto error; + break; + case 's': + noreserve = B_TRUE; + break; + case ':': + (void) fprintf(stderr, gettext("missing size " + "argument\n")); + goto badusage; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + goto badusage; + } + } + + if ((bflag || noreserve) && type != ZFS_TYPE_VOLUME) { + (void) fprintf(stderr, gettext("'-s' and '-b' can only be " + "used when creating a volume\n")); + goto badusage; + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (argc == 0) { + (void) fprintf(stderr, gettext("missing %s argument\n"), + zfs_type_to_name(type)); + goto badusage; + } + if (argc > 1) { + (void) fprintf(stderr, gettext("too many arguments\n")); + goto badusage; + } + + if (type == ZFS_TYPE_VOLUME && !noreserve) { + zpool_handle_t *zpool_handle; + uint64_t spa_version; + char *p; + zfs_prop_t resv_prop; + char *strval; + + if (p = strchr(argv[0], '/')) + *p = '\0'; + zpool_handle = zpool_open(g_zfs, argv[0]); + if (p != NULL) + *p = '/'; + if (zpool_handle == NULL) + goto error; + spa_version = zpool_get_prop_int(zpool_handle, + ZPOOL_PROP_VERSION, NULL); + zpool_close(zpool_handle); + if (spa_version >= SPA_VERSION_REFRESERVATION) + resv_prop = ZFS_PROP_REFRESERVATION; + else + resv_prop = ZFS_PROP_RESERVATION; + + if (nvlist_lookup_string(props, zfs_prop_to_name(resv_prop), + &strval) != 0) { + if (nvlist_add_uint64(props, + zfs_prop_to_name(resv_prop), volsize) != 0) { + (void) fprintf(stderr, gettext("internal " + "error: out of memory\n")); + nvlist_free(props); + return (1); + } + } + } + + if (parents && zfs_name_valid(argv[0], type)) { + /* + * Now create the ancestors of target dataset. If the target + * already exists and '-p' option was used we should not + * complain. 
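For volumes created without -s, the code above chooses which reservation property to set from the pool's on-disk version: refreservation where the pool supports it, the plain reservation otherwise. The decision in isolation; the version constant below is a stand-in for the example, not the real SPA_VERSION_REFRESERVATION value.

#include <stdio.h>

/* Stand-in constant, not the real SPA_VERSION_REFRESERVATION value. */
#define	EXAMPLE_VERSION_REFRESERVATION	9

static const char *
reservation_prop(unsigned int spa_version)
{
	return (spa_version >= EXAMPLE_VERSION_REFRESERVATION ?
	    "refreservation" : "reservation");
}

int
main(void)
{
	/* Older pool gets the plain reservation, newer a refreservation. */
	(void) printf("v8  -> %s\n", reservation_prop(8));
	(void) printf("v13 -> %s\n", reservation_prop(13));
	return (0);
}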
+ */ + if (zfs_dataset_exists(g_zfs, argv[0], type)) { + ret = 0; + goto error; + } + if (zfs_create_ancestors(g_zfs, argv[0]) != 0) + goto error; + } + + /* pass to libzfs */ + if (zfs_create(g_zfs, argv[0], type, props) != 0) + goto error; + + if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET)) == NULL) + goto error; + /* + * if the user doesn't want the dataset automatically mounted, + * then skip the mount/share step + */ + + canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT); + + /* + * Mount and/or share the new filesystem as appropriate. We provide a + * verbose error message to let the user know that their filesystem was + * in fact created, even if we failed to mount or share it. + */ + ret = 0; + if (canmount == ZFS_CANMOUNT_ON) { + if (zfs_mount(zhp, NULL, 0) != 0) { + (void) fprintf(stderr, gettext("filesystem " + "successfully created, but not mounted\n")); + ret = 1; + } else if (zfs_share(zhp) != 0) { + (void) fprintf(stderr, gettext("filesystem " + "successfully created, but not shared\n")); + ret = 1; + } + } + +error: + if (zhp) + zfs_close(zhp); + nvlist_free(props); + return (ret); +badusage: + nvlist_free(props); + usage(B_FALSE); + return (2); +} + +/* + * zfs destroy [-rf] + * + * -r Recursively destroy all children + * -R Recursively destroy all dependents, including clones + * -f Force unmounting of any dependents + * + * Destroys the given dataset. By default, it will unmount any filesystems, + * and refuse to destroy a dataset that has any dependents. A dependent can + * either be a child, or a clone of a child. + */ +typedef struct destroy_cbdata { + boolean_t cb_first; + int cb_force; + int cb_recurse; + int cb_error; + int cb_needforce; + int cb_doclones; + boolean_t cb_closezhp; + zfs_handle_t *cb_target; + char *cb_snapname; +} destroy_cbdata_t; + +/* + * Check for any dependents based on the '-r' or '-R' flags. + */ +static int +destroy_check_dependent(zfs_handle_t *zhp, void *data) +{ + destroy_cbdata_t *cbp = data; + const char *tname = zfs_get_name(cbp->cb_target); + const char *name = zfs_get_name(zhp); + + if (strncmp(tname, name, strlen(tname)) == 0 && + (name[strlen(tname)] == '/' || name[strlen(tname)] == '@')) { + /* + * This is a direct descendant, not a clone somewhere else in + * the hierarchy. + */ + if (cbp->cb_recurse) + goto out; + + if (cbp->cb_first) { + (void) fprintf(stderr, gettext("cannot destroy '%s': " + "%s has children\n"), + zfs_get_name(cbp->cb_target), + zfs_type_to_name(zfs_get_type(cbp->cb_target))); + (void) fprintf(stderr, gettext("use '-r' to destroy " + "the following datasets:\n")); + cbp->cb_first = B_FALSE; + cbp->cb_error = 1; + } + + (void) fprintf(stderr, "%s\n", zfs_get_name(zhp)); + } else { + /* + * This is a clone. We only want to report this if the '-r' + * wasn't specified, or the target is a snapshot. 
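destroy_check_dependent() above tells a direct descendant apart from a clone elsewhere in the hierarchy by requiring that the dependent's name begin with the target's name followed by '/' or '@'. That test in isolation, with invented dataset names.

#include <stdio.h>
#include <string.h>

/*
 * "tank/fs" is an ancestor of "tank/fs/child" and of "tank/fs@snap",
 * but not of "tank/fsother" or of an unrelated clone.
 */
static int
is_descendant(const char *target, const char *name)
{
	size_t len = strlen(target);

	return (strncmp(target, name, len) == 0 &&
	    (name[len] == '/' || name[len] == '@'));
}

int
main(void)
{
	(void) printf("%d %d %d\n",
	    is_descendant("tank/fs", "tank/fs/child"),	/* 1 */
	    is_descendant("tank/fs", "tank/fs@snap"),	/* 1 */
	    is_descendant("tank/fs", "tank/fsother"));	/* 0 */
	return (0);
}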
+ */ + if (!cbp->cb_recurse && + zfs_get_type(cbp->cb_target) != ZFS_TYPE_SNAPSHOT) + goto out; + + if (cbp->cb_first) { + (void) fprintf(stderr, gettext("cannot destroy '%s': " + "%s has dependent clones\n"), + zfs_get_name(cbp->cb_target), + zfs_type_to_name(zfs_get_type(cbp->cb_target))); + (void) fprintf(stderr, gettext("use '-R' to destroy " + "the following datasets:\n")); + cbp->cb_first = B_FALSE; + cbp->cb_error = 1; + } + + (void) fprintf(stderr, "%s\n", zfs_get_name(zhp)); + } + +out: + zfs_close(zhp); + return (0); +} + +static int +destroy_callback(zfs_handle_t *zhp, void *data) +{ + destroy_cbdata_t *cbp = data; + + /* + * Ignore pools (which we've already flagged as an error before getting + * here. + */ + if (strchr(zfs_get_name(zhp), '/') == NULL && + zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) { + zfs_close(zhp); + return (0); + } + + /* + * Bail out on the first error. + */ + if (zfs_unmount(zhp, NULL, cbp->cb_force ? MS_FORCE : 0) != 0 || + zfs_destroy(zhp) != 0) { + zfs_close(zhp); + return (-1); + } + + zfs_close(zhp); + return (0); +} + +static int +destroy_snap_clones(zfs_handle_t *zhp, void *arg) +{ + destroy_cbdata_t *cbp = arg; + char thissnap[MAXPATHLEN]; + zfs_handle_t *szhp; + boolean_t closezhp = cbp->cb_closezhp; + int rv; + + (void) snprintf(thissnap, sizeof (thissnap), + "%s@%s", zfs_get_name(zhp), cbp->cb_snapname); + + libzfs_print_on_error(g_zfs, B_FALSE); + szhp = zfs_open(g_zfs, thissnap, ZFS_TYPE_SNAPSHOT); + libzfs_print_on_error(g_zfs, B_TRUE); + if (szhp) { + /* + * Destroy any clones of this snapshot + */ + if (zfs_iter_dependents(szhp, B_FALSE, destroy_callback, + cbp) != 0) { + zfs_close(szhp); + if (closezhp) + zfs_close(zhp); + return (-1); + } + zfs_close(szhp); + } + + cbp->cb_closezhp = B_TRUE; + rv = zfs_iter_filesystems(zhp, destroy_snap_clones, arg); + if (closezhp) + zfs_close(zhp); + return (rv); +} + +static int +zfs_do_destroy(int argc, char **argv) +{ + destroy_cbdata_t cb = { 0 }; + int c; + zfs_handle_t *zhp; + char *cp; + + /* check options */ + while ((c = getopt(argc, argv, "frR")) != -1) { + switch (c) { + case 'f': + cb.cb_force = 1; + break; + case 'r': + cb.cb_recurse = 1; + break; + case 'R': + cb.cb_recurse = 1; + cb.cb_doclones = 1; + break; + case '?': + default: + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (argc == 0) { + (void) fprintf(stderr, gettext("missing path argument\n")); + usage(B_FALSE); + } + if (argc > 1) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + /* + * If we are doing recursive destroy of a snapshot, then the + * named snapshot may not exist. Go straight to libzfs. + */ + if (cb.cb_recurse && (cp = strchr(argv[0], '@'))) { + int ret; + + *cp = '\0'; + if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET)) == NULL) + return (1); + *cp = '@'; + cp++; + + if (cb.cb_doclones) { + cb.cb_snapname = cp; + if (destroy_snap_clones(zhp, &cb) != 0) { + zfs_close(zhp); + return (1); + } + } + + ret = zfs_destroy_snaps(zhp, cp); + zfs_close(zhp); + if (ret) { + (void) fprintf(stderr, + gettext("no snapshots destroyed\n")); + } + return (ret != 0); + } + + + /* Open the given dataset */ + if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET)) == NULL) + return (1); + + cb.cb_target = zhp; + + /* + * Perform an explicit check for pools before going any further. 
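zfs_do_destroy() above (like zfs_compare() earlier) temporarily overwrites the '@' in "dataset@snap" with a NUL so the dataset can be opened on its own, then restores it and takes the snapshot name from just past the split. The idiom in isolation, with an invented argument string.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char arg[] = "tank/fs@snap1";
	char *at = strchr(arg, '@');
	char *snapname = NULL;

	if (at != NULL) {
		*at = '\0';		/* arg now reads "tank/fs" */
		snapname = at + 1;	/* "snap1" */
	}
	(void) printf("dataset '%s', snapshot '%s'\n",
	    arg, snapname != NULL ? snapname : "(none)");

	if (at != NULL)
		*at = '@';		/* restore the original argument */
	(void) printf("restored: %s\n", arg);
	return (0);
}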
+ */ + if (!cb.cb_recurse && strchr(zfs_get_name(zhp), '/') == NULL && + zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) { + (void) fprintf(stderr, gettext("cannot destroy '%s': " + "operation does not apply to pools\n"), + zfs_get_name(zhp)); + (void) fprintf(stderr, gettext("use 'zfs destroy -r " + "%s' to destroy all datasets in the pool\n"), + zfs_get_name(zhp)); + (void) fprintf(stderr, gettext("use 'zpool destroy %s' " + "to destroy the pool itself\n"), zfs_get_name(zhp)); + zfs_close(zhp); + return (1); + } + + /* + * Check for any dependents and/or clones. + */ + cb.cb_first = B_TRUE; + if (!cb.cb_doclones && + zfs_iter_dependents(zhp, B_TRUE, destroy_check_dependent, + &cb) != 0) { + zfs_close(zhp); + return (1); + } + + if (cb.cb_error || + zfs_iter_dependents(zhp, B_FALSE, destroy_callback, &cb) != 0) { + zfs_close(zhp); + return (1); + } + + /* + * Do the real thing. The callback will close the handle regardless of + * whether it succeeds or not. + */ + + if (destroy_callback(zhp, &cb) != 0) + return (1); + + + return (0); +} + +/* + * zfs get [-rHp] [-o field[,field]...] [-s source[,source]...] + * < all | property[,property]... > < fs | snap | vol > ... + * + * -r recurse over any child datasets + * -H scripted mode. Headers are stripped, and fields are separated + * by tabs instead of spaces. + * -o Set of fields to display. One of "name,property,value,source". + * Default is all four. + * -s Set of sources to allow. One of + * "local,default,inherited,temporary,none". Default is all + * five. + * -p Display values in parsable (literal) format. + * + * Prints properties for the given datasets. The user can control which + * columns to display as well as which property types to allow. + */ + +/* + * Invoked to display the properties for a single dataset. + */ +static int +get_callback(zfs_handle_t *zhp, void *data) +{ + char buf[ZFS_MAXPROPLEN]; + zprop_source_t sourcetype; + char source[ZFS_MAXNAMELEN]; + zprop_get_cbdata_t *cbp = data; + nvlist_t *userprop = zfs_get_user_props(zhp); + zprop_list_t *pl = cbp->cb_proplist; + nvlist_t *propval; + char *strval; + char *sourceval; + + for (; pl != NULL; pl = pl->pl_next) { + /* + * Skip the special fake placeholder. This will also skip over + * the name property when 'all' is specified. 
+ */ + if (pl->pl_prop == ZFS_PROP_NAME && + pl == cbp->cb_proplist) + continue; + + if (pl->pl_prop != ZPROP_INVAL) { + if (zfs_prop_get(zhp, pl->pl_prop, buf, + sizeof (buf), &sourcetype, source, + sizeof (source), + cbp->cb_literal) != 0) { + if (pl->pl_all) + continue; + if (!zfs_prop_valid_for_type(pl->pl_prop, + ZFS_TYPE_DATASET)) { + (void) fprintf(stderr, + gettext("No such property '%s'\n"), + zfs_prop_to_name(pl->pl_prop)); + continue; + } + sourcetype = ZPROP_SRC_NONE; + (void) strlcpy(buf, "-", sizeof (buf)); + } + + zprop_print_one_property(zfs_get_name(zhp), cbp, + zfs_prop_to_name(pl->pl_prop), + buf, sourcetype, source); + } else { + if (nvlist_lookup_nvlist(userprop, + pl->pl_user_prop, &propval) != 0) { + if (pl->pl_all) + continue; + sourcetype = ZPROP_SRC_NONE; + strval = "-"; + } else { + verify(nvlist_lookup_string(propval, + ZPROP_VALUE, &strval) == 0); + verify(nvlist_lookup_string(propval, + ZPROP_SOURCE, &sourceval) == 0); + + if (strcmp(sourceval, + zfs_get_name(zhp)) == 0) { + sourcetype = ZPROP_SRC_LOCAL; + } else { + sourcetype = ZPROP_SRC_INHERITED; + (void) strlcpy(source, + sourceval, sizeof (source)); + } + } + + zprop_print_one_property(zfs_get_name(zhp), cbp, + pl->pl_user_prop, strval, sourcetype, + source); + } + } + + return (0); +} + +static int +zfs_do_get(int argc, char **argv) +{ + zprop_get_cbdata_t cb = { 0 }; + int i, c, flags = 0; + char *value, *fields; + int ret; + zprop_list_t fake_name = { 0 }; + + /* + * Set up default columns and sources. + */ + cb.cb_sources = ZPROP_SRC_ALL; + cb.cb_columns[0] = GET_COL_NAME; + cb.cb_columns[1] = GET_COL_PROPERTY; + cb.cb_columns[2] = GET_COL_VALUE; + cb.cb_columns[3] = GET_COL_SOURCE; + cb.cb_type = ZFS_TYPE_DATASET; + + /* check options */ + while ((c = getopt(argc, argv, ":o:s:rHp")) != -1) { + switch (c) { + case 'p': + cb.cb_literal = B_TRUE; + break; + case 'r': + flags |= ZFS_ITER_RECURSE; + break; + case 'H': + cb.cb_scripted = B_TRUE; + break; + case ':': + (void) fprintf(stderr, gettext("missing argument for " + "'%c' option\n"), optopt); + usage(B_FALSE); + break; + case 'o': + /* + * Process the set of columns to display. We zero out + * the structure to give us a blank slate. 
+ */ + bzero(&cb.cb_columns, sizeof (cb.cb_columns)); + i = 0; + while (*optarg != '\0') { + static char *col_subopts[] = + { "name", "property", "value", "source", + NULL }; + + if (i == 4) { + (void) fprintf(stderr, gettext("too " + "many fields given to -o " + "option\n")); + usage(B_FALSE); + } + + switch (getsubopt(&optarg, col_subopts, + &value)) { + case 0: + cb.cb_columns[i++] = GET_COL_NAME; + break; + case 1: + cb.cb_columns[i++] = GET_COL_PROPERTY; + break; + case 2: + cb.cb_columns[i++] = GET_COL_VALUE; + break; + case 3: + cb.cb_columns[i++] = GET_COL_SOURCE; + break; + default: + (void) fprintf(stderr, + gettext("invalid column name " + "'%s'\n"), value); + usage(B_FALSE); + } + } + break; + + case 's': + cb.cb_sources = 0; + while (*optarg != '\0') { + static char *source_subopts[] = { + "local", "default", "inherited", + "temporary", "none", NULL }; + + switch (getsubopt(&optarg, source_subopts, + &value)) { + case 0: + cb.cb_sources |= ZPROP_SRC_LOCAL; + break; + case 1: + cb.cb_sources |= ZPROP_SRC_DEFAULT; + break; + case 2: + cb.cb_sources |= ZPROP_SRC_INHERITED; + break; + case 3: + cb.cb_sources |= ZPROP_SRC_TEMPORARY; + break; + case 4: + cb.cb_sources |= ZPROP_SRC_NONE; + break; + default: + (void) fprintf(stderr, + gettext("invalid source " + "'%s'\n"), value); + usage(B_FALSE); + } + } + break; + + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + if (argc < 1) { + (void) fprintf(stderr, gettext("missing property " + "argument\n")); + usage(B_FALSE); + } + + fields = argv[0]; + + if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET) + != 0) + usage(B_FALSE); + + argc--; + argv++; + + /* + * As part of zfs_expand_proplist(), we keep track of the maximum column + * width for each property. For the 'NAME' (and 'SOURCE') columns, we + * need to know the maximum name length. However, the user likely did + * not specify 'name' as one of the properties to fetch, so we need to + * make sure we always include at least this property for + * print_get_headers() to work properly. + */ + if (cb.cb_proplist != NULL) { + fake_name.pl_prop = ZFS_PROP_NAME; + fake_name.pl_width = strlen(gettext("NAME")); + fake_name.pl_next = cb.cb_proplist; + cb.cb_proplist = &fake_name; + } + + cb.cb_first = B_TRUE; + + /* run for each object */ + ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET, NULL, + &cb.cb_proplist, get_callback, &cb); + + if (cb.cb_proplist == &fake_name) + zprop_free_list(fake_name.pl_next); + else + zprop_free_list(cb.cb_proplist); + + return (ret); +} + +/* + * inherit [-r] ... + * + * -r Recurse over all children + * + * For each dataset specified on the command line, inherit the given property + * from its parent. Inheriting a property at the pool level will cause it to + * use the default value. The '-r' flag will recurse over all children, and is + * useful for setting a property on a hierarchy-wide basis, regardless of any + * local modifications for each dataset. + */ + +static int +inherit_recurse_cb(zfs_handle_t *zhp, void *data) +{ + char *propname = data; + zfs_prop_t prop = zfs_name_to_prop(propname); + + /* + * If we're doing it recursively, then ignore properties that + * are not valid for this type of dataset. 
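The -o handling in zfs_do_get() above consumes a comma-separated column list with getsubopt(3): each recognized token's index selects a column, unknown tokens are rejected, and at most four columns are accepted. A trimmed standalone version of that loop; the input string and diagnostics are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	static char *const tokens[] =
	    { "name", "property", "value", "source", NULL };
	char buf[] = "name,value,bogus";
	char *opts = buf, *value;
	int columns[4], ncols = 0;

	while (*opts != '\0') {
		int idx = getsubopt(&opts, tokens, &value);

		if (idx < 0) {
			(void) fprintf(stderr, "invalid column name '%s'\n",
			    value != NULL ? value : "");
			return (2);
		}
		if (ncols == 4) {
			(void) fprintf(stderr, "too many fields\n");
			return (2);
		}
		columns[ncols++] = idx;	/* token index doubles as column id */
	}
	(void) printf("%d columns parsed\n", ncols);
	return (0);
}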
+ */ + if (prop != ZPROP_INVAL && + !zfs_prop_valid_for_type(prop, zfs_get_type(zhp))) + return (0); + + return (zfs_prop_inherit(zhp, propname) != 0); +} + +static int +inherit_cb(zfs_handle_t *zhp, void *data) +{ + char *propname = data; + + return (zfs_prop_inherit(zhp, propname) != 0); +} + +static int +zfs_do_inherit(int argc, char **argv) +{ + int c; + zfs_prop_t prop; + char *propname; + int ret; + int flags = 0; + + /* check options */ + while ((c = getopt(argc, argv, "r")) != -1) { + switch (c) { + case 'r': + flags |= ZFS_ITER_RECURSE; + break; + case '?': + default: + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing property argument\n")); + usage(B_FALSE); + } + if (argc < 2) { + (void) fprintf(stderr, gettext("missing dataset argument\n")); + usage(B_FALSE); + } + + propname = argv[0]; + argc--; + argv++; + + if ((prop = zfs_name_to_prop(propname)) != ZPROP_INVAL) { + if (zfs_prop_readonly(prop)) { + (void) fprintf(stderr, gettext( + "%s property is read-only\n"), + propname); + return (1); + } + if (!zfs_prop_inheritable(prop)) { + (void) fprintf(stderr, gettext("'%s' property cannot " + "be inherited\n"), propname); + if (prop == ZFS_PROP_QUOTA || + prop == ZFS_PROP_RESERVATION || + prop == ZFS_PROP_REFQUOTA || + prop == ZFS_PROP_REFRESERVATION) + (void) fprintf(stderr, gettext("use 'zfs set " + "%s=none' to clear\n"), propname); + return (1); + } + } else if (!zfs_prop_user(propname)) { + (void) fprintf(stderr, gettext("invalid property '%s'\n"), + propname); + usage(B_FALSE); + } + + if (flags & ZFS_ITER_RECURSE) { + ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET, + NULL, NULL, inherit_recurse_cb, propname); + } else { + ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET, + NULL, NULL, inherit_cb, propname); + } + + return (ret); +} + +typedef struct upgrade_cbdata { + uint64_t cb_numupgraded; + uint64_t cb_numsamegraded; + uint64_t cb_numfailed; + uint64_t cb_version; + boolean_t cb_newer; + boolean_t cb_foundone; + char cb_lastfs[ZFS_MAXNAMELEN]; +} upgrade_cbdata_t; + +static int +same_pool(zfs_handle_t *zhp, const char *name) +{ + int len1 = strcspn(name, "/@"); + const char *zhname = zfs_get_name(zhp); + int len2 = strcspn(zhname, "/@"); + + if (len1 != len2) + return (B_FALSE); + return (strncmp(name, zhname, len1) == 0); +} + +static int +upgrade_list_callback(zfs_handle_t *zhp, void *data) +{ + upgrade_cbdata_t *cb = data; + int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION); + + /* list if it's old/new */ + if ((!cb->cb_newer && version < ZPL_VERSION) || + (cb->cb_newer && version > ZPL_VERSION)) { + char *str; + if (cb->cb_newer) { + str = gettext("The following filesystems are " + "formatted using a newer software version and\n" + "cannot be accessed on the current system.\n\n"); + } else { + str = gettext("The following filesystems are " + "out of date, and can be upgraded. 
After being\n" + "upgraded, these filesystems (and any 'zfs send' " + "streams generated from\n" + "subsequent snapshots) will no longer be " + "accessible by older software versions.\n\n"); + } + + if (!cb->cb_foundone) { + (void) puts(str); + (void) printf(gettext("VER FILESYSTEM\n")); + (void) printf(gettext("--- ------------\n")); + cb->cb_foundone = B_TRUE; + } + + (void) printf("%2u %s\n", version, zfs_get_name(zhp)); + } + + return (0); +} + +static int +upgrade_set_callback(zfs_handle_t *zhp, void *data) +{ + upgrade_cbdata_t *cb = data; + int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION); + + if (cb->cb_version >= ZPL_VERSION_FUID) { + int spa_version; + + if (zfs_spa_version(zhp, &spa_version) < 0) + return (-1); + + if (spa_version < SPA_VERSION_FUID) { + /* can't upgrade */ + (void) printf(gettext("%s: can not be upgraded; " + "the pool version needs to first be upgraded\nto " + "version %d\n\n"), + zfs_get_name(zhp), SPA_VERSION_FUID); + cb->cb_numfailed++; + return (0); + } + } + + /* upgrade */ + if (version < cb->cb_version) { + char verstr[16]; + (void) snprintf(verstr, sizeof (verstr), + "%llu", cb->cb_version); + if (cb->cb_lastfs[0] && !same_pool(zhp, cb->cb_lastfs)) { + /* + * If they did "zfs upgrade -a", then we could + * be doing ioctls to different pools. We need + * to log this history once to each pool. + */ + verify(zpool_stage_history(g_zfs, history_str) == 0); + } + if (zfs_prop_set(zhp, "version", verstr) == 0) + cb->cb_numupgraded++; + else + cb->cb_numfailed++; + (void) strcpy(cb->cb_lastfs, zfs_get_name(zhp)); + } else if (version > cb->cb_version) { + /* can't downgrade */ + (void) printf(gettext("%s: can not be downgraded; " + "it is already at version %u\n"), + zfs_get_name(zhp), version); + cb->cb_numfailed++; + } else { + cb->cb_numsamegraded++; + } + return (0); +} + +/* + * zfs upgrade + * zfs upgrade -v + * zfs upgrade [-r] [-V ] <-a | filesystem> + */ +static int +zfs_do_upgrade(int argc, char **argv) +{ + boolean_t all = B_FALSE; + boolean_t showversions = B_FALSE; + int ret; + upgrade_cbdata_t cb = { 0 }; + char c; + int flags = ZFS_ITER_ARGS_CAN_BE_PATHS; + + /* check options */ + while ((c = getopt(argc, argv, "rvV:a")) != -1) { + switch (c) { + case 'r': + flags |= ZFS_ITER_RECURSE; + break; + case 'v': + showversions = B_TRUE; + break; + case 'V': + if (zfs_prop_string_to_index(ZFS_PROP_VERSION, + optarg, &cb.cb_version) != 0) { + (void) fprintf(stderr, + gettext("invalid version %s\n"), optarg); + usage(B_FALSE); + } + break; + case 'a': + all = B_TRUE; + break; + case '?': + default: + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + if ((!all && !argc) && ((flags & ZFS_ITER_RECURSE) | cb.cb_version)) + usage(B_FALSE); + if (showversions && (flags & ZFS_ITER_RECURSE || all || + cb.cb_version || argc)) + usage(B_FALSE); + if ((all || argc) && (showversions)) + usage(B_FALSE); + if (all && argc) + usage(B_FALSE); + + if (showversions) { + /* Show info on available versions. 
 */
+		(void) printf(gettext("The following filesystem versions are "
+		    "supported:\n\n"));
+		(void) printf(gettext("VER  DESCRIPTION\n"));
+		(void) printf("---  -----------------------------------------"
+		    "---------------\n");
+		(void) printf(gettext(" 1   Initial ZFS filesystem version\n"));
+		(void) printf(gettext(" 2   Enhanced directory entries\n"));
+		(void) printf(gettext(" 3   Case insensitive and File system "
+		    "unique identifier (FUID)\n"));
+		(void) printf(gettext("\nFor more information on a particular "
+		    "version, including supported releases, see:\n\n"));
+		(void) printf("http://www.opensolaris.org/os/community/zfs/"
+		    "version/zpl/N\n\n");
+		(void) printf(gettext("Where 'N' is the version number.\n"));
+		ret = 0;
+	} else if (argc || all) {
+		/* Upgrade filesystems */
+		if (cb.cb_version == 0)
+			cb.cb_version = ZPL_VERSION;
+		ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_FILESYSTEM,
+		    NULL, NULL, upgrade_set_callback, &cb);
+		(void) printf(gettext("%llu filesystems upgraded\n"),
+		    cb.cb_numupgraded);
+		if (cb.cb_numsamegraded) {
+			(void) printf(gettext("%llu filesystems already at "
+			    "this version\n"),
+			    cb.cb_numsamegraded);
+		}
+		if (cb.cb_numfailed != 0)
+			ret = 1;
+	} else {
+		/* List old-version filesystems */
+		boolean_t found;
+		(void) printf(gettext("This system is currently running "
+		    "ZFS filesystem version %llu.\n\n"), ZPL_VERSION);
+
+		flags |= ZFS_ITER_RECURSE;
+		ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
+		    NULL, NULL, upgrade_list_callback, &cb);
+
+		found = cb.cb_foundone;
+		cb.cb_foundone = B_FALSE;
+		cb.cb_newer = B_TRUE;
+
+		ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
+		    NULL, NULL, upgrade_list_callback, &cb);
+
+		if (!cb.cb_foundone && !found) {
+			(void) printf(gettext("All filesystems are "
+			    "formatted with the current version.\n"));
+		}
+	}
+
+	return (ret);
+}
+
+/*
+ * list [-rH] [-o property[,property]...] [-t type[,type]...]
+ *	[-s property [-s property]...] [-S property [-S property]...]
+ *	...
+ *
+ *	-r	Recurse over all children
+ *	-H	Scripted mode; elide headers and separate columns by tabs
+ *	-o	Control which fields to display.
+ *	-t	Control which object types to display.
+ *	-s	Specify sort columns, descending order.
+ *	-S	Specify sort columns, ascending order.
+ *
+ * When given no arguments, lists all filesystems in the system.
+ * Otherwise, list the specified datasets, optionally recursing down them if
+ * '-r' is specified.
+ */
+typedef struct list_cbdata {
+	boolean_t	cb_first;
+	boolean_t	cb_scripted;
+	zprop_list_t	*cb_proplist;
+} list_cbdata_t;
+
+/*
+ * Given a list of columns to display, output appropriate headers for each one.
+ */ +static void +print_header(zprop_list_t *pl) +{ + char headerbuf[ZFS_MAXPROPLEN]; + const char *header; + int i; + boolean_t first = B_TRUE; + boolean_t right_justify; + + for (; pl != NULL; pl = pl->pl_next) { + if (!first) { + (void) printf(" "); + } else { + first = B_FALSE; + } + + right_justify = B_FALSE; + if (pl->pl_prop != ZPROP_INVAL) { + header = zfs_prop_column_name(pl->pl_prop); + right_justify = zfs_prop_align_right(pl->pl_prop); + } else { + for (i = 0; pl->pl_user_prop[i] != '\0'; i++) + headerbuf[i] = toupper(pl->pl_user_prop[i]); + headerbuf[i] = '\0'; + header = headerbuf; + } + + if (pl->pl_next == NULL && !right_justify) + (void) printf("%s", header); + else if (right_justify) + (void) printf("%*s", pl->pl_width, header); + else + (void) printf("%-*s", pl->pl_width, header); + } + + (void) printf("\n"); +} + +/* + * Given a dataset and a list of fields, print out all the properties according + * to the described layout. + */ +static void +print_dataset(zfs_handle_t *zhp, zprop_list_t *pl, boolean_t scripted) +{ + boolean_t first = B_TRUE; + char property[ZFS_MAXPROPLEN]; + nvlist_t *userprops = zfs_get_user_props(zhp); + nvlist_t *propval; + char *propstr; + boolean_t right_justify; + int width; + + for (; pl != NULL; pl = pl->pl_next) { + if (!first) { + if (scripted) + (void) printf("\t"); + else + (void) printf(" "); + } else { + first = B_FALSE; + } + + right_justify = B_FALSE; + if (pl->pl_prop != ZPROP_INVAL) { + if (zfs_prop_get(zhp, pl->pl_prop, property, + sizeof (property), NULL, NULL, 0, B_FALSE) != 0) + propstr = "-"; + else + propstr = property; + + right_justify = zfs_prop_align_right(pl->pl_prop); + } else { + if (nvlist_lookup_nvlist(userprops, + pl->pl_user_prop, &propval) != 0) + propstr = "-"; + else + verify(nvlist_lookup_string(propval, + ZPROP_VALUE, &propstr) == 0); + } + + width = pl->pl_width; + + /* + * If this is being called in scripted mode, or if this is the + * last column and it is left-justified, don't include a width + * format specifier. + */ + if (scripted || (pl->pl_next == NULL && !right_justify)) + (void) printf("%s", propstr); + else if (right_justify) + (void) printf("%*s", width, propstr); + else + (void) printf("%-*s", width, propstr); + } + + (void) printf("\n"); +} + +/* + * Generic callback function to list a dataset or snapshot. 
+ */ +static int +list_callback(zfs_handle_t *zhp, void *data) +{ + list_cbdata_t *cbp = data; + + if (cbp->cb_first) { + if (!cbp->cb_scripted) + print_header(cbp->cb_proplist); + cbp->cb_first = B_FALSE; + } + + print_dataset(zhp, cbp->cb_proplist, cbp->cb_scripted); + + return (0); +} + +static int +zfs_do_list(int argc, char **argv) +{ + int c; + boolean_t scripted = B_FALSE; + static char default_fields[] = + "name,used,available,referenced,mountpoint"; + int types = ZFS_TYPE_DATASET; + boolean_t types_specified = B_FALSE; + char *fields = NULL; + list_cbdata_t cb = { 0 }; + char *value; + int ret; + zfs_sort_column_t *sortcol = NULL; + int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS; + + /* check options */ + while ((c = getopt(argc, argv, ":o:rt:Hs:S:")) != -1) { + switch (c) { + case 'o': + fields = optarg; + break; + case 'r': + flags |= ZFS_ITER_RECURSE; + break; + case 'H': + scripted = B_TRUE; + break; + case 's': + if (zfs_add_sort_column(&sortcol, optarg, + B_FALSE) != 0) { + (void) fprintf(stderr, + gettext("invalid property '%s'\n"), optarg); + usage(B_FALSE); + } + break; + case 'S': + if (zfs_add_sort_column(&sortcol, optarg, + B_TRUE) != 0) { + (void) fprintf(stderr, + gettext("invalid property '%s'\n"), optarg); + usage(B_FALSE); + } + break; + case 't': + types = 0; + types_specified = B_TRUE; + flags &= ~ZFS_ITER_PROP_LISTSNAPS; + while (*optarg != '\0') { + static char *type_subopts[] = { "filesystem", + "volume", "snapshot", "all", NULL }; + + switch (getsubopt(&optarg, type_subopts, + &value)) { + case 0: + types |= ZFS_TYPE_FILESYSTEM; + break; + case 1: + types |= ZFS_TYPE_VOLUME; + break; + case 2: + types |= ZFS_TYPE_SNAPSHOT; + break; + case 3: + types = ZFS_TYPE_DATASET; + break; + + default: + (void) fprintf(stderr, + gettext("invalid type '%s'\n"), + value); + usage(B_FALSE); + } + } + break; + case ':': + (void) fprintf(stderr, gettext("missing argument for " + "'%c' option\n"), optopt); + usage(B_FALSE); + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + if (fields == NULL) + fields = default_fields; + + /* + * If "-o space" and no types were specified, don't display snapshots. + */ + if (strcmp(fields, "space") == 0 && types_specified == B_FALSE) + types &= ~ZFS_TYPE_SNAPSHOT; + + /* + * If the user specifies '-o all', the zprop_get_list() doesn't + * normally include the name of the dataset. For 'zfs list', we always + * want this property to be first. + */ + if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET) + != 0) + usage(B_FALSE); + + cb.cb_scripted = scripted; + cb.cb_first = B_TRUE; + + ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist, + list_callback, &cb); + + zprop_free_list(cb.cb_proplist); + zfs_free_sort_columns(sortcol); + + if (ret == 0 && cb.cb_first && !cb.cb_scripted) + (void) printf(gettext("no datasets available\n")); + + return (ret); +} + +/* + * zfs rename + * zfs rename -p + * zfs rename -r + * + * Renames the given dataset to another of the same type. + * + * The '-p' flag creates all the non-existing ancestors of the target first. 
+ */ +/* ARGSUSED */ +static int +zfs_do_rename(int argc, char **argv) +{ + zfs_handle_t *zhp; + int c; + int ret; + boolean_t recurse = B_FALSE; + boolean_t parents = B_FALSE; + + /* check options */ + while ((c = getopt(argc, argv, "pr")) != -1) { + switch (c) { + case 'p': + parents = B_TRUE; + break; + case 'r': + recurse = B_TRUE; + break; + case '?': + default: + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing source dataset " + "argument\n")); + usage(B_FALSE); + } + if (argc < 2) { + (void) fprintf(stderr, gettext("missing target dataset " + "argument\n")); + usage(B_FALSE); + } + if (argc > 2) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + if (recurse && parents) { + (void) fprintf(stderr, gettext("-p and -r options are mutually " + "exclusive\n")); + usage(B_FALSE); + } + + if (recurse && strchr(argv[0], '@') == 0) { + (void) fprintf(stderr, gettext("source dataset for recursive " + "rename must be a snapshot\n")); + usage(B_FALSE); + } + + if ((zhp = zfs_open(g_zfs, argv[0], parents ? ZFS_TYPE_FILESYSTEM | + ZFS_TYPE_VOLUME : ZFS_TYPE_DATASET)) == NULL) + return (1); + + /* If we were asked and the name looks good, try to create ancestors. */ + if (parents && zfs_name_valid(argv[1], zfs_get_type(zhp)) && + zfs_create_ancestors(g_zfs, argv[1]) != 0) { + zfs_close(zhp); + return (1); + } + + ret = (zfs_rename(zhp, argv[1], recurse) != 0); + + zfs_close(zhp); + return (ret); +} + +/* + * zfs promote + * + * Promotes the given clone fs to be the parent + */ +/* ARGSUSED */ +static int +zfs_do_promote(int argc, char **argv) +{ + zfs_handle_t *zhp; + int ret; + + /* check options */ + if (argc > 1 && argv[1][0] == '-') { + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + argv[1][1]); + usage(B_FALSE); + } + + /* check number of arguments */ + if (argc < 2) { + (void) fprintf(stderr, gettext("missing clone filesystem" + " argument\n")); + usage(B_FALSE); + } + if (argc > 2) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); + if (zhp == NULL) + return (1); + + ret = (zfs_promote(zhp) != 0); + + + zfs_close(zhp); + return (ret); +} + +/* + * zfs rollback [-rRf] + * + * -r Delete any intervening snapshots before doing rollback + * -R Delete any snapshots and their clones + * -f ignored for backwards compatability + * + * Given a filesystem, rollback to a specific snapshot, discarding any changes + * since then and making it the active dataset. If more recent snapshots exist, + * the command will complain unless the '-r' flag is given. + */ +typedef struct rollback_cbdata { + uint64_t cb_create; + boolean_t cb_first; + int cb_doclones; + char *cb_target; + int cb_error; + boolean_t cb_recurse; + boolean_t cb_dependent; +} rollback_cbdata_t; + +/* + * Report any snapshots more recent than the one specified. Used when '-r' is + * not specified. We reuse this same callback for the snapshot dependents - if + * 'cb_dependent' is set, then this is a dependent and we should report it + * without checking the transaction group. 
+ */ +static int +rollback_check(zfs_handle_t *zhp, void *data) +{ + rollback_cbdata_t *cbp = data; + + if (cbp->cb_doclones) { + zfs_close(zhp); + return (0); + } + + if (!cbp->cb_dependent) { + if (strcmp(zfs_get_name(zhp), cbp->cb_target) != 0 && + zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT && + zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > + cbp->cb_create) { + + if (cbp->cb_first && !cbp->cb_recurse) { + (void) fprintf(stderr, gettext("cannot " + "rollback to '%s': more recent snapshots " + "exist\n"), + cbp->cb_target); + (void) fprintf(stderr, gettext("use '-r' to " + "force deletion of the following " + "snapshots:\n")); + cbp->cb_first = 0; + cbp->cb_error = 1; + } + + if (cbp->cb_recurse) { + cbp->cb_dependent = B_TRUE; + if (zfs_iter_dependents(zhp, B_TRUE, + rollback_check, cbp) != 0) { + zfs_close(zhp); + return (-1); + } + cbp->cb_dependent = B_FALSE; + } else { + (void) fprintf(stderr, "%s\n", + zfs_get_name(zhp)); + } + } + } else { + if (cbp->cb_first && cbp->cb_recurse) { + (void) fprintf(stderr, gettext("cannot rollback to " + "'%s': clones of previous snapshots exist\n"), + cbp->cb_target); + (void) fprintf(stderr, gettext("use '-R' to " + "force deletion of the following clones and " + "dependents:\n")); + cbp->cb_first = 0; + cbp->cb_error = 1; + } + + (void) fprintf(stderr, "%s\n", zfs_get_name(zhp)); + } + + zfs_close(zhp); + return (0); +} + +static int +zfs_do_rollback(int argc, char **argv) +{ + int ret; + int c; + boolean_t force = B_FALSE; + rollback_cbdata_t cb = { 0 }; + zfs_handle_t *zhp, *snap; + char parentname[ZFS_MAXNAMELEN]; + char *delim; + + /* check options */ + while ((c = getopt(argc, argv, "rRf")) != -1) { + switch (c) { + case 'r': + cb.cb_recurse = 1; + break; + case 'R': + cb.cb_recurse = 1; + cb.cb_doclones = 1; + break; + case 'f': + force = B_TRUE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing dataset argument\n")); + usage(B_FALSE); + } + if (argc > 1) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + /* open the snapshot */ + if ((snap = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL) + return (1); + + /* open the parent dataset */ + (void) strlcpy(parentname, argv[0], sizeof (parentname)); + verify((delim = strrchr(parentname, '@')) != NULL); + *delim = '\0'; + if ((zhp = zfs_open(g_zfs, parentname, ZFS_TYPE_DATASET)) == NULL) { + zfs_close(snap); + return (1); + } + + /* + * Check for more recent snapshots and/or clones based on the presence + * of '-r' and '-R'. + */ + cb.cb_target = argv[0]; + cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG); + cb.cb_first = B_TRUE; + cb.cb_error = 0; + if ((ret = zfs_iter_children(zhp, rollback_check, &cb)) != 0) + goto out; + + if ((ret = cb.cb_error) != 0) + goto out; + + /* + * Rollback parent to the given snapshot. + */ + ret = zfs_rollback(zhp, snap, force); + +out: + zfs_close(snap); + zfs_close(zhp); + + if (ret == 0) + return (0); + else + return (1); +} + +/* + * zfs set property=value { fs | snap | vol } ... + * + * Sets the given property for all datasets specified on the command line. 
+ */ +typedef struct set_cbdata { + char *cb_propname; + char *cb_value; +} set_cbdata_t; + +static int +set_callback(zfs_handle_t *zhp, void *data) +{ + set_cbdata_t *cbp = data; + + if (zfs_prop_set(zhp, cbp->cb_propname, cbp->cb_value) != 0) { + switch (libzfs_errno(g_zfs)) { + case EZFS_MOUNTFAILED: + (void) fprintf(stderr, gettext("property may be set " + "but unable to remount filesystem\n")); + break; + case EZFS_SHARENFSFAILED: + (void) fprintf(stderr, gettext("property may be set " + "but unable to reshare filesystem\n")); + break; + } + return (1); + } + return (0); +} + +static int +zfs_do_set(int argc, char **argv) +{ + set_cbdata_t cb; + int ret; + + /* check for options */ + if (argc > 1 && argv[1][0] == '-') { + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + argv[1][1]); + usage(B_FALSE); + } + + /* check number of arguments */ + if (argc < 2) { + (void) fprintf(stderr, gettext("missing property=value " + "argument\n")); + usage(B_FALSE); + } + if (argc < 3) { + (void) fprintf(stderr, gettext("missing dataset name\n")); + usage(B_FALSE); + } + + /* validate property=value argument */ + cb.cb_propname = argv[1]; + if (((cb.cb_value = strchr(cb.cb_propname, '=')) == NULL) || + (cb.cb_value[1] == '\0')) { + (void) fprintf(stderr, gettext("missing value in " + "property=value argument\n")); + usage(B_FALSE); + } + + *cb.cb_value = '\0'; + cb.cb_value++; + + if (*cb.cb_propname == '\0') { + (void) fprintf(stderr, + gettext("missing property in property=value argument\n")); + usage(B_FALSE); + } + + ret = zfs_for_each(argc - 2, argv + 2, NULL, + ZFS_TYPE_DATASET, NULL, NULL, set_callback, &cb); + + return (ret); +} + +/* + * zfs snapshot [-r] [-o prop=value] ... + * + * Creates a snapshot with the given name. While functionally equivalent to + * 'zfs create', it is a separate command to differentiate intent. + */ +static int +zfs_do_snapshot(int argc, char **argv) +{ + boolean_t recursive = B_FALSE; + int ret; + char c; + nvlist_t *props; + + if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { + (void) fprintf(stderr, gettext("internal error: " + "out of memory\n")); + return (1); + } + + /* check options */ + while ((c = getopt(argc, argv, "ro:")) != -1) { + switch (c) { + case 'o': + if (parseprop(props)) + return (1); + break; + case 'r': + recursive = B_TRUE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + goto usage; + } + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing snapshot argument\n")); + goto usage; + } + if (argc > 1) { + (void) fprintf(stderr, gettext("too many arguments\n")); + goto usage; + } + + ret = zfs_snapshot(g_zfs, argv[0], recursive, props); + nvlist_free(props); + if (ret && recursive) + (void) fprintf(stderr, gettext("no snapshots were created\n")); + return (ret != 0); + +usage: + nvlist_free(props); + usage(B_FALSE); + return (-1); +} + +/* + * zfs send [-v] -R [-i|-I <@snap>] + * zfs send [-v] [-i|-I <@snap>] + * + * Send a backup stream to stdout. 
+ */ +static int +zfs_do_send(int argc, char **argv) +{ + char *fromname = NULL; + char *toname = NULL; + char *cp; + zfs_handle_t *zhp; + boolean_t doall = B_FALSE; + boolean_t replicate = B_FALSE; + boolean_t fromorigin = B_FALSE; + boolean_t verbose = B_FALSE; + int c, err; + + /* check options */ + while ((c = getopt(argc, argv, ":i:I:Rv")) != -1) { + switch (c) { + case 'i': + if (fromname) + usage(B_FALSE); + fromname = optarg; + break; + case 'I': + if (fromname) + usage(B_FALSE); + fromname = optarg; + doall = B_TRUE; + break; + case 'R': + replicate = B_TRUE; + break; + case 'v': + verbose = B_TRUE; + break; + case ':': + (void) fprintf(stderr, gettext("missing argument for " + "'%c' option\n"), optopt); + usage(B_FALSE); + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing snapshot argument\n")); + usage(B_FALSE); + } + if (argc > 1) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + if (isatty(STDOUT_FILENO)) { + (void) fprintf(stderr, + gettext("Error: Stream can not be written to a terminal.\n" + "You must redirect standard output.\n")); + return (1); + } + + cp = strchr(argv[0], '@'); + if (cp == NULL) { + (void) fprintf(stderr, + gettext("argument must be a snapshot\n")); + usage(B_FALSE); + } + *cp = '\0'; + toname = cp + 1; + zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); + if (zhp == NULL) + return (1); + + /* + * If they specified the full path to the snapshot, chop off + * everything except the short name of the snapshot, but special + * case if they specify the origin. + */ + if (fromname && (cp = strchr(fromname, '@')) != NULL) { + char origin[ZFS_MAXNAMELEN]; + zprop_source_t src; + + (void) zfs_prop_get(zhp, ZFS_PROP_ORIGIN, + origin, sizeof (origin), &src, NULL, 0, B_FALSE); + + if (strcmp(origin, fromname) == 0) { + fromname = NULL; + fromorigin = B_TRUE; + } else { + *cp = '\0'; + if (cp != fromname && strcmp(argv[0], fromname)) { + (void) fprintf(stderr, + gettext("incremental source must be " + "in same filesystem\n")); + usage(B_FALSE); + } + fromname = cp + 1; + if (strchr(fromname, '@') || strchr(fromname, '/')) { + (void) fprintf(stderr, + gettext("invalid incremental source\n")); + usage(B_FALSE); + } + } + } + + if (replicate && fromname == NULL) + doall = B_TRUE; + + err = zfs_send(zhp, fromname, toname, replicate, doall, fromorigin, + verbose, STDOUT_FILENO); + zfs_close(zhp); + + return (err != 0); +} + +/* + * zfs receive [-dnvF] + * + * Restore a backup stream from stdin. 
+ */ +static int +zfs_do_receive(int argc, char **argv) +{ + int c, err; + recvflags_t flags; + + bzero(&flags, sizeof (recvflags_t)); + /* check options */ + while ((c = getopt(argc, argv, ":dnvF")) != -1) { + switch (c) { + case 'd': + flags.isprefix = B_TRUE; + break; + case 'n': + flags.dryrun = B_TRUE; + break; + case 'v': + flags.verbose = B_TRUE; + break; + case 'F': + flags.force = B_TRUE; + break; + case ':': + (void) fprintf(stderr, gettext("missing argument for " + "'%c' option\n"), optopt); + usage(B_FALSE); + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing snapshot argument\n")); + usage(B_FALSE); + } + if (argc > 1) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + if (isatty(STDIN_FILENO)) { + (void) fprintf(stderr, + gettext("Error: Backup stream can not be read " + "from a terminal.\n" + "You must redirect standard input.\n")); + return (1); + } + + err = zfs_receive(g_zfs, argv[0], flags, STDIN_FILENO, NULL); + + return (err != 0); +} + +typedef struct allow_cb { + int a_permcnt; + size_t a_treeoffset; +} allow_cb_t; + +static void +zfs_print_perms(avl_tree_t *tree) +{ + zfs_perm_node_t *permnode; + + permnode = avl_first(tree); + while (permnode != NULL) { + (void) printf("%s", permnode->z_pname); + permnode = AVL_NEXT(tree, permnode); + if (permnode) + (void) printf(","); + else + (void) printf("\n"); + } +} + +/* + * Iterate over user/groups/everyone/... and the call perm_iter + * function to print actual permission when tree has >0 nodes. + */ +static void +zfs_iter_perms(avl_tree_t *tree, const char *banner, allow_cb_t *cb) +{ + zfs_allow_node_t *item; + avl_tree_t *ptree; + + item = avl_first(tree); + while (item) { + ptree = (void *)((char *)item + cb->a_treeoffset); + if (avl_numnodes(ptree)) { + if (cb->a_permcnt++ == 0) + (void) printf("%s\n", banner); + (void) printf("\t%s", item->z_key); + /* + * Avoid an extra space being printed + * for "everyone" which is keyed with a null + * string + */ + if (item->z_key[0] != '\0') + (void) printf(" "); + zfs_print_perms(ptree); + } + item = AVL_NEXT(tree, item); + } +} + +#define LINES "-------------------------------------------------------------\n" +static int +zfs_print_allows(char *ds) +{ + zfs_allow_t *curperms, *perms; + zfs_handle_t *zhp; + allow_cb_t allowcb = { 0 }; + char banner[MAXPATHLEN]; + + if (ds[0] == '-') + usage(B_FALSE); + + if (strrchr(ds, '@')) { + (void) fprintf(stderr, gettext("Snapshots don't have 'allow'" + " permissions\n")); + return (1); + } + if ((zhp = zfs_open(g_zfs, ds, ZFS_TYPE_DATASET)) == NULL) + return (1); + + if (zfs_perm_get(zhp, &perms)) { + (void) fprintf(stderr, + gettext("Failed to retrieve 'allows' on %s\n"), ds); + zfs_close(zhp); + return (1); + } + + zfs_close(zhp); + + if (perms != NULL) + (void) printf("%s", LINES); + for (curperms = perms; curperms; curperms = curperms->z_next) { + + (void) snprintf(banner, sizeof (banner), + "Permission sets on (%s)", curperms->z_setpoint); + allowcb.a_treeoffset = + offsetof(zfs_allow_node_t, z_localdescend); + allowcb.a_permcnt = 0; + zfs_iter_perms(&curperms->z_sets, banner, &allowcb); + + (void) snprintf(banner, sizeof (banner), + "Create time permissions on (%s)", curperms->z_setpoint); + allowcb.a_treeoffset = + offsetof(zfs_allow_node_t, z_localdescend); + allowcb.a_permcnt = 0; + 
zfs_iter_perms(&curperms->z_crperms, banner, &allowcb); + + + (void) snprintf(banner, sizeof (banner), + "Local permissions on (%s)", curperms->z_setpoint); + allowcb.a_treeoffset = offsetof(zfs_allow_node_t, z_local); + allowcb.a_permcnt = 0; + zfs_iter_perms(&curperms->z_user, banner, &allowcb); + zfs_iter_perms(&curperms->z_group, banner, &allowcb); + zfs_iter_perms(&curperms->z_everyone, banner, &allowcb); + + (void) snprintf(banner, sizeof (banner), + "Descendent permissions on (%s)", curperms->z_setpoint); + allowcb.a_treeoffset = offsetof(zfs_allow_node_t, z_descend); + allowcb.a_permcnt = 0; + zfs_iter_perms(&curperms->z_user, banner, &allowcb); + zfs_iter_perms(&curperms->z_group, banner, &allowcb); + zfs_iter_perms(&curperms->z_everyone, banner, &allowcb); + + (void) snprintf(banner, sizeof (banner), + "Local+Descendent permissions on (%s)", + curperms->z_setpoint); + allowcb.a_treeoffset = + offsetof(zfs_allow_node_t, z_localdescend); + allowcb.a_permcnt = 0; + zfs_iter_perms(&curperms->z_user, banner, &allowcb); + zfs_iter_perms(&curperms->z_group, banner, &allowcb); + zfs_iter_perms(&curperms->z_everyone, banner, &allowcb); + + (void) printf("%s", LINES); + } + zfs_free_allows(perms); + return (0); +} + +#define ALLOWOPTIONS "ldcsu:g:e" +#define UNALLOWOPTIONS "ldcsu:g:er" + +/* + * Validate options, and build necessary datastructure to display/remove/add + * permissions. + * Returns 0 - If permissions should be added/removed + * Returns 1 - If permissions should be displayed. + * Returns -1 - on failure + */ +int +parse_allow_args(int *argc, char **argv[], boolean_t unallow, + char **ds, int *recurse, nvlist_t **zperms) +{ + int c; + char *options = unallow ? UNALLOWOPTIONS : ALLOWOPTIONS; + zfs_deleg_inherit_t deleg_type = ZFS_DELEG_NONE; + zfs_deleg_who_type_t who_type = ZFS_DELEG_WHO_UNKNOWN; + char *who = NULL; + char *perms = NULL; + zfs_handle_t *zhp; + + while ((c = getopt(*argc, *argv, options)) != -1) { + switch (c) { + case 'l': + if (who_type == ZFS_DELEG_CREATE || + who_type == ZFS_DELEG_NAMED_SET) + usage(B_FALSE); + + deleg_type |= ZFS_DELEG_PERM_LOCAL; + break; + case 'd': + if (who_type == ZFS_DELEG_CREATE || + who_type == ZFS_DELEG_NAMED_SET) + usage(B_FALSE); + + deleg_type |= ZFS_DELEG_PERM_DESCENDENT; + break; + case 'r': + *recurse = B_TRUE; + break; + case 'c': + if (who_type != ZFS_DELEG_WHO_UNKNOWN) + usage(B_FALSE); + if (deleg_type) + usage(B_FALSE); + who_type = ZFS_DELEG_CREATE; + break; + case 's': + if (who_type != ZFS_DELEG_WHO_UNKNOWN) + usage(B_FALSE); + if (deleg_type) + usage(B_FALSE); + who_type = ZFS_DELEG_NAMED_SET; + break; + case 'u': + if (who_type != ZFS_DELEG_WHO_UNKNOWN) + usage(B_FALSE); + who_type = ZFS_DELEG_USER; + who = optarg; + break; + case 'g': + if (who_type != ZFS_DELEG_WHO_UNKNOWN) + usage(B_FALSE); + who_type = ZFS_DELEG_GROUP; + who = optarg; + break; + case 'e': + if (who_type != ZFS_DELEG_WHO_UNKNOWN) + usage(B_FALSE); + who_type = ZFS_DELEG_EVERYONE; + break; + default: + usage(B_FALSE); + break; + } + } + + if (deleg_type == 0) + deleg_type = ZFS_DELEG_PERM_LOCALDESCENDENT; + + *argc -= optind; + *argv += optind; + + if (unallow == B_FALSE && *argc == 1) { + /* + * Only print permissions if no options were processed + */ + if (optind == 1) + return (1); + else + usage(B_FALSE); + } + + /* + * initialize variables for zfs_build_perms based on number + * of arguments. 
+ * 3 arguments ==> zfs [un]allow joe perm,perm,perm or + * zfs [un]allow -s @set1 perm,perm + * 2 arguments ==> zfs [un]allow -c perm,perm or + * zfs [un]allow -u|-g perm or + * zfs [un]allow -e perm,perm + * zfs unallow joe + * zfs unallow -s @set1 + * 1 argument ==> zfs [un]allow -e or + * zfs [un]allow -c + */ + + switch (*argc) { + case 3: + perms = (*argv)[1]; + who = (*argv)[0]; + *ds = (*argv)[2]; + + /* + * advance argc/argv for do_allow cases. + * for do_allow case make sure who have a know who type + * and its not a permission set. + */ + if (unallow == B_TRUE) { + *argc -= 2; + *argv += 2; + } else if (who_type != ZFS_DELEG_WHO_UNKNOWN && + who_type != ZFS_DELEG_NAMED_SET) + usage(B_FALSE); + break; + + case 2: + if (unallow == B_TRUE && (who_type == ZFS_DELEG_EVERYONE || + who_type == ZFS_DELEG_CREATE || who != NULL)) { + perms = (*argv)[0]; + *ds = (*argv)[1]; + } else { + if (unallow == B_FALSE && + (who_type == ZFS_DELEG_WHO_UNKNOWN || + who_type == ZFS_DELEG_NAMED_SET)) + usage(B_FALSE); + else if (who_type == ZFS_DELEG_WHO_UNKNOWN || + who_type == ZFS_DELEG_NAMED_SET) + who = (*argv)[0]; + else if (who_type != ZFS_DELEG_NAMED_SET) + perms = (*argv)[0]; + *ds = (*argv)[1]; + } + if (unallow == B_TRUE) { + (*argc)--; + (*argv)++; + } + break; + + case 1: + if (unallow == B_FALSE) + usage(B_FALSE); + if (who == NULL && who_type != ZFS_DELEG_CREATE && + who_type != ZFS_DELEG_EVERYONE) + usage(B_FALSE); + *ds = (*argv)[0]; + break; + + default: + usage(B_FALSE); + } + + if (strrchr(*ds, '@')) { + (void) fprintf(stderr, + gettext("Can't set or remove 'allow' permissions " + "on snapshots.\n")); + return (-1); + } + + if ((zhp = zfs_open(g_zfs, *ds, ZFS_TYPE_DATASET)) == NULL) + return (-1); + + if ((zfs_build_perms(zhp, who, perms, + who_type, deleg_type, zperms)) != 0) { + zfs_close(zhp); + return (-1); + } + zfs_close(zhp); + return (0); +} + +static int +zfs_do_allow(int argc, char **argv) +{ + char *ds; + nvlist_t *zperms = NULL; + zfs_handle_t *zhp; + int unused; + int ret; + + if ((ret = parse_allow_args(&argc, &argv, B_FALSE, &ds, + &unused, &zperms)) == -1) + return (1); + + if (ret == 1) + return (zfs_print_allows(argv[0])); + + if ((zhp = zfs_open(g_zfs, ds, ZFS_TYPE_DATASET)) == NULL) + return (1); + + if (zfs_perm_set(zhp, zperms)) { + zfs_close(zhp); + nvlist_free(zperms); + return (1); + } + nvlist_free(zperms); + zfs_close(zhp); + + return (0); +} + +static int +unallow_callback(zfs_handle_t *zhp, void *data) +{ + nvlist_t *nvp = (nvlist_t *)data; + int error; + + error = zfs_perm_remove(zhp, nvp); + if (error) { + (void) fprintf(stderr, gettext("Failed to remove permissions " + "on %s\n"), zfs_get_name(zhp)); + } + return (error); +} + +static int +zfs_do_unallow(int argc, char **argv) +{ + int recurse = B_FALSE; + char *ds; + int error; + nvlist_t *zperms = NULL; + int flags = 0; + + if (parse_allow_args(&argc, &argv, B_TRUE, + &ds, &recurse, &zperms) == -1) + return (1); + + if (recurse) + flags |= ZFS_ITER_RECURSE; + error = zfs_for_each(argc, argv, flags, + ZFS_TYPE_FILESYSTEM|ZFS_TYPE_VOLUME, NULL, + NULL, unallow_callback, (void *)zperms); + + if (zperms) + nvlist_free(zperms); + + return (error); +} + +typedef struct get_all_cbdata { + zfs_handle_t **cb_handles; + size_t cb_alloc; + size_t cb_used; + uint_t cb_types; + boolean_t cb_verbose; +} get_all_cbdata_t; + +#define CHECK_SPINNER 30 +#define SPINNER_TIME 3 /* seconds */ +#define MOUNT_TIME 5 /* seconds */ + +static int +get_one_dataset(zfs_handle_t *zhp, void *data) +{ + static char spin[] = { '-', 
'\\', '|', '/' };
+	static int spinval = 0;
+	static int spincheck = 0;
+	static time_t last_spin_time = (time_t)0;
+	get_all_cbdata_t *cbp = data;
+	zfs_type_t type = zfs_get_type(zhp);
+
+	if (cbp->cb_verbose) {
+		if (--spincheck < 0) {
+			time_t now = time(NULL);
+			if (last_spin_time + SPINNER_TIME < now) {
+				(void) printf("\b%c", spin[spinval++ % 4]);
+				(void) fflush(stdout);
+				last_spin_time = now;
+			}
+			spincheck = CHECK_SPINNER;
+		}
+	}
+
+	/*
+	 * Iterate over any nested datasets.
+	 */
+	if (type == ZFS_TYPE_FILESYSTEM &&
+	    zfs_iter_filesystems(zhp, get_one_dataset, data) != 0) {
+		zfs_close(zhp);
+		return (1);
+	}
+
+	/*
+	 * Skip any datasets whose type does not match.
+	 */
+	if ((type & cbp->cb_types) == 0) {
+		zfs_close(zhp);
+		return (0);
+	}
+
+	if (cbp->cb_alloc == cbp->cb_used) {
+		zfs_handle_t **handles;
+
+		if (cbp->cb_alloc == 0)
+			cbp->cb_alloc = 64;
+		else
+			cbp->cb_alloc *= 2;
+
+		handles = safe_malloc(cbp->cb_alloc * sizeof (void *));
+
+		if (cbp->cb_handles) {
+			bcopy(cbp->cb_handles, handles,
+			    cbp->cb_used * sizeof (void *));
+			free(cbp->cb_handles);
+		}
+
+		cbp->cb_handles = handles;
+	}
+
+	cbp->cb_handles[cbp->cb_used++] = zhp;
+
+	return (0);
+}
+
+static void
+get_all_datasets(uint_t types, zfs_handle_t ***dslist, size_t *count,
+    boolean_t verbose)
+{
+	get_all_cbdata_t cb = { 0 };
+	cb.cb_types = types;
+	cb.cb_verbose = verbose;
+
+	if (verbose) {
+		(void) printf("%s: *", gettext("Reading ZFS config"));
+		(void) fflush(stdout);
+	}
+
+	(void) zfs_iter_root(g_zfs, get_one_dataset, &cb);
+
+	*dslist = cb.cb_handles;
+	*count = cb.cb_used;
+
+	if (verbose) {
+		(void) printf("\b%s\n", gettext("done."));
+	}
+}
+
+static int
+dataset_cmp(const void *a, const void *b)
+{
+	zfs_handle_t **za = (zfs_handle_t **)a;
+	zfs_handle_t **zb = (zfs_handle_t **)b;
+	char mounta[MAXPATHLEN];
+	char mountb[MAXPATHLEN];
+	boolean_t gota, gotb;
+
+	if ((gota = (zfs_get_type(*za) == ZFS_TYPE_FILESYSTEM)) != 0)
+		verify(zfs_prop_get(*za, ZFS_PROP_MOUNTPOINT, mounta,
+		    sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
+	if ((gotb = (zfs_get_type(*zb) == ZFS_TYPE_FILESYSTEM)) != 0)
+		verify(zfs_prop_get(*zb, ZFS_PROP_MOUNTPOINT, mountb,
+		    sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
+
+	if (gota && gotb)
+		return (strcmp(mounta, mountb));
+
+	if (gota)
+		return (-1);
+	if (gotb)
+		return (1);
+
+	return (strcmp(zfs_get_name(a), zfs_get_name(b)));
+}
+
+/*
+ * Generic callback for sharing or mounting filesystems. Because the code is so
+ * similar, we have a common function with an extra parameter to determine
+ * which mode we are using.
+ */
+#define	OP_SHARE	0x1
+#define	OP_MOUNT	0x2
+
+/*
+ * Share or mount a dataset.
+ */
+static int
+share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol,
+    boolean_t explicit, const char *options)
+{
+	char mountpoint[ZFS_MAXPROPLEN];
+	char shareopts[ZFS_MAXPROPLEN];
+	char smbshareopts[ZFS_MAXPROPLEN];
+	const char *cmdname = op == OP_SHARE ? "share" : "mount";
+	struct mnttab mnt;
+	uint64_t zoned, canmount;
+	zfs_type_t type = zfs_get_type(zhp);
+	boolean_t shared_nfs, shared_smb;
+
+	assert(type & (ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME));
+
+	if (type == ZFS_TYPE_FILESYSTEM) {
+		/*
+		 * Check to make sure we can mount/share this dataset. If we
+		 * are in the global zone and the filesystem is exported to a
+		 * local zone, or if we are in a local zone and the
+		 * filesystem is not exported, then it is an error.
+ */ + zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED); + + if (zoned && getzoneid() == GLOBAL_ZONEID) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot %s '%s': " + "dataset is exported to a local zone\n"), cmdname, + zfs_get_name(zhp)); + return (1); + + } else if (!zoned && getzoneid() != GLOBAL_ZONEID) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot %s '%s': " + "permission denied\n"), cmdname, + zfs_get_name(zhp)); + return (1); + } + + /* + * Ignore any filesystems which don't apply to us. This + * includes those with a legacy mountpoint, or those with + * legacy share options. + */ + verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint, + sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0); + verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts, + sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0); + verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshareopts, + sizeof (smbshareopts), NULL, NULL, 0, B_FALSE) == 0); + canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT); + + if (op == OP_SHARE && strcmp(shareopts, "off") == 0 && + strcmp(smbshareopts, "off") == 0) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot share '%s': " + "legacy share\n"), zfs_get_name(zhp)); + (void) fprintf(stderr, gettext("use share(1M) to " + "share this filesystem\n")); + return (1); + } + + /* + * We cannot share or mount legacy filesystems. If the + * shareopts is non-legacy but the mountpoint is legacy, we + * treat it as a legacy share. + */ + if (strcmp(mountpoint, "legacy") == 0) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot %s '%s': " + "legacy mountpoint\n"), cmdname, zfs_get_name(zhp)); + (void) fprintf(stderr, gettext("use %s(1M) to " + "%s this filesystem\n"), cmdname, cmdname); + return (1); + } + + if (strcmp(mountpoint, "none") == 0) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot %s '%s': no " + "mountpoint set\n"), cmdname, zfs_get_name(zhp)); + return (1); + } + + /* + * canmount explicit outcome + * on no pass through + * on yes pass through + * off no return 0 + * off yes display error, return 1 + * noauto no return 0 + * noauto yes pass through + */ + if (canmount == ZFS_CANMOUNT_OFF) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot %s '%s': " + "'canmount' property is set to 'off'\n"), cmdname, + zfs_get_name(zhp)); + return (1); + } else if (canmount == ZFS_CANMOUNT_NOAUTO && !explicit) { + return (0); + } + + /* + * At this point, we have verified that the mountpoint and/or + * shareopts are appropriate for auto management. If the + * filesystem is already mounted or shared, return (failing + * for explicit requests); otherwise mount or share the + * filesystem. 
+ */ + switch (op) { + case OP_SHARE: + + shared_nfs = zfs_is_shared_nfs(zhp, NULL); + shared_smb = zfs_is_shared_smb(zhp, NULL); + + if (shared_nfs && shared_smb || + (shared_nfs && strcmp(shareopts, "on") == 0 && + strcmp(smbshareopts, "off") == 0) || + (shared_smb && strcmp(smbshareopts, "on") == 0 && + strcmp(shareopts, "off") == 0)) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot share " + "'%s': filesystem already shared\n"), + zfs_get_name(zhp)); + return (1); + } + + if (!zfs_is_mounted(zhp, NULL) && + zfs_mount(zhp, NULL, 0) != 0) + return (1); + + if (protocol == NULL) { + if (zfs_shareall(zhp) != 0) + return (1); + } else if (strcmp(protocol, "nfs") == 0) { + if (zfs_share_nfs(zhp)) + return (1); + } else if (strcmp(protocol, "smb") == 0) { + if (zfs_share_smb(zhp)) + return (1); + } else { + (void) fprintf(stderr, gettext("cannot share " + "'%s': invalid share type '%s' " + "specified\n"), + zfs_get_name(zhp), protocol); + return (1); + } + + break; + + case OP_MOUNT: + if (options == NULL) + mnt.mnt_mntopts = ""; + else + mnt.mnt_mntopts = (char *)options; + + if (!hasmntopt(&mnt, MNTOPT_REMOUNT) && + zfs_is_mounted(zhp, NULL)) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot mount " + "'%s': filesystem already mounted\n"), + zfs_get_name(zhp)); + return (1); + } + + if (zfs_mount(zhp, options, flags) != 0) + return (1); + break; + } + } else { + assert(op == OP_SHARE); + + /* + * Ignore any volumes that aren't shared. + */ + verify(zfs_prop_get(zhp, ZFS_PROP_SHAREISCSI, shareopts, + sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0); + + if (strcmp(shareopts, "off") == 0) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot share '%s': " + "'shareiscsi' property not set\n"), + zfs_get_name(zhp)); + (void) fprintf(stderr, gettext("set 'shareiscsi' " + "property or use iscsitadm(1M) to share this " + "volume\n")); + return (1); + } + + if (zfs_is_shared_iscsi(zhp)) { + if (!explicit) + return (0); + + (void) fprintf(stderr, gettext("cannot share " + "'%s': volume already shared\n"), + zfs_get_name(zhp)); + return (1); + } + + if (zfs_share_iscsi(zhp) != 0) + return (1); + } + + return (0); +} + +/* + * Reports progress in the form "(current/total)". Not thread-safe. + */ +static void +report_mount_progress(int current, int total) +{ + static int len; + static char *reverse = "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b" + "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b"; + static time_t last_progress_time; + time_t now = time(NULL); + + /* report 1..n instead of 0..n-1 */ + ++current; + + /* display header if we're here for the first time */ + if (current == 1) { + (void) printf(gettext("Mounting ZFS filesystems: ")); + len = 0; + } else if (current != total && last_progress_time + MOUNT_TIME >= now) { + /* too soon to report again */ + return; + } + + last_progress_time = now; + + /* back up to prepare for overwriting */ + if (len) + (void) printf("%*.*s", len, len, reverse); + + /* We put a newline at the end if this is the last one. */ + len = printf("(%d/%d)%s", current, total, current == total ? 
"\n" : ""); + (void) fflush(stdout); +} + +static void +append_options(char *mntopts, char *newopts) +{ + int len = strlen(mntopts); + + /* original length plus new string to append plus 1 for the comma */ + if (len + 1 + strlen(newopts) >= MNT_LINE_MAX) { + (void) fprintf(stderr, gettext("the opts argument for " + "'%c' option is too long (more than %d chars)\n"), + "-o", MNT_LINE_MAX); + usage(B_FALSE); + } + + if (*mntopts) + mntopts[len++] = ','; + + (void) strcpy(&mntopts[len], newopts); +} + +static int +share_mount(int op, int argc, char **argv) +{ + int do_all = 0; + boolean_t verbose = B_FALSE; + int c, ret = 0; + char *options = NULL; + int types, flags = 0; + + /* check options */ + while ((c = getopt(argc, argv, op == OP_MOUNT ? ":avo:O" : "a")) + != -1) { + switch (c) { + case 'a': + do_all = 1; + break; + case 'v': + verbose = B_TRUE; + break; + case 'o': + if (*optarg == '\0') { + (void) fprintf(stderr, gettext("empty mount " + "options (-o) specified\n")); + usage(B_FALSE); + } + + if (options == NULL) + options = safe_malloc(MNT_LINE_MAX + 1); + + /* option validation is done later */ + append_options(options, optarg); + break; + + case 'O': + flags |= MS_OVERLAY; + break; + case ':': + (void) fprintf(stderr, gettext("missing argument for " + "'%c' option\n"), optopt); + usage(B_FALSE); + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* check number of arguments */ + if (do_all) { + zfs_handle_t **dslist = NULL; + size_t i, count = 0; + char *protocol = NULL; + + if (op == OP_MOUNT) { + types = ZFS_TYPE_FILESYSTEM; + } else if (argc > 0) { + if (strcmp(argv[0], "nfs") == 0 || + strcmp(argv[0], "smb") == 0) { + types = ZFS_TYPE_FILESYSTEM; + } else if (strcmp(argv[0], "iscsi") == 0) { + types = ZFS_TYPE_VOLUME; + } else { + (void) fprintf(stderr, gettext("share type " + "must be 'nfs', 'smb' or 'iscsi'\n")); + usage(B_FALSE); + } + protocol = argv[0]; + argc--; + argv++; + } else { + types = ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME; + } + + if (argc != 0) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + get_all_datasets(types, &dslist, &count, verbose); + + if (count == 0) + return (0); + + qsort(dslist, count, sizeof (void *), dataset_cmp); + + for (i = 0; i < count; i++) { + if (verbose) + report_mount_progress(i, count); + + if (share_mount_one(dslist[i], op, flags, protocol, + B_FALSE, options) != 0) + ret = 1; + zfs_close(dslist[i]); + } + + free(dslist); + } else if (argc == 0) { + struct mnttab entry; + + if ((op == OP_SHARE) || (options != NULL)) { + (void) fprintf(stderr, gettext("missing filesystem " + "argument (specify -a for all)\n")); + usage(B_FALSE); + } + + /* + * When mount is given no arguments, go through /etc/mnttab and + * display any active ZFS mounts. We hide any snapshots, since + * they are controlled automatically. 
+ */ + rewind(mnttab_file); + while (getmntent(mnttab_file, &entry) == 0) { + if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 || + strchr(entry.mnt_special, '@') != NULL) + continue; + + (void) printf("%-30s %s\n", entry.mnt_special, + entry.mnt_mountp); + } + + } else { + zfs_handle_t *zhp; + + types = ZFS_TYPE_FILESYSTEM; + if (op == OP_SHARE) + types |= ZFS_TYPE_VOLUME; + + if (argc > 1) { + (void) fprintf(stderr, + gettext("too many arguments\n")); + usage(B_FALSE); + } + + if ((zhp = zfs_open(g_zfs, argv[0], types)) == NULL) { + ret = 1; + } else { + ret = share_mount_one(zhp, op, flags, NULL, B_TRUE, + options); + zfs_close(zhp); + } + } + + return (ret); +} + +/* + * zfs mount -a [nfs | iscsi] + * zfs mount filesystem + * + * Mount all filesystems, or mount the given filesystem. + */ +static int +zfs_do_mount(int argc, char **argv) +{ + return (share_mount(OP_MOUNT, argc, argv)); +} + +/* + * zfs share -a [nfs | iscsi | smb] + * zfs share filesystem + * + * Share all filesystems, or share the given filesystem. + */ +static int +zfs_do_share(int argc, char **argv) +{ + return (share_mount(OP_SHARE, argc, argv)); +} + +typedef struct unshare_unmount_node { + zfs_handle_t *un_zhp; + char *un_mountp; + uu_avl_node_t un_avlnode; +} unshare_unmount_node_t; + +/* ARGSUSED */ +static int +unshare_unmount_compare(const void *larg, const void *rarg, void *unused) +{ + const unshare_unmount_node_t *l = larg; + const unshare_unmount_node_t *r = rarg; + + return (strcmp(l->un_mountp, r->un_mountp)); +} + +/* + * Convenience routine used by zfs_do_umount() and manual_unmount(). Given an + * absolute path, find the entry /etc/mnttab, verify that its a ZFS filesystem, + * and unmount it appropriately. + */ +static int +unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual) +{ + zfs_handle_t *zhp; + int ret; + struct stat64 statbuf; + struct extmnttab entry; + const char *cmdname = (op == OP_SHARE) ? "unshare" : "unmount"; + ino_t path_inode; + + /* + * Search for the path in /etc/mnttab. Rather than looking for the + * specific path, which can be fooled by non-standard paths (i.e. ".." + * or "//"), we stat() the path and search for the corresponding + * (major,minor) device pair. + */ + if (stat64(path, &statbuf) != 0) { + (void) fprintf(stderr, gettext("cannot %s '%s': %s\n"), + cmdname, path, strerror(errno)); + return (1); + } + path_inode = statbuf.st_ino; + + /* + * Search for the given (major,minor) pair in the mount table. 
+ */ + rewind(mnttab_file); + while ((ret = getextmntent(mnttab_file, &entry, 0)) == 0) { + if (entry.mnt_major == major(statbuf.st_dev) && + entry.mnt_minor == minor(statbuf.st_dev)) + break; + } + if (ret != 0) { + if (op == OP_SHARE) { + (void) fprintf(stderr, gettext("cannot %s '%s': not " + "currently mounted\n"), cmdname, path); + return (1); + } + (void) fprintf(stderr, gettext("warning: %s not in mnttab\n"), + path); + if ((ret = umount2(path, flags)) != 0) + (void) fprintf(stderr, gettext("%s: %s\n"), path, + strerror(errno)); + return (ret != 0); + } + + if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) { + (void) fprintf(stderr, gettext("cannot %s '%s': not a ZFS " + "filesystem\n"), cmdname, path); + return (1); + } + + if ((zhp = zfs_open(g_zfs, entry.mnt_special, + ZFS_TYPE_FILESYSTEM)) == NULL) + return (1); + + ret = 1; + if (stat64(entry.mnt_mountp, &statbuf) != 0) { + (void) fprintf(stderr, gettext("cannot %s '%s': %s\n"), + cmdname, path, strerror(errno)); + goto out; + } else if (statbuf.st_ino != path_inode) { + (void) fprintf(stderr, gettext("cannot " + "%s '%s': not a mountpoint\n"), cmdname, path); + goto out; + } + + if (op == OP_SHARE) { + char nfs_mnt_prop[ZFS_MAXPROPLEN]; + char smbshare_prop[ZFS_MAXPROPLEN]; + + verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop, + sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0); + verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshare_prop, + sizeof (smbshare_prop), NULL, NULL, 0, B_FALSE) == 0); + + if (strcmp(nfs_mnt_prop, "off") == 0 && + strcmp(smbshare_prop, "off") == 0) { + (void) fprintf(stderr, gettext("cannot unshare " + "'%s': legacy share\n"), path); + (void) fprintf(stderr, gettext("use " + "unshare(1M) to unshare this filesystem\n")); + } else if (!zfs_is_shared(zhp)) { + (void) fprintf(stderr, gettext("cannot unshare '%s': " + "not currently shared\n"), path); + } else { + ret = zfs_unshareall_bypath(zhp, path); + } + } else { + char mtpt_prop[ZFS_MAXPROPLEN]; + + verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mtpt_prop, + sizeof (mtpt_prop), NULL, NULL, 0, B_FALSE) == 0); + + if (is_manual) { + ret = zfs_unmount(zhp, NULL, flags); + } else if (strcmp(mtpt_prop, "legacy") == 0) { + (void) fprintf(stderr, gettext("cannot unmount " + "'%s': legacy mountpoint\n"), + zfs_get_name(zhp)); + (void) fprintf(stderr, gettext("use umount(1M) " + "to unmount this filesystem\n")); + } else { + ret = zfs_unmountall(zhp, flags); + } + } + +out: + zfs_close(zhp); + + return (ret != 0); +} + +/* + * Generic callback for unsharing or unmounting a filesystem. + */ +static int +unshare_unmount(int op, int argc, char **argv) +{ + int do_all = 0; + int flags = 0; + int ret = 0; + int types, c; + zfs_handle_t *zhp; + char nfsiscsi_mnt_prop[ZFS_MAXPROPLEN]; + char sharesmb[ZFS_MAXPROPLEN]; + + /* check options */ + while ((c = getopt(argc, argv, op == OP_SHARE ? "a" : "af")) != -1) { + switch (c) { + case 'a': + do_all = 1; + break; + case 'f': + flags = MS_FORCE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + if (do_all) { + /* + * We could make use of zfs_for_each() to walk all datasets in + * the system, but this would be very inefficient, especially + * since we would have to linearly search /etc/mnttab for each + * one. Instead, do one pass through /etc/mnttab looking for + * zfs entries and call zfs_unmount() for each one. 
+ * + * Things get a little tricky if the administrator has created + * mountpoints beneath other ZFS filesystems. In this case, we + * have to unmount the deepest filesystems first. To accomplish + * this, we place all the mountpoints in an AVL tree sorted by + * the special type (dataset name), and walk the result in + * reverse to make sure to get any snapshots first. + */ + struct mnttab entry; + uu_avl_pool_t *pool; + uu_avl_t *tree; + unshare_unmount_node_t *node; + uu_avl_index_t idx; + uu_avl_walk_t *walk; + + if (argc != 0) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + if ((pool = uu_avl_pool_create("unmount_pool", + sizeof (unshare_unmount_node_t), + offsetof(unshare_unmount_node_t, un_avlnode), + unshare_unmount_compare, + UU_DEFAULT)) == NULL) { + (void) fprintf(stderr, gettext("internal error: " + "out of memory\n")); + exit(1); + } + + if ((tree = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL) { + (void) fprintf(stderr, gettext("internal error: " + "out of memory\n")); + exit(1); + } + + rewind(mnttab_file); + while (getmntent(mnttab_file, &entry) == 0) { + + /* ignore non-ZFS entries */ + if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) + continue; + + /* ignore snapshots */ + if (strchr(entry.mnt_special, '@') != NULL) + continue; + + if ((zhp = zfs_open(g_zfs, entry.mnt_special, + ZFS_TYPE_FILESYSTEM)) == NULL) { + ret = 1; + continue; + } + + switch (op) { + case OP_SHARE: + verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, + nfsiscsi_mnt_prop, + sizeof (nfsiscsi_mnt_prop), + NULL, NULL, 0, B_FALSE) == 0); + if (strcmp(nfsiscsi_mnt_prop, "off") != 0) + break; + verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, + nfsiscsi_mnt_prop, + sizeof (nfsiscsi_mnt_prop), + NULL, NULL, 0, B_FALSE) == 0); + if (strcmp(nfsiscsi_mnt_prop, "off") == 0) + continue; + break; + case OP_MOUNT: + /* Ignore legacy mounts */ + verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, + nfsiscsi_mnt_prop, + sizeof (nfsiscsi_mnt_prop), + NULL, NULL, 0, B_FALSE) == 0); + if (strcmp(nfsiscsi_mnt_prop, "legacy") == 0) + continue; + /* Ignore canmount=noauto mounts */ + if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == + ZFS_CANMOUNT_NOAUTO) + continue; + default: + break; + } + + node = safe_malloc(sizeof (unshare_unmount_node_t)); + node->un_zhp = zhp; + + if ((node->un_mountp = strdup(entry.mnt_mountp)) == + NULL) { + (void) fprintf(stderr, gettext("internal error:" + " out of memory\n")); + exit(1); + } + + uu_avl_node_init(node, &node->un_avlnode, pool); + + if (uu_avl_find(tree, node, NULL, &idx) == NULL) { + uu_avl_insert(tree, node, idx); + } else { + zfs_close(node->un_zhp); + free(node->un_mountp); + free(node); + } + } + + /* + * Walk the AVL tree in reverse, unmounting each filesystem and + * removing it from the AVL tree in the process. + */ + if ((walk = uu_avl_walk_start(tree, + UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL) { + (void) fprintf(stderr, + gettext("internal error: out of memory")); + exit(1); + } + + while ((node = uu_avl_walk_next(walk)) != NULL) { + uu_avl_remove(tree, node); + + switch (op) { + case OP_SHARE: + if (zfs_unshareall_bypath(node->un_zhp, + node->un_mountp) != 0) + ret = 1; + break; + + case OP_MOUNT: + if (zfs_unmount(node->un_zhp, + node->un_mountp, flags) != 0) + ret = 1; + break; + } + + zfs_close(node->un_zhp); + free(node->un_mountp); + free(node); + } + + uu_avl_walk_end(walk); + uu_avl_destroy(tree); + uu_avl_pool_destroy(pool); + + if (op == OP_SHARE) { + /* + * Finally, unshare any volumes shared via iSCSI. 
+ */ + zfs_handle_t **dslist = NULL; + size_t i, count = 0; + + get_all_datasets(ZFS_TYPE_VOLUME, &dslist, &count, + B_FALSE); + + if (count != 0) { + qsort(dslist, count, sizeof (void *), + dataset_cmp); + + for (i = 0; i < count; i++) { + if (zfs_unshare_iscsi(dslist[i]) != 0) + ret = 1; + zfs_close(dslist[i]); + } + + free(dslist); + } + } + } else { + if (argc != 1) { + if (argc == 0) + (void) fprintf(stderr, + gettext("missing filesystem argument\n")); + else + (void) fprintf(stderr, + gettext("too many arguments\n")); + usage(B_FALSE); + } + + /* + * We have an argument, but it may be a full path or a ZFS + * filesystem. Pass full paths off to unmount_path() (shared by + * manual_unmount), otherwise open the filesystem and pass to + * zfs_unmount(). + */ + if (argv[0][0] == '/') + return (unshare_unmount_path(op, argv[0], + flags, B_FALSE)); + + types = ZFS_TYPE_FILESYSTEM; + if (op == OP_SHARE) + types |= ZFS_TYPE_VOLUME; + + if ((zhp = zfs_open(g_zfs, argv[0], types)) == NULL) + return (1); + + if (zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) { + verify(zfs_prop_get(zhp, op == OP_SHARE ? + ZFS_PROP_SHARENFS : ZFS_PROP_MOUNTPOINT, + nfsiscsi_mnt_prop, sizeof (nfsiscsi_mnt_prop), NULL, + NULL, 0, B_FALSE) == 0); + + switch (op) { + case OP_SHARE: + verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, + nfsiscsi_mnt_prop, + sizeof (nfsiscsi_mnt_prop), + NULL, NULL, 0, B_FALSE) == 0); + verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, + sharesmb, sizeof (sharesmb), NULL, NULL, + 0, B_FALSE) == 0); + + if (strcmp(nfsiscsi_mnt_prop, "off") == 0 && + strcmp(sharesmb, "off") == 0) { + (void) fprintf(stderr, gettext("cannot " + "unshare '%s': legacy share\n"), + zfs_get_name(zhp)); + (void) fprintf(stderr, gettext("use " + "unshare(1M) to unshare this " + "filesystem\n")); + ret = 1; + } else if (!zfs_is_shared(zhp)) { + (void) fprintf(stderr, gettext("cannot " + "unshare '%s': not currently " + "shared\n"), zfs_get_name(zhp)); + ret = 1; + } else if (zfs_unshareall(zhp) != 0) { + ret = 1; + } + break; + + case OP_MOUNT: + if (strcmp(nfsiscsi_mnt_prop, "legacy") == 0) { + (void) fprintf(stderr, gettext("cannot " + "unmount '%s': legacy " + "mountpoint\n"), zfs_get_name(zhp)); + (void) fprintf(stderr, gettext("use " + "umount(1M) to unmount this " + "filesystem\n")); + ret = 1; + } else if (!zfs_is_mounted(zhp, NULL)) { + (void) fprintf(stderr, gettext("cannot " + "unmount '%s': not currently " + "mounted\n"), + zfs_get_name(zhp)); + ret = 1; + } else if (zfs_unmountall(zhp, flags) != 0) { + ret = 1; + } + break; + } + } else { + assert(op == OP_SHARE); + + verify(zfs_prop_get(zhp, ZFS_PROP_SHAREISCSI, + nfsiscsi_mnt_prop, sizeof (nfsiscsi_mnt_prop), + NULL, NULL, 0, B_FALSE) == 0); + + if (strcmp(nfsiscsi_mnt_prop, "off") == 0) { + (void) fprintf(stderr, gettext("cannot unshare " + "'%s': 'shareiscsi' property not set\n"), + zfs_get_name(zhp)); + (void) fprintf(stderr, gettext("set " + "'shareiscsi' property or use " + "iscsitadm(1M) to share this volume\n")); + ret = 1; + } else if (!zfs_is_shared_iscsi(zhp)) { + (void) fprintf(stderr, gettext("cannot " + "unshare '%s': not currently shared\n"), + zfs_get_name(zhp)); + ret = 1; + } else if (zfs_unshare_iscsi(zhp) != 0) { + ret = 1; + } + } + + zfs_close(zhp); + } + + return (ret); +} + +/* + * zfs unmount -a + * zfs unmount filesystem + * + * Unmount all filesystems, or a specific ZFS filesystem. 
+ */ +static int +zfs_do_unmount(int argc, char **argv) +{ + return (unshare_unmount(OP_MOUNT, argc, argv)); +} + +/* + * zfs unshare -a + * zfs unshare filesystem + * + * Unshare all filesystems, or a specific ZFS filesystem. + */ +static int +zfs_do_unshare(int argc, char **argv) +{ + return (unshare_unmount(OP_SHARE, argc, argv)); +} + +/* + * Called when invoked as /etc/fs/zfs/mount. Do the mount if the mountpoint is + * 'legacy'. Otherwise, complain that use should be using 'zfs mount'. + */ +static int +manual_mount(int argc, char **argv) +{ + zfs_handle_t *zhp; + char mountpoint[ZFS_MAXPROPLEN]; + char mntopts[MNT_LINE_MAX] = { '\0' }; + int ret; + int c; + int flags = 0; + char *dataset, *path; + + /* check options */ + while ((c = getopt(argc, argv, ":mo:O")) != -1) { + switch (c) { + case 'o': + (void) strlcpy(mntopts, optarg, sizeof (mntopts)); + break; + case 'O': + flags |= MS_OVERLAY; + break; + case 'm': + flags |= MS_NOMNTTAB; + break; + case ':': + (void) fprintf(stderr, gettext("missing argument for " + "'%c' option\n"), optopt); + usage(B_FALSE); + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + (void) fprintf(stderr, gettext("usage: mount [-o opts] " + "\n")); + return (2); + } + } + + argc -= optind; + argv += optind; + + /* check that we only have two arguments */ + if (argc != 2) { + if (argc == 0) + (void) fprintf(stderr, gettext("missing dataset " + "argument\n")); + else if (argc == 1) + (void) fprintf(stderr, + gettext("missing mountpoint argument\n")); + else + (void) fprintf(stderr, gettext("too many arguments\n")); + (void) fprintf(stderr, "usage: mount \n"); + return (2); + } + + dataset = argv[0]; + path = argv[1]; + + /* try to open the dataset */ + if ((zhp = zfs_open(g_zfs, dataset, ZFS_TYPE_FILESYSTEM)) == NULL) + return (1); + + (void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint, + sizeof (mountpoint), NULL, NULL, 0, B_FALSE); + + /* check for legacy mountpoint and complain appropriately */ + ret = 0; + if (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) == 0) { + if (mount(dataset, path, MS_OPTIONSTR | flags, MNTTYPE_ZFS, + NULL, 0, mntopts, sizeof (mntopts)) != 0) { + (void) fprintf(stderr, gettext("mount failed: %s\n"), + strerror(errno)); + ret = 1; + } + } else { + (void) fprintf(stderr, gettext("filesystem '%s' cannot be " + "mounted using 'mount -F zfs'\n"), dataset); + (void) fprintf(stderr, gettext("Use 'zfs set mountpoint=%s' " + "instead.\n"), path); + (void) fprintf(stderr, gettext("If you must use 'mount -F zfs' " + "or /etc/vfstab, use 'zfs set mountpoint=legacy'.\n")); + (void) fprintf(stderr, gettext("See zfs(1M) for more " + "information.\n")); + ret = 1; + } + + return (ret); +} + +/* + * Called when invoked as /etc/fs/zfs/umount. Unlike a manual mount, we allow + * unmounts of non-legacy filesystems, as this is the dominant administrative + * interface. 
+ */ +static int +manual_unmount(int argc, char **argv) +{ + int flags = 0; + int c; + + /* check options */ + while ((c = getopt(argc, argv, "f")) != -1) { + switch (c) { + case 'f': + flags = MS_FORCE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + (void) fprintf(stderr, gettext("usage: unmount [-f] " + "\n")); + return (2); + } + } + + argc -= optind; + argv += optind; + + /* check arguments */ + if (argc != 1) { + if (argc == 0) + (void) fprintf(stderr, gettext("missing path " + "argument\n")); + else + (void) fprintf(stderr, gettext("too many arguments\n")); + (void) fprintf(stderr, gettext("usage: unmount [-f] \n")); + return (2); + } + + return (unshare_unmount_path(OP_MOUNT, argv[0], flags, B_TRUE)); +} + +static int +volcheck(zpool_handle_t *zhp, void *data) +{ + boolean_t isinit = *((boolean_t *)data); + + if (isinit) + return (zpool_create_zvol_links(zhp)); + else + return (zpool_remove_zvol_links(zhp)); +} + +/* + * Iterate over all pools in the system and either create or destroy /dev/zvol + * links, depending on the value of 'isinit'. + */ +static int +do_volcheck(boolean_t isinit) +{ + return (zpool_iter(g_zfs, volcheck, &isinit) ? 1 : 0); +} + +static int +find_command_idx(char *command, int *idx) +{ + int i; + + for (i = 0; i < NCOMMAND; i++) { + if (command_table[i].name == NULL) + continue; + + if (strcmp(command, command_table[i].name) == 0) { + *idx = i; + return (0); + } + } + return (1); +} + +int +main(int argc, char **argv) +{ + int ret; + int i; + char *progname; + char *cmdname; + + (void) setlocale(LC_ALL, ""); + (void) textdomain(TEXT_DOMAIN); + + opterr = 0; + + if ((g_zfs = libzfs_init()) == NULL) { + (void) fprintf(stderr, gettext("internal error: failed to " + "initialize ZFS library\n")); + return (1); + } + + zpool_set_history_str("zfs", argc, argv, history_str); + verify(zpool_stage_history(g_zfs, history_str) == 0); + + libzfs_print_on_error(g_zfs, B_TRUE); + + if ((mnttab_file = fopen(MNTTAB, "r")) == NULL) { + (void) fprintf(stderr, gettext("internal error: unable to " + "open %s\n"), MNTTAB); + return (1); + } + + /* + * This command also doubles as the /etc/fs mount and unmount program. + * Determine if we should take this behavior based on argv[0]. + */ + progname = basename(argv[0]); + if (strcmp(progname, "mount") == 0) { + ret = manual_mount(argc, argv); + } else if (strcmp(progname, "umount") == 0) { + ret = manual_unmount(argc, argv); + } else { + /* + * Make sure the user has specified some command. + */ + if (argc < 2) { + (void) fprintf(stderr, gettext("missing command\n")); + usage(B_FALSE); + } + + cmdname = argv[1]; + + /* + * The 'umount' command is an alias for 'unmount' + */ + if (strcmp(cmdname, "umount") == 0) + cmdname = "unmount"; + + /* + * The 'recv' command is an alias for 'receive' + */ + if (strcmp(cmdname, "recv") == 0) + cmdname = "receive"; + + /* + * Special case '-?' + */ + if (strcmp(cmdname, "-?") == 0) + usage(B_TRUE); + + /* + * 'volinit' and 'volfini' do not appear in the usage message, + * so we have to special case them here. + */ + if (strcmp(cmdname, "volinit") == 0) + return (do_volcheck(B_TRUE)); + else if (strcmp(cmdname, "volfini") == 0) + return (do_volcheck(B_FALSE)); + + /* + * Run the appropriate command. 
+ */ + if (find_command_idx(cmdname, &i) == 0) { + current_command = &command_table[i]; + ret = command_table[i].func(argc - 1, argv + 1); + } else if (strchr(cmdname, '=') != NULL) { + verify(find_command_idx("set", &i) == 0); + current_command = &command_table[i]; + ret = command_table[i].func(argc, argv); + } else { + (void) fprintf(stderr, gettext("unrecognized " + "command '%s'\n"), cmdname); + usage(B_FALSE); + } + } + + (void) fclose(mnttab_file); + + libzfs_fini(g_zfs); + + /* + * The 'ZFS_ABORT' environment variable causes us to dump core on exit + * for the purposes of running ::findleaks. + */ + if (getenv("ZFS_ABORT") != NULL) { + (void) printf("dumping core by request\n"); + abort(); + } + + return (ret); +} diff --git a/external/cddl/osnet/dist/cmd/zfs/zfs_util.h b/external/cddl/osnet/dist/cmd/zfs/zfs_util.h new file mode 100644 index 000000000000..c7f2f1618647 --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zfs/zfs_util.h @@ -0,0 +1,44 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _ZFS_UTIL_H +#define _ZFS_UTIL_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +void * safe_malloc(size_t size); +libzfs_handle_t *g_zfs; + +#ifdef __cplusplus +} +#endif + +#endif /* _ZFS_UTIL_H */ diff --git a/external/cddl/osnet/dist/cmd/zpool/zpool_iter.c b/external/cddl/osnet/dist/cmd/zpool/zpool_iter.c new file mode 100644 index 000000000000..2f0daefd5572 --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zpool/zpool_iter.c @@ -0,0 +1,252 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include + +#include + +#include "zpool_util.h" + +/* + * Private interface for iterating over pools specified on the command line. + * Most consumers will call for_each_pool, but in order to support iostat, we + * allow fined grained control through the zpool_list_t interface. + */ + +typedef struct zpool_node { + zpool_handle_t *zn_handle; + uu_avl_node_t zn_avlnode; + int zn_mark; +} zpool_node_t; + +struct zpool_list { + boolean_t zl_findall; + uu_avl_t *zl_avl; + uu_avl_pool_t *zl_pool; + zprop_list_t **zl_proplist; +}; + +/* ARGSUSED */ +static int +zpool_compare(const void *larg, const void *rarg, void *unused) +{ + zpool_handle_t *l = ((zpool_node_t *)larg)->zn_handle; + zpool_handle_t *r = ((zpool_node_t *)rarg)->zn_handle; + const char *lname = zpool_get_name(l); + const char *rname = zpool_get_name(r); + + return (strcmp(lname, rname)); +} + +/* + * Callback function for pool_list_get(). Adds the given pool to the AVL tree + * of known pools. + */ +static int +add_pool(zpool_handle_t *zhp, void *data) +{ + zpool_list_t *zlp = data; + zpool_node_t *node = safe_malloc(sizeof (zpool_node_t)); + uu_avl_index_t idx; + + node->zn_handle = zhp; + uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool); + if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) { + if (zlp->zl_proplist && + zpool_expand_proplist(zhp, zlp->zl_proplist) != 0) { + zpool_close(zhp); + free(node); + return (-1); + } + uu_avl_insert(zlp->zl_avl, node, idx); + } else { + zpool_close(zhp); + free(node); + return (-1); + } + + return (0); +} + +/* + * Create a list of pools based on the given arguments. If we're given no + * arguments, then iterate over all pools in the system and add them to the AVL + * tree. Otherwise, add only those pool explicitly specified on the command + * line. + */ +zpool_list_t * +pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err) +{ + zpool_list_t *zlp; + + zlp = safe_malloc(sizeof (zpool_list_t)); + + zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t), + offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT); + + if (zlp->zl_pool == NULL) + zpool_no_memory(); + + if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL, + UU_DEFAULT)) == NULL) + zpool_no_memory(); + + zlp->zl_proplist = proplist; + + if (argc == 0) { + (void) zpool_iter(g_zfs, add_pool, zlp); + zlp->zl_findall = B_TRUE; + } else { + int i; + + for (i = 0; i < argc; i++) { + zpool_handle_t *zhp; + + if (zhp = zpool_open_canfail(g_zfs, argv[i])) { + if (add_pool(zhp, zlp) != 0) + *err = B_TRUE; + } else { + *err = B_TRUE; + } + } + } + + return (zlp); +} + +/* + * Search for any new pools, adding them to the list. We only add pools when no + * options were given on the command line. Otherwise, we keep the list fixed as + * those that were explicitly specified. 
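The comment above contrasts for_each_pool() (defined at the end of this file) with finer-grained use of the zpool_list_t interface, which is what iostat needs in order to keep its pool list alive between intervals. A rough usage sketch, assuming these routines are declared in zpool_util.h as the quoted include at the top of the file suggests:

#include <stdio.h>
#include <libzfs.h>

#include "zpool_util.h"

/* ARGSUSED */
static int
print_pool_name(zpool_handle_t *zhp, void *data)
{
        (void) printf("%s\n", zpool_get_name(zhp));
        return (0);
}

/*
 * Sketch: roughly what for_each_pool() does, but with the list kept in
 * hand so it could be refreshed with pool_list_update() between passes.
 */
static int
example_list_pools(int argc, char **argv)
{
        zpool_list_t *list;
        int err = 0;

        if ((list = pool_list_get(argc, argv, NULL, &err)) == NULL)
                return (1);

        (void) pool_list_iter(list, B_TRUE, print_pool_name, NULL);
        pool_list_free(list);

        return (err != 0);
}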
+ */ +void +pool_list_update(zpool_list_t *zlp) +{ + if (zlp->zl_findall) + (void) zpool_iter(g_zfs, add_pool, zlp); +} + +/* + * Iterate over all pools in the list, executing the callback for each + */ +int +pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func, + void *data) +{ + zpool_node_t *node, *next_node; + int ret = 0; + + for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) { + next_node = uu_avl_next(zlp->zl_avl, node); + if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL || + unavail) + ret |= func(node->zn_handle, data); + } + + return (ret); +} + +/* + * Remove the given pool from the list. When running iostat, we want to remove + * those pools that no longer exist. + */ +void +pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp) +{ + zpool_node_t search, *node; + + search.zn_handle = zhp; + if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) { + uu_avl_remove(zlp->zl_avl, node); + zpool_close(node->zn_handle); + free(node); + } +} + +/* + * Free all the handles associated with this list. + */ +void +pool_list_free(zpool_list_t *zlp) +{ + uu_avl_walk_t *walk; + zpool_node_t *node; + + if ((walk = uu_avl_walk_start(zlp->zl_avl, UU_WALK_ROBUST)) == NULL) { + (void) fprintf(stderr, + gettext("internal error: out of memory")); + exit(1); + } + + while ((node = uu_avl_walk_next(walk)) != NULL) { + uu_avl_remove(zlp->zl_avl, node); + zpool_close(node->zn_handle); + free(node); + } + + uu_avl_walk_end(walk); + uu_avl_destroy(zlp->zl_avl); + uu_avl_pool_destroy(zlp->zl_pool); + + free(zlp); +} + +/* + * Returns the number of elements in the pool list. + */ +int +pool_list_count(zpool_list_t *zlp) +{ + return (uu_avl_numnodes(zlp->zl_avl)); +} + +/* + * High level function which iterates over all pools given on the command line, + * using the pool_list_* interfaces. + */ +int +for_each_pool(int argc, char **argv, boolean_t unavail, + zprop_list_t **proplist, zpool_iter_f func, void *data) +{ + zpool_list_t *list; + int ret = 0; + + if ((list = pool_list_get(argc, argv, proplist, &ret)) == NULL) + return (1); + + if (pool_list_iter(list, unavail, func, data) != 0) + ret = 1; + + pool_list_free(list); + + return (ret); +} diff --git a/external/cddl/osnet/dist/cmd/zpool/zpool_main.c b/external/cddl/osnet/dist/cmd/zpool/zpool_main.c new file mode 100644 index 000000000000..54bba8645c66 --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zpool/zpool_main.c @@ -0,0 +1,3950 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "zpool_util.h" +#include "zfs_comutil.h" + +static int zpool_do_create(int, char **); +static int zpool_do_destroy(int, char **); + +static int zpool_do_add(int, char **); +static int zpool_do_remove(int, char **); + +static int zpool_do_list(int, char **); +static int zpool_do_iostat(int, char **); +static int zpool_do_status(int, char **); + +static int zpool_do_online(int, char **); +static int zpool_do_offline(int, char **); +static int zpool_do_clear(int, char **); + +static int zpool_do_attach(int, char **); +static int zpool_do_detach(int, char **); +static int zpool_do_replace(int, char **); + +static int zpool_do_scrub(int, char **); + +static int zpool_do_import(int, char **); +static int zpool_do_export(int, char **); + +static int zpool_do_upgrade(int, char **); + +static int zpool_do_history(int, char **); + +static int zpool_do_get(int, char **); +static int zpool_do_set(int, char **); + +/* + * These libumem hooks provide a reasonable set of defaults for the allocator's + * debugging facilities. + */ + +#ifdef DEBUG +const char * +_umem_debug_init(void) +{ + return ("default,verbose"); /* $UMEM_DEBUG setting */ +} + +const char * +_umem_logging_init(void) +{ + return ("fail,contents"); /* $UMEM_LOGGING setting */ +} +#endif + +typedef enum { + HELP_ADD, + HELP_ATTACH, + HELP_CLEAR, + HELP_CREATE, + HELP_DESTROY, + HELP_DETACH, + HELP_EXPORT, + HELP_HISTORY, + HELP_IMPORT, + HELP_IOSTAT, + HELP_LIST, + HELP_OFFLINE, + HELP_ONLINE, + HELP_REPLACE, + HELP_REMOVE, + HELP_SCRUB, + HELP_STATUS, + HELP_UPGRADE, + HELP_GET, + HELP_SET +} zpool_help_t; + + +typedef struct zpool_command { + const char *name; + int (*func)(int, char **); + zpool_help_t usage; +} zpool_command_t; + +/* + * Master command table. Each ZFS command has a name, associated function, and + * usage message. The usage messages need to be internationalized, so we have + * to have a function to return the usage message based on a command index. + * + * These commands are organized according to how they are displayed in the usage + * message. An empty command (one with a NULL name) indicates an empty line in + * the generic usage message. 
+ */ +static zpool_command_t command_table[] = { + { "create", zpool_do_create, HELP_CREATE }, + { "destroy", zpool_do_destroy, HELP_DESTROY }, + { NULL }, + { "add", zpool_do_add, HELP_ADD }, + { "remove", zpool_do_remove, HELP_REMOVE }, + { NULL }, + { "list", zpool_do_list, HELP_LIST }, + { "iostat", zpool_do_iostat, HELP_IOSTAT }, + { "status", zpool_do_status, HELP_STATUS }, + { NULL }, + { "online", zpool_do_online, HELP_ONLINE }, + { "offline", zpool_do_offline, HELP_OFFLINE }, + { "clear", zpool_do_clear, HELP_CLEAR }, + { NULL }, + { "attach", zpool_do_attach, HELP_ATTACH }, + { "detach", zpool_do_detach, HELP_DETACH }, + { "replace", zpool_do_replace, HELP_REPLACE }, + { NULL }, + { "scrub", zpool_do_scrub, HELP_SCRUB }, + { NULL }, + { "import", zpool_do_import, HELP_IMPORT }, + { "export", zpool_do_export, HELP_EXPORT }, + { "upgrade", zpool_do_upgrade, HELP_UPGRADE }, + { NULL }, + { "history", zpool_do_history, HELP_HISTORY }, + { "get", zpool_do_get, HELP_GET }, + { "set", zpool_do_set, HELP_SET }, +}; + +#define NCOMMAND (sizeof (command_table) / sizeof (command_table[0])) + +zpool_command_t *current_command; +static char history_str[HIS_MAX_RECORD_LEN]; + +static const char * +get_usage(zpool_help_t idx) { + switch (idx) { + case HELP_ADD: + return (gettext("\tadd [-fn] ...\n")); + case HELP_ATTACH: + return (gettext("\tattach [-f] " + "\n")); + case HELP_CLEAR: + return (gettext("\tclear [device]\n")); + case HELP_CREATE: + return (gettext("\tcreate [-fn] [-o property=value] ... \n" + "\t [-O file-system-property=value] ... \n" + "\t [-m mountpoint] [-R root] ...\n")); + case HELP_DESTROY: + return (gettext("\tdestroy [-f] \n")); + case HELP_DETACH: + return (gettext("\tdetach \n")); + case HELP_EXPORT: + return (gettext("\texport [-f] ...\n")); + case HELP_HISTORY: + return (gettext("\thistory [-il] [] ...\n")); + case HELP_IMPORT: + return (gettext("\timport [-d dir] [-D]\n" + "\timport [-o mntopts] [-o property=value] ... \n" + "\t [-d dir | -c cachefile] [-D] [-f] [-R root] -a\n" + "\timport [-o mntopts] [-o property=value] ... \n" + "\t [-d dir | -c cachefile] [-D] [-f] [-R root] " + " [newpool]\n")); + case HELP_IOSTAT: + return (gettext("\tiostat [-v] [pool] ... [interval " + "[count]]\n")); + case HELP_LIST: + return (gettext("\tlist [-H] [-o property[,...]] " + "[pool] ...\n")); + case HELP_OFFLINE: + return (gettext("\toffline [-t] ...\n")); + case HELP_ONLINE: + return (gettext("\tonline ...\n")); + case HELP_REPLACE: + return (gettext("\treplace [-f] " + "[new-device]\n")); + case HELP_REMOVE: + return (gettext("\tremove ...\n")); + case HELP_SCRUB: + return (gettext("\tscrub [-s] ...\n")); + case HELP_STATUS: + return (gettext("\tstatus [-vx] [pool] ...\n")); + case HELP_UPGRADE: + return (gettext("\tupgrade\n" + "\tupgrade -v\n" + "\tupgrade [-V version] <-a | pool ...>\n")); + case HELP_GET: + return (gettext("\tget <\"all\" | property[,...]> " + " ...\n")); + case HELP_SET: + return (gettext("\tset \n")); + } + + abort(); + /* NOTREACHED */ +} + + +/* + * Callback routine that will print out a pool property value. + */ +static int +print_prop_cb(int prop, void *cb) +{ + FILE *fp = cb; + + (void) fprintf(fp, "\t%-13s ", zpool_prop_to_name(prop)); + + if (zpool_prop_readonly(prop)) + (void) fprintf(fp, " NO "); + else + (void) fprintf(fp, " YES "); + + if (zpool_prop_values(prop) == NULL) + (void) fprintf(fp, "-\n"); + else + (void) fprintf(fp, "%s\n", zpool_prop_values(prop)); + + return (ZPROP_CONT); +} + +/* + * Display usage message. 
If we're inside a command, display only the usage for + * that command. Otherwise, iterate over the entire command table and display + * a complete usage message. + */ +void +usage(boolean_t requested) +{ + FILE *fp = requested ? stdout : stderr; + + if (current_command == NULL) { + int i; + + (void) fprintf(fp, gettext("usage: zpool command args ...\n")); + (void) fprintf(fp, + gettext("where 'command' is one of the following:\n\n")); + + for (i = 0; i < NCOMMAND; i++) { + if (command_table[i].name == NULL) + (void) fprintf(fp, "\n"); + else + (void) fprintf(fp, "%s", + get_usage(command_table[i].usage)); + } + } else { + (void) fprintf(fp, gettext("usage:\n")); + (void) fprintf(fp, "%s", get_usage(current_command->usage)); + } + + if (current_command != NULL && + ((strcmp(current_command->name, "set") == 0) || + (strcmp(current_command->name, "get") == 0) || + (strcmp(current_command->name, "list") == 0))) { + + (void) fprintf(fp, + gettext("\nthe following properties are supported:\n")); + + (void) fprintf(fp, "\n\t%-13s %s %s\n\n", + "PROPERTY", "EDIT", "VALUES"); + + /* Iterate over all properties */ + (void) zprop_iter(print_prop_cb, fp, B_FALSE, B_TRUE, + ZFS_TYPE_POOL); + } + + /* + * See comments at end of main(). + */ + if (getenv("ZFS_ABORT") != NULL) { + (void) printf("dumping core by request\n"); + abort(); + } + + exit(requested ? 0 : 2); +} + +void +print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent, + boolean_t print_logs) +{ + nvlist_t **child; + uint_t c, children; + char *vname; + + if (name != NULL) + (void) printf("\t%*s%s\n", indent, "", name); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) + return; + + for (c = 0; c < children; c++) { + uint64_t is_log = B_FALSE; + + (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, + &is_log); + if ((is_log && !print_logs) || (!is_log && print_logs)) + continue; + + vname = zpool_vdev_name(g_zfs, zhp, child[c]); + print_vdev_tree(zhp, vname, child[c], indent + 2, + B_FALSE); + free(vname); + } +} + +/* + * Add a property pair (name, string-value) into a property nvlist. + */ +static int +add_prop_list(const char *propname, char *propval, nvlist_t **props, + boolean_t poolprop) +{ + zpool_prop_t prop = ZPROP_INVAL; + zfs_prop_t fprop; + nvlist_t *proplist; + const char *normnm; + char *strval; + + if (*props == NULL && + nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) { + (void) fprintf(stderr, + gettext("internal error: out of memory\n")); + return (1); + } + + proplist = *props; + + if (poolprop) { + if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) { + (void) fprintf(stderr, gettext("property '%s' is " + "not a valid pool property\n"), propname); + return (2); + } + normnm = zpool_prop_to_name(prop); + } else { + if ((fprop = zfs_name_to_prop(propname)) == ZPROP_INVAL) { + (void) fprintf(stderr, gettext("property '%s' is " + "not a valid file system property\n"), propname); + return (2); + } + normnm = zfs_prop_to_name(fprop); + } + + if (nvlist_lookup_string(proplist, normnm, &strval) == 0 && + prop != ZPOOL_PROP_CACHEFILE) { + (void) fprintf(stderr, gettext("property '%s' " + "specified multiple times\n"), propname); + return (2); + } + + if (nvlist_add_string(proplist, normnm, propval) != 0) { + (void) fprintf(stderr, gettext("internal " + "error: out of memory\n")); + return (1); + } + + return (0); +} + +/* + * zpool add [-fn] ... 
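The -o/-O/-R parsers in the create and import commands below feed add_prop_list() one name/value pair at a time. A small sketch of the same accumulation, assumed to live in this file since add_prop_list() is static; the property names are examples only, and the caller owns and must nvlist_free() the resulting lists:

/*
 * Sketch only: builds the kind of property lists that parsing
 * "-o autoreplace=on -O compression=on" would produce.
 */
static int
example_build_props(nvlist_t **props, nvlist_t **fsprops)
{
        /* pool property, as from "-o autoreplace=on" */
        if (add_prop_list("autoreplace", "on", props, B_TRUE) != 0)
                return (1);

        /* root file system property, as from "-O compression=on" */
        if (add_prop_list("compression", "on", fsprops, B_FALSE) != 0)
                return (1);

        return (0);
}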
+ * + * -f Force addition of devices, even if they appear in use + * -n Do not add the devices, but display the resulting layout if + * they were to be added. + * + * Adds the given vdevs to 'pool'. As with create, the bulk of this work is + * handled by get_vdev_spec(), which constructs the nvlist needed to pass to + * libzfs. + */ +int +zpool_do_add(int argc, char **argv) +{ + boolean_t force = B_FALSE; + boolean_t dryrun = B_FALSE; + int c; + nvlist_t *nvroot; + char *poolname; + int ret; + zpool_handle_t *zhp; + nvlist_t *config; + + /* check options */ + while ((c = getopt(argc, argv, "fn")) != -1) { + switch (c) { + case 'f': + force = B_TRUE; + break; + case 'n': + dryrun = B_TRUE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* get pool name and check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool name argument\n")); + usage(B_FALSE); + } + if (argc < 2) { + (void) fprintf(stderr, gettext("missing vdev specification\n")); + usage(B_FALSE); + } + + poolname = argv[0]; + + argc--; + argv++; + + if ((zhp = zpool_open(g_zfs, poolname)) == NULL) + return (1); + + if ((config = zpool_get_config(zhp, NULL)) == NULL) { + (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"), + poolname); + zpool_close(zhp); + return (1); + } + + /* pass off to get_vdev_spec for processing */ + nvroot = make_root_vdev(zhp, force, !force, B_FALSE, dryrun, + argc, argv); + if (nvroot == NULL) { + zpool_close(zhp); + return (1); + } + + if (dryrun) { + nvlist_t *poolnvroot; + + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &poolnvroot) == 0); + + (void) printf(gettext("would update '%s' to the following " + "configuration:\n"), zpool_get_name(zhp)); + + /* print original main pool and new tree */ + print_vdev_tree(zhp, poolname, poolnvroot, 0, B_FALSE); + print_vdev_tree(zhp, NULL, nvroot, 0, B_FALSE); + + /* Do the same for the logs */ + if (num_logs(poolnvroot) > 0) { + print_vdev_tree(zhp, "logs", poolnvroot, 0, B_TRUE); + print_vdev_tree(zhp, NULL, nvroot, 0, B_TRUE); + } else if (num_logs(nvroot) > 0) { + print_vdev_tree(zhp, "logs", nvroot, 0, B_TRUE); + } + + ret = 0; + } else { + ret = (zpool_add(zhp, nvroot) != 0); + } + + nvlist_free(nvroot); + zpool_close(zhp); + + return (ret); +} + +/* + * zpool remove ... + * + * Removes the given vdev from the pool. Currently, this only supports removing + * spares and cache devices from the pool. Eventually, we'll want to support + * removing leaf vdevs (as an alias for 'detach') as well as toplevel vdevs. + */ +int +zpool_do_remove(int argc, char **argv) +{ + char *poolname; + int i, ret = 0; + zpool_handle_t *zhp; + + argc--; + argv++; + + /* get pool name and check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool name argument\n")); + usage(B_FALSE); + } + if (argc < 2) { + (void) fprintf(stderr, gettext("missing device\n")); + usage(B_FALSE); + } + + poolname = argv[0]; + + if ((zhp = zpool_open(g_zfs, poolname)) == NULL) + return (1); + + for (i = 1; i < argc; i++) { + if (zpool_vdev_remove(zhp, argv[i]) != 0) + ret = 1; + } + + return (ret); +} + +/* + * zpool create [-fn] [-o property=value] ... + * [-O file-system-property=value] ... + * [-R root] [-m mountpoint] ... + * + * -f Force creation, even if devices appear in use + * -n Do not create the pool, but display the resulting layout if it + * were to be created. 
+ * -R Create a pool under an alternate root + * -m Set default mountpoint for the root dataset. By default it's + * '/' + * -o Set property=value. + * -O Set fsproperty=value in the pool's root file system + * + * Creates the named pool according to the given vdev specification. The + * bulk of the vdev processing is done in get_vdev_spec() in zpool_vdev.c. Once + * we get the nvlist back from get_vdev_spec(), we either print out the contents + * (if '-n' was specified), or pass it to libzfs to do the creation. + */ +int +zpool_do_create(int argc, char **argv) +{ + boolean_t force = B_FALSE; + boolean_t dryrun = B_FALSE; + int c; + nvlist_t *nvroot = NULL; + char *poolname; + int ret = 1; + char *altroot = NULL; + char *mountpoint = NULL; + nvlist_t *fsprops = NULL; + nvlist_t *props = NULL; + char *propval; + + /* check options */ + while ((c = getopt(argc, argv, ":fnR:m:o:O:")) != -1) { + switch (c) { + case 'f': + force = B_TRUE; + break; + case 'n': + dryrun = B_TRUE; + break; + case 'R': + altroot = optarg; + if (add_prop_list(zpool_prop_to_name( + ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE)) + goto errout; + if (nvlist_lookup_string(props, + zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), + &propval) == 0) + break; + if (add_prop_list(zpool_prop_to_name( + ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE)) + goto errout; + break; + case 'm': + mountpoint = optarg; + break; + case 'o': + if ((propval = strchr(optarg, '=')) == NULL) { + (void) fprintf(stderr, gettext("missing " + "'=' for -o option\n")); + goto errout; + } + *propval = '\0'; + propval++; + + if (add_prop_list(optarg, propval, &props, B_TRUE)) + goto errout; + break; + case 'O': + if ((propval = strchr(optarg, '=')) == NULL) { + (void) fprintf(stderr, gettext("missing " + "'=' for -O option\n")); + goto errout; + } + *propval = '\0'; + propval++; + + if (add_prop_list(optarg, propval, &fsprops, B_FALSE)) + goto errout; + break; + case ':': + (void) fprintf(stderr, gettext("missing argument for " + "'%c' option\n"), optopt); + goto badusage; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + goto badusage; + } + } + + argc -= optind; + argv += optind; + + /* get pool name and check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool name argument\n")); + goto badusage; + } + if (argc < 2) { + (void) fprintf(stderr, gettext("missing vdev specification\n")); + goto badusage; + } + + poolname = argv[0]; + + /* + * As a special case, check for use of '/' in the name, and direct the + * user to use 'zfs create' instead. 
+ */ + if (strchr(poolname, '/') != NULL) { + (void) fprintf(stderr, gettext("cannot create '%s': invalid " + "character '/' in pool name\n"), poolname); + (void) fprintf(stderr, gettext("use 'zfs create' to " + "create a dataset\n")); + goto errout; + } + + /* pass off to get_vdev_spec for bulk processing */ + nvroot = make_root_vdev(NULL, force, !force, B_FALSE, dryrun, + argc - 1, argv + 1); + if (nvroot == NULL) + goto errout; + + /* make_root_vdev() allows 0 toplevel children if there are spares */ + if (!zfs_allocatable_devs(nvroot)) { + (void) fprintf(stderr, gettext("invalid vdev " + "specification: at least one toplevel vdev must be " + "specified\n")); + goto errout; + } + + + if (altroot != NULL && altroot[0] != '/') { + (void) fprintf(stderr, gettext("invalid alternate root '%s': " + "must be an absolute path\n"), altroot); + goto errout; + } + + /* + * Check the validity of the mountpoint and direct the user to use the + * '-m' mountpoint option if it looks like its in use. + */ + if (mountpoint == NULL || + (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 && + strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) { + char buf[MAXPATHLEN]; + DIR *dirp; + + if (mountpoint && mountpoint[0] != '/') { + (void) fprintf(stderr, gettext("invalid mountpoint " + "'%s': must be an absolute path, 'legacy', or " + "'none'\n"), mountpoint); + goto errout; + } + + if (mountpoint == NULL) { + if (altroot != NULL) + (void) snprintf(buf, sizeof (buf), "%s/%s", + altroot, poolname); + else + (void) snprintf(buf, sizeof (buf), "/%s", + poolname); + } else { + if (altroot != NULL) + (void) snprintf(buf, sizeof (buf), "%s%s", + altroot, mountpoint); + else + (void) snprintf(buf, sizeof (buf), "%s", + mountpoint); + } + + if ((dirp = opendir(buf)) == NULL && errno != ENOENT) { + (void) fprintf(stderr, gettext("mountpoint '%s' : " + "%s\n"), buf, strerror(errno)); + (void) fprintf(stderr, gettext("use '-m' " + "option to provide a different default\n")); + goto errout; + } else if (dirp) { + int count = 0; + + while (count < 3 && readdir(dirp) != NULL) + count++; + (void) closedir(dirp); + + if (count > 2) { + (void) fprintf(stderr, gettext("mountpoint " + "'%s' exists and is not empty\n"), buf); + (void) fprintf(stderr, gettext("use '-m' " + "option to provide a " + "different default\n")); + goto errout; + } + } + } + + if (dryrun) { + /* + * For a dry run invocation, print out a basic message and run + * through all the vdevs in the list and print out in an + * appropriate hierarchy. + */ + (void) printf(gettext("would create '%s' with the " + "following layout:\n\n"), poolname); + + print_vdev_tree(NULL, poolname, nvroot, 0, B_FALSE); + if (num_logs(nvroot) > 0) + print_vdev_tree(NULL, "logs", nvroot, 0, B_TRUE); + + ret = 0; + } else { + /* + * Hand off to libzfs. 
+ */ + if (zpool_create(g_zfs, poolname, + nvroot, props, fsprops) == 0) { + zfs_handle_t *pool = zfs_open(g_zfs, poolname, + ZFS_TYPE_FILESYSTEM); + if (pool != NULL) { + if (mountpoint != NULL) + verify(zfs_prop_set(pool, + zfs_prop_to_name( + ZFS_PROP_MOUNTPOINT), + mountpoint) == 0); + if (zfs_mount(pool, NULL, 0) == 0) + ret = zfs_shareall(pool); + zfs_close(pool); + } + } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) { + (void) fprintf(stderr, gettext("pool name may have " + "been omitted\n")); + } + } + +errout: + nvlist_free(nvroot); + nvlist_free(fsprops); + nvlist_free(props); + return (ret); +badusage: + nvlist_free(fsprops); + nvlist_free(props); + usage(B_FALSE); + return (2); +} + +/* + * zpool destroy + * + * -f Forcefully unmount any datasets + * + * Destroy the given pool. Automatically unmounts any datasets in the pool. + */ +int +zpool_do_destroy(int argc, char **argv) +{ + boolean_t force = B_FALSE; + int c; + char *pool; + zpool_handle_t *zhp; + int ret; + + /* check options */ + while ((c = getopt(argc, argv, "f")) != -1) { + switch (c) { + case 'f': + force = B_TRUE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* check arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool argument\n")); + usage(B_FALSE); + } + if (argc > 1) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + pool = argv[0]; + + if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { + /* + * As a special case, check for use of '/' in the name, and + * direct the user to use 'zfs destroy' instead. + */ + if (strchr(pool, '/') != NULL) + (void) fprintf(stderr, gettext("use 'zfs destroy' to " + "destroy a dataset\n")); + return (1); + } + + if (zpool_disable_datasets(zhp, force) != 0) { + (void) fprintf(stderr, gettext("could not destroy '%s': " + "could not unmount datasets\n"), zpool_get_name(zhp)); + return (1); + } + + ret = (zpool_destroy(zhp) != 0); + + zpool_close(zhp); + + return (ret); +} + +/* + * zpool export [-f] ... + * + * -f Forcefully unmount datasets + * + * Export the given pools. By default, the command will attempt to cleanly + * unmount any active datasets within the pool. If the '-f' flag is specified, + * then the datasets will be forcefully unmounted. + */ +int +zpool_do_export(int argc, char **argv) +{ + boolean_t force = B_FALSE; + int c; + zpool_handle_t *zhp; + int ret; + int i; + + /* check options */ + while ((c = getopt(argc, argv, "f")) != -1) { + switch (c) { + case 'f': + force = B_TRUE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* check arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool argument\n")); + usage(B_FALSE); + } + + ret = 0; + for (i = 0; i < argc; i++) { + if ((zhp = zpool_open_canfail(g_zfs, argv[i])) == NULL) { + ret = 1; + continue; + } + + if (zpool_disable_datasets(zhp, force) != 0) { + ret = 1; + zpool_close(zhp); + continue; + } + + if (zpool_export(zhp, force) != 0) + ret = 1; + + zpool_close(zhp); + } + + return (ret); +} + +/* + * Given a vdev configuration, determine the maximum width needed for the device + * name column. 
+ */ +static int +max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max) +{ + char *name = zpool_vdev_name(g_zfs, zhp, nv); + nvlist_t **child; + uint_t c, children; + int ret; + + if (strlen(name) + depth > max) + max = strlen(name) + depth; + + free(name); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, + &child, &children) == 0) { + for (c = 0; c < children; c++) + if ((ret = max_width(zhp, child[c], depth + 2, + max)) > max) + max = ret; + } + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, + &child, &children) == 0) { + for (c = 0; c < children; c++) + if ((ret = max_width(zhp, child[c], depth + 2, + max)) > max) + max = ret; + } + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0) { + for (c = 0; c < children; c++) + if ((ret = max_width(zhp, child[c], depth + 2, + max)) > max) + max = ret; + } + + + return (max); +} + + +/* + * Print the configuration of an exported pool. Iterate over all vdevs in the + * pool, printing out the name and status for each one. + */ +void +print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth, + boolean_t print_logs) +{ + nvlist_t **child; + uint_t c, children; + vdev_stat_t *vs; + char *type, *vname; + + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); + if (strcmp(type, VDEV_TYPE_MISSING) == 0) + return; + + verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &c) == 0); + + (void) printf("\t%*s%-*s", depth, "", namewidth - depth, name); + (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux)); + + if (vs->vs_aux != 0) { + (void) printf(" "); + + switch (vs->vs_aux) { + case VDEV_AUX_OPEN_FAILED: + (void) printf(gettext("cannot open")); + break; + + case VDEV_AUX_BAD_GUID_SUM: + (void) printf(gettext("missing device")); + break; + + case VDEV_AUX_NO_REPLICAS: + (void) printf(gettext("insufficient replicas")); + break; + + case VDEV_AUX_VERSION_NEWER: + (void) printf(gettext("newer version")); + break; + + case VDEV_AUX_ERR_EXCEEDED: + (void) printf(gettext("too many errors")); + break; + + default: + (void) printf(gettext("corrupted data")); + break; + } + } + (void) printf("\n"); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) + return; + + for (c = 0; c < children; c++) { + uint64_t is_log = B_FALSE; + + (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, + &is_log); + if ((is_log && !print_logs) || (!is_log && print_logs)) + continue; + + vname = zpool_vdev_name(g_zfs, NULL, child[c]); + print_import_config(vname, child[c], + namewidth, depth + 2, B_FALSE); + free(vname); + } + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, + &child, &children) == 0) { + (void) printf(gettext("\tcache\n")); + for (c = 0; c < children; c++) { + vname = zpool_vdev_name(g_zfs, NULL, child[c]); + (void) printf("\t %s\n", vname); + free(vname); + } + } + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, + &child, &children) == 0) { + (void) printf(gettext("\tspares\n")); + for (c = 0; c < children; c++) { + vname = zpool_vdev_name(g_zfs, NULL, child[c]); + (void) printf("\t %s\n", vname); + free(vname); + } + } +} + +/* + * Display the status for the given pool. 
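The width computed by max_width() above drives the printf field widths in print_import_config(); the iostat code later in this hunk aligns its columns the same way. A tiny self-contained illustration of that alignment, with made-up names and a made-up namewidth:

#include <stdio.h>

/*
 * Indent by 'depth', then left-justify the name in a field of
 * (namewidth - depth) columns so the state column lines up at every
 * nesting level.
 */
static void
example_print_row(int namewidth, int depth, const char *name,
    const char *state)
{
        (void) printf("\t%*s%-*s %s\n", depth, "", namewidth - depth,
            name, state);
}

/*
 * example_print_row(10, 0, "tank", "ONLINE");
 * example_print_row(10, 2, "mirror", "ONLINE");
 * example_print_row(10, 4, "c1t0d0", "ONLINE");
 */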
+ */ +static void +show_import(nvlist_t *config) +{ + uint64_t pool_state; + vdev_stat_t *vs; + char *name; + uint64_t guid; + char *msgid; + nvlist_t *nvroot; + int reason; + const char *health; + uint_t vsc; + int namewidth; + + verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, + &name) == 0); + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, + &guid) == 0); + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, + &pool_state) == 0); + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + + verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &vsc) == 0); + health = zpool_state_to_name(vs->vs_state, vs->vs_aux); + + reason = zpool_import_status(config, &msgid); + + (void) printf(gettext(" pool: %s\n"), name); + (void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid); + (void) printf(gettext(" state: %s"), health); + if (pool_state == POOL_STATE_DESTROYED) + (void) printf(gettext(" (DESTROYED)")); + (void) printf("\n"); + + switch (reason) { + case ZPOOL_STATUS_MISSING_DEV_R: + case ZPOOL_STATUS_MISSING_DEV_NR: + case ZPOOL_STATUS_BAD_GUID_SUM: + (void) printf(gettext("status: One or more devices are missing " + "from the system.\n")); + break; + + case ZPOOL_STATUS_CORRUPT_LABEL_R: + case ZPOOL_STATUS_CORRUPT_LABEL_NR: + (void) printf(gettext("status: One or more devices contains " + "corrupted data.\n")); + break; + + case ZPOOL_STATUS_CORRUPT_DATA: + (void) printf(gettext("status: The pool data is corrupted.\n")); + break; + + case ZPOOL_STATUS_OFFLINE_DEV: + (void) printf(gettext("status: One or more devices " + "are offlined.\n")); + break; + + case ZPOOL_STATUS_CORRUPT_POOL: + (void) printf(gettext("status: The pool metadata is " + "corrupted.\n")); + break; + + case ZPOOL_STATUS_VERSION_OLDER: + (void) printf(gettext("status: The pool is formatted using an " + "older on-disk version.\n")); + break; + + case ZPOOL_STATUS_VERSION_NEWER: + (void) printf(gettext("status: The pool is formatted using an " + "incompatible version.\n")); + break; + + case ZPOOL_STATUS_HOSTID_MISMATCH: + (void) printf(gettext("status: The pool was last accessed by " + "another system.\n")); + break; + + case ZPOOL_STATUS_FAULTED_DEV_R: + case ZPOOL_STATUS_FAULTED_DEV_NR: + (void) printf(gettext("status: One or more devices are " + "faulted.\n")); + break; + + case ZPOOL_STATUS_BAD_LOG: + (void) printf(gettext("status: An intent log record cannot be " + "read.\n")); + break; + + default: + /* + * No other status can be seen when importing pools. + */ + assert(reason == ZPOOL_STATUS_OK); + } + + /* + * Print out an action according to the overall state of the pool. + */ + if (vs->vs_state == VDEV_STATE_HEALTHY) { + if (reason == ZPOOL_STATUS_VERSION_OLDER) + (void) printf(gettext("action: The pool can be " + "imported using its name or numeric identifier, " + "though\n\tsome features will not be available " + "without an explicit 'zpool upgrade'.\n")); + else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) + (void) printf(gettext("action: The pool can be " + "imported using its name or numeric " + "identifier and\n\tthe '-f' flag.\n")); + else + (void) printf(gettext("action: The pool can be " + "imported using its name or numeric " + "identifier.\n")); + } else if (vs->vs_state == VDEV_STATE_DEGRADED) { + (void) printf(gettext("action: The pool can be imported " + "despite missing or damaged devices. 
The\n\tfault " + "tolerance of the pool may be compromised if imported.\n")); + } else { + switch (reason) { + case ZPOOL_STATUS_VERSION_NEWER: + (void) printf(gettext("action: The pool cannot be " + "imported. Access the pool on a system running " + "newer\n\tsoftware, or recreate the pool from " + "backup.\n")); + break; + case ZPOOL_STATUS_MISSING_DEV_R: + case ZPOOL_STATUS_MISSING_DEV_NR: + case ZPOOL_STATUS_BAD_GUID_SUM: + (void) printf(gettext("action: The pool cannot be " + "imported. Attach the missing\n\tdevices and try " + "again.\n")); + break; + default: + (void) printf(gettext("action: The pool cannot be " + "imported due to damaged devices or data.\n")); + } + } + + /* + * If the state is "closed" or "can't open", and the aux state + * is "corrupt data": + */ + if (((vs->vs_state == VDEV_STATE_CLOSED) || + (vs->vs_state == VDEV_STATE_CANT_OPEN)) && + (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) { + if (pool_state == POOL_STATE_DESTROYED) + (void) printf(gettext("\tThe pool was destroyed, " + "but can be imported using the '-Df' flags.\n")); + else if (pool_state != POOL_STATE_EXPORTED) + (void) printf(gettext("\tThe pool may be active on " + "another system, but can be imported using\n\t" + "the '-f' flag.\n")); + } + + if (msgid != NULL) + (void) printf(gettext(" see: http://www.sun.com/msg/%s\n"), + msgid); + + (void) printf(gettext("config:\n\n")); + + namewidth = max_width(NULL, nvroot, 0, 0); + if (namewidth < 10) + namewidth = 10; + + print_import_config(name, nvroot, namewidth, 0, B_FALSE); + if (num_logs(nvroot) > 0) { + (void) printf(gettext("\tlogs\n")); + print_import_config(name, nvroot, namewidth, 0, B_TRUE); + } + + if (reason == ZPOOL_STATUS_BAD_GUID_SUM) { + (void) printf(gettext("\n\tAdditional devices are known to " + "be part of this pool, though their\n\texact " + "configuration cannot be determined.\n")); + } +} + +/* + * Perform the import for the given configuration. This passes the heavy + * lifting off to zpool_import_props(), and then mounts the datasets contained + * within the pool. 
+ */ +static int +do_import(nvlist_t *config, const char *newname, const char *mntopts, + int force, nvlist_t *props, boolean_t allowfaulted) +{ + zpool_handle_t *zhp; + char *name; + uint64_t state; + uint64_t version; + int error = 0; + + verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, + &name) == 0); + + verify(nvlist_lookup_uint64(config, + ZPOOL_CONFIG_POOL_STATE, &state) == 0); + verify(nvlist_lookup_uint64(config, + ZPOOL_CONFIG_VERSION, &version) == 0); + if (version > SPA_VERSION) { + (void) fprintf(stderr, gettext("cannot import '%s': pool " + "is formatted using a newer ZFS version\n"), name); + return (1); + } else if (state != POOL_STATE_EXPORTED && !force) { + uint64_t hostid; + + if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, + &hostid) == 0) { + if ((unsigned long)hostid != gethostid()) { + char *hostname; + uint64_t timestamp; + time_t t; + + verify(nvlist_lookup_string(config, + ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); + verify(nvlist_lookup_uint64(config, + ZPOOL_CONFIG_TIMESTAMP, &timestamp) == 0); + t = timestamp; + (void) fprintf(stderr, gettext("cannot import " + "'%s': pool may be in use from other " + "system, it was last accessed by %s " + "(hostid: 0x%lx) on %s"), name, hostname, + (unsigned long)hostid, + asctime(localtime(&t))); + (void) fprintf(stderr, gettext("use '-f' to " + "import anyway\n")); + return (1); + } + } else { + (void) fprintf(stderr, gettext("cannot import '%s': " + "pool may be in use from other system\n"), name); + (void) fprintf(stderr, gettext("use '-f' to import " + "anyway\n")); + return (1); + } + } + + if (zpool_import_props(g_zfs, config, newname, props, + allowfaulted) != 0) + return (1); + + if (newname != NULL) + name = (char *)newname; + + verify((zhp = zpool_open_canfail(g_zfs, name)) != NULL); + + if (zpool_enable_datasets(zhp, mntopts, 0) != 0) { + zpool_close(zhp); + return (1); + } + + zpool_close(zhp); + return (error); +} + +/* + * zpool import [-d dir] [-D] + * import [-o mntopts] [-o prop=value] ... [-R root] [-D] + * [-d dir | -c cachefile] [-f] -a + * import [-o mntopts] [-o prop=value] ... [-R root] [-D] + * [-d dir | -c cachefile] [-f] [newpool] + * + * -c Read pool information from a cachefile instead of searching + * devices. + * + * -d Scan in a specific directory, other than /dev/dsk. More than + * one directory can be specified using multiple '-d' options. + * + * -D Scan for previously destroyed pools or import all or only + * specified destroyed pools. + * + * -R Temporarily import the pool, with all mountpoints relative to + * the given root. The pool will remain exported when the machine + * is rebooted. + * + * -f Force import, even if it appears that the pool is active. + * + * -F Import even in the presence of faulted vdevs. This is an + * intentionally undocumented option for testing purposes, and + * treats the pool configuration as complete, leaving any bad + * vdevs in the FAULTED state. + * + * -a Import all pools found. + * + * -o Set property=value and/or temporary mount options (without '='). + * + * The import command scans for pools to import, and import pools based on pool + * name and GUID. The pool can also be renamed as part of the import process. 
+ */ +int +zpool_do_import(int argc, char **argv) +{ + char **searchdirs = NULL; + int nsearch = 0; + int c; + int err; + nvlist_t *pools = NULL; + boolean_t do_all = B_FALSE; + boolean_t do_destroyed = B_FALSE; + char *mntopts = NULL; + boolean_t do_force = B_FALSE; + nvpair_t *elem; + nvlist_t *config; + uint64_t searchguid = 0; + char *searchname = NULL; + char *propval; + nvlist_t *found_config; + nvlist_t *props = NULL; + boolean_t first; + boolean_t allow_faulted = B_FALSE; + uint64_t pool_state; + char *cachefile = NULL; + + /* check options */ + while ((c = getopt(argc, argv, ":ac:d:DfFo:p:R:")) != -1) { + switch (c) { + case 'a': + do_all = B_TRUE; + break; + case 'c': + cachefile = optarg; + break; + case 'd': + if (searchdirs == NULL) { + searchdirs = safe_malloc(sizeof (char *)); + } else { + char **tmp = safe_malloc((nsearch + 1) * + sizeof (char *)); + bcopy(searchdirs, tmp, nsearch * + sizeof (char *)); + free(searchdirs); + searchdirs = tmp; + } + searchdirs[nsearch++] = optarg; + break; + case 'D': + do_destroyed = B_TRUE; + break; + case 'f': + do_force = B_TRUE; + break; + case 'F': + allow_faulted = B_TRUE; + break; + case 'o': + if ((propval = strchr(optarg, '=')) != NULL) { + *propval = '\0'; + propval++; + if (add_prop_list(optarg, propval, + &props, B_TRUE)) + goto error; + } else { + mntopts = optarg; + } + break; + case 'R': + if (add_prop_list(zpool_prop_to_name( + ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE)) + goto error; + if (nvlist_lookup_string(props, + zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), + &propval) == 0) + break; + if (add_prop_list(zpool_prop_to_name( + ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE)) + goto error; + break; + case ':': + (void) fprintf(stderr, gettext("missing argument for " + "'%c' option\n"), optopt); + usage(B_FALSE); + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + if (cachefile && nsearch != 0) { + (void) fprintf(stderr, gettext("-c is incompatible with -d\n")); + usage(B_FALSE); + } + + if (searchdirs == NULL) { + searchdirs = safe_malloc(sizeof (char *)); + searchdirs[0] = "/dev/dsk"; + nsearch = 1; + } + + /* check argument count */ + if (do_all) { + if (argc != 0) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + } else { + if (argc > 2) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + /* + * Check for the SYS_CONFIG privilege. We do this explicitly + * here because otherwise any attempt to discover pools will + * silently fail. + */ + if (argc == 0 && !priv_ineffect(PRIV_SYS_CONFIG)) { + (void) fprintf(stderr, gettext("cannot " + "discover pools: permission denied\n")); + free(searchdirs); + return (1); + } + } + + /* + * Depending on the arguments given, we do one of the following: + * + * Iterate through all pools and display information about + * each one. + * + * -a Iterate through all pools and try to import each one. + * + * Find the pool that corresponds to the given GUID/pool + * name and import that one. + * + * -D Above options applies only to destroyed pools. 
+ */ + if (argc != 0) { + char *endptr; + + errno = 0; + searchguid = strtoull(argv[0], &endptr, 10); + if (errno != 0 || *endptr != '\0') + searchname = argv[0]; + found_config = NULL; + } + + if (cachefile) { + pools = zpool_find_import_cached(g_zfs, cachefile, searchname, + searchguid); + } else if (searchname != NULL) { + pools = zpool_find_import_byname(g_zfs, nsearch, searchdirs, + searchname); + } else { + /* + * It's OK to search by guid even if searchguid is 0. + */ + pools = zpool_find_import_byguid(g_zfs, nsearch, searchdirs, + searchguid); + } + + if (pools == NULL) { + if (argc != 0) { + (void) fprintf(stderr, gettext("cannot import '%s': " + "no such pool available\n"), argv[0]); + } + free(searchdirs); + return (1); + } + + /* + * At this point we have a list of import candidate configs. Even if + * we were searching by pool name or guid, we still need to + * post-process the list to deal with pool state and possible + * duplicate names. + */ + err = 0; + elem = NULL; + first = B_TRUE; + while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { + + verify(nvpair_value_nvlist(elem, &config) == 0); + + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, + &pool_state) == 0); + if (!do_destroyed && pool_state == POOL_STATE_DESTROYED) + continue; + if (do_destroyed && pool_state != POOL_STATE_DESTROYED) + continue; + + if (argc == 0) { + if (first) + first = B_FALSE; + else if (!do_all) + (void) printf("\n"); + + if (do_all) + err |= do_import(config, NULL, mntopts, + do_force, props, allow_faulted); + else + show_import(config); + } else if (searchname != NULL) { + char *name; + + /* + * We are searching for a pool based on name. + */ + verify(nvlist_lookup_string(config, + ZPOOL_CONFIG_POOL_NAME, &name) == 0); + + if (strcmp(name, searchname) == 0) { + if (found_config != NULL) { + (void) fprintf(stderr, gettext( + "cannot import '%s': more than " + "one matching pool\n"), searchname); + (void) fprintf(stderr, gettext( + "import by numeric ID instead\n")); + err = B_TRUE; + } + found_config = config; + } + } else { + uint64_t guid; + + /* + * Search for a pool by guid. + */ + verify(nvlist_lookup_uint64(config, + ZPOOL_CONFIG_POOL_GUID, &guid) == 0); + + if (guid == searchguid) + found_config = config; + } + } + + /* + * If we were searching for a specific pool, verify that we found a + * pool, and then do the import. + */ + if (argc != 0 && err == 0) { + if (found_config == NULL) { + (void) fprintf(stderr, gettext("cannot import '%s': " + "no such pool available\n"), argv[0]); + err = B_TRUE; + } else { + err |= do_import(found_config, argc == 1 ? NULL : + argv[1], mntopts, do_force, props, allow_faulted); + } + } + + /* + * If we were just looking for pools, report an error if none were + * found. + */ + if (argc == 0 && first) + (void) fprintf(stderr, + gettext("no pools available to import\n")); + +error: + nvlist_free(props); + nvlist_free(pools); + free(searchdirs); + + return (err ? 
1 : 0); +} + +typedef struct iostat_cbdata { + zpool_list_t *cb_list; + int cb_verbose; + int cb_iteration; + int cb_namewidth; +} iostat_cbdata_t; + +static void +print_iostat_separator(iostat_cbdata_t *cb) +{ + int i = 0; + + for (i = 0; i < cb->cb_namewidth; i++) + (void) printf("-"); + (void) printf(" ----- ----- ----- ----- ----- -----\n"); +} + +static void +print_iostat_header(iostat_cbdata_t *cb) +{ + (void) printf("%*s capacity operations bandwidth\n", + cb->cb_namewidth, ""); + (void) printf("%-*s used avail read write read write\n", + cb->cb_namewidth, "pool"); + print_iostat_separator(cb); +} + +/* + * Display a single statistic. + */ +static void +print_one_stat(uint64_t value) +{ + char buf[64]; + + zfs_nicenum(value, buf, sizeof (buf)); + (void) printf(" %5s", buf); +} + +/* + * Print out all the statistics for the given vdev. This can either be the + * toplevel configuration, or called recursively. If 'name' is NULL, then this + * is a verbose output, and we don't want to display the toplevel pool stats. + */ +void +print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, + nvlist_t *newnv, iostat_cbdata_t *cb, int depth) +{ + nvlist_t **oldchild, **newchild; + uint_t c, children; + vdev_stat_t *oldvs, *newvs; + vdev_stat_t zerovs = { 0 }; + uint64_t tdelta; + double scale; + char *vname; + + if (oldnv != NULL) { + verify(nvlist_lookup_uint64_array(oldnv, ZPOOL_CONFIG_STATS, + (uint64_t **)&oldvs, &c) == 0); + } else { + oldvs = &zerovs; + } + + verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_STATS, + (uint64_t **)&newvs, &c) == 0); + + if (strlen(name) + depth > cb->cb_namewidth) + (void) printf("%*s%s", depth, "", name); + else + (void) printf("%*s%s%*s", depth, "", name, + (int)(cb->cb_namewidth - strlen(name) - depth), ""); + + tdelta = newvs->vs_timestamp - oldvs->vs_timestamp; + + if (tdelta == 0) + scale = 1.0; + else + scale = (double)NANOSEC / tdelta; + + /* only toplevel vdevs have capacity stats */ + if (newvs->vs_space == 0) { + (void) printf(" - -"); + } else { + print_one_stat(newvs->vs_alloc); + print_one_stat(newvs->vs_space - newvs->vs_alloc); + } + + print_one_stat((uint64_t)(scale * (newvs->vs_ops[ZIO_TYPE_READ] - + oldvs->vs_ops[ZIO_TYPE_READ]))); + + print_one_stat((uint64_t)(scale * (newvs->vs_ops[ZIO_TYPE_WRITE] - + oldvs->vs_ops[ZIO_TYPE_WRITE]))); + + print_one_stat((uint64_t)(scale * (newvs->vs_bytes[ZIO_TYPE_READ] - + oldvs->vs_bytes[ZIO_TYPE_READ]))); + + print_one_stat((uint64_t)(scale * (newvs->vs_bytes[ZIO_TYPE_WRITE] - + oldvs->vs_bytes[ZIO_TYPE_WRITE]))); + + (void) printf("\n"); + + if (!cb->cb_verbose) + return; + + if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN, + &newchild, &children) != 0) + return; + + if (oldnv && nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN, + &oldchild, &c) != 0) + return; + + for (c = 0; c < children; c++) { + vname = zpool_vdev_name(g_zfs, zhp, newchild[c]); + print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, + newchild[c], cb, depth + 2); + free(vname); + } + + /* + * Include level 2 ARC devices in iostat output + */ + if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE, + &newchild, &children) != 0) + return; + + if (oldnv && nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE, + &oldchild, &c) != 0) + return; + + if (children > 0) { + (void) printf("%-*s - - - - - " + "-\n", cb->cb_namewidth, "cache"); + for (c = 0; c < children; c++) { + vname = zpool_vdev_name(g_zfs, zhp, newchild[c]); + print_vdev_stats(zhp, vname, oldnv ? 
oldchild[c] : NULL, + newchild[c], cb, depth + 2); + free(vname); + } + } +} + +static int +refresh_iostat(zpool_handle_t *zhp, void *data) +{ + iostat_cbdata_t *cb = data; + boolean_t missing; + + /* + * If the pool has disappeared, remove it from the list and continue. + */ + if (zpool_refresh_stats(zhp, &missing) != 0) + return (-1); + + if (missing) + pool_list_remove(cb->cb_list, zhp); + + return (0); +} + +/* + * Callback to print out the iostats for the given pool. + */ +int +print_iostat(zpool_handle_t *zhp, void *data) +{ + iostat_cbdata_t *cb = data; + nvlist_t *oldconfig, *newconfig; + nvlist_t *oldnvroot, *newnvroot; + + newconfig = zpool_get_config(zhp, &oldconfig); + + if (cb->cb_iteration == 1) + oldconfig = NULL; + + verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE, + &newnvroot) == 0); + + if (oldconfig == NULL) + oldnvroot = NULL; + else + verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE, + &oldnvroot) == 0); + + /* + * Print out the statistics for the pool. + */ + print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, cb, 0); + + if (cb->cb_verbose) + print_iostat_separator(cb); + + return (0); +} + +int +get_namewidth(zpool_handle_t *zhp, void *data) +{ + iostat_cbdata_t *cb = data; + nvlist_t *config, *nvroot; + + if ((config = zpool_get_config(zhp, NULL)) != NULL) { + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + if (!cb->cb_verbose) + cb->cb_namewidth = strlen(zpool_get_name(zhp)); + else + cb->cb_namewidth = max_width(zhp, nvroot, 0, 0); + } + + /* + * The width must fall into the range [10,38]. The upper limit is the + * maximum we can have and still fit in 80 columns. + */ + if (cb->cb_namewidth < 10) + cb->cb_namewidth = 10; + if (cb->cb_namewidth > 38) + cb->cb_namewidth = 38; + + return (0); +} + +/* + * zpool iostat [-v] [pool] ... [interval [count]] + * + * -v Display statistics for individual vdevs + * + * This command can be tricky because we want to be able to deal with pool + * creation/destruction as well as vdev configuration changes. The bulk of this + * processing is handled by the pool_list_* routines in zpool_iter.c. We rely + * on pool_list_update() to detect the addition of new pools. Configuration + * changes are all handled within libzfs. + */ +int +zpool_do_iostat(int argc, char **argv) +{ + int c; + int ret; + int npools; + unsigned long interval = 0, count = 0; + zpool_list_t *list; + boolean_t verbose = B_FALSE; + iostat_cbdata_t cb; + + /* check options */ + while ((c = getopt(argc, argv, "v")) != -1) { + switch (c) { + case 'v': + verbose = B_TRUE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* + * Determine if the last argument is an integer or a pool name + */ + if (argc > 0 && isdigit(argv[argc - 1][0])) { + char *end; + + errno = 0; + interval = strtoul(argv[argc - 1], &end, 10); + + if (*end == '\0' && errno == 0) { + if (interval == 0) { + (void) fprintf(stderr, gettext("interval " + "cannot be zero\n")); + usage(B_FALSE); + } + + /* + * Ignore the last parameter + */ + argc--; + } else { + /* + * If this is not a valid number, just plow on. The + * user will get a more informative error message later + * on. + */ + interval = 0; + } + } + + /* + * If the last argument is also an integer, then we have both a count + * and an integer. 
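+ * For example, 'zpool iostat 5 10' prints statistics every 5 seconds and
+ * exits after 10 reports.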
+ */ + if (argc > 0 && isdigit(argv[argc - 1][0])) { + char *end; + + errno = 0; + count = interval; + interval = strtoul(argv[argc - 1], &end, 10); + + if (*end == '\0' && errno == 0) { + if (interval == 0) { + (void) fprintf(stderr, gettext("interval " + "cannot be zero\n")); + usage(B_FALSE); + } + + /* + * Ignore the last parameter + */ + argc--; + } else { + interval = 0; + } + } + + /* + * Construct the list of all interesting pools. + */ + ret = 0; + if ((list = pool_list_get(argc, argv, NULL, &ret)) == NULL) + return (1); + + if (pool_list_count(list) == 0 && argc != 0) { + pool_list_free(list); + return (1); + } + + if (pool_list_count(list) == 0 && interval == 0) { + pool_list_free(list); + (void) fprintf(stderr, gettext("no pools available\n")); + return (1); + } + + /* + * Enter the main iostat loop. + */ + cb.cb_list = list; + cb.cb_verbose = verbose; + cb.cb_iteration = 0; + cb.cb_namewidth = 0; + + for (;;) { + pool_list_update(list); + + if ((npools = pool_list_count(list)) == 0) + break; + + /* + * Refresh all statistics. This is done as an explicit step + * before calculating the maximum name width, so that any + * configuration changes are properly accounted for. + */ + (void) pool_list_iter(list, B_FALSE, refresh_iostat, &cb); + + /* + * Iterate over all pools to determine the maximum width + * for the pool / device name column across all pools. + */ + cb.cb_namewidth = 0; + (void) pool_list_iter(list, B_FALSE, get_namewidth, &cb); + + /* + * If it's the first time, or verbose mode, print the header. + */ + if (++cb.cb_iteration == 1 || verbose) + print_iostat_header(&cb); + + (void) pool_list_iter(list, B_FALSE, print_iostat, &cb); + + /* + * If there's more than one pool, and we're not in verbose mode + * (which prints a separator for us), then print a separator. + */ + if (npools > 1 && !verbose) + print_iostat_separator(&cb); + + if (verbose) + (void) printf("\n"); + + /* + * Flush the output so that redirection to a file isn't buffered + * indefinitely. + */ + (void) fflush(stdout); + + if (interval == 0) + break; + + if (count != 0 && --count == 0) + break; + + (void) sleep(interval); + } + + pool_list_free(list); + + return (ret); +} + +typedef struct list_cbdata { + boolean_t cb_scripted; + boolean_t cb_first; + zprop_list_t *cb_proplist; +} list_cbdata_t; + +/* + * Given a list of columns to display, output appropriate headers for each one. + */ +static void +print_header(zprop_list_t *pl) +{ + const char *header; + boolean_t first = B_TRUE; + boolean_t right_justify; + + for (; pl != NULL; pl = pl->pl_next) { + if (pl->pl_prop == ZPROP_INVAL) + continue; + + if (!first) + (void) printf(" "); + else + first = B_FALSE; + + header = zpool_prop_column_name(pl->pl_prop); + right_justify = zpool_prop_align_right(pl->pl_prop); + + if (pl->pl_next == NULL && !right_justify) + (void) printf("%s", header); + else if (right_justify) + (void) printf("%*s", pl->pl_width, header); + else + (void) printf("%-*s", pl->pl_width, header); + } + + (void) printf("\n"); +} + +/* + * Given a pool and a list of properties, print out all the properties according + * to the described layout. 
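+ * Properties that cannot be retrieved for a pool are printed as '-'.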
+ */ +static void +print_pool(zpool_handle_t *zhp, zprop_list_t *pl, int scripted) +{ + boolean_t first = B_TRUE; + char property[ZPOOL_MAXPROPLEN]; + char *propstr; + boolean_t right_justify; + int width; + + for (; pl != NULL; pl = pl->pl_next) { + if (!first) { + if (scripted) + (void) printf("\t"); + else + (void) printf(" "); + } else { + first = B_FALSE; + } + + right_justify = B_FALSE; + if (pl->pl_prop != ZPROP_INVAL) { + if (zpool_get_prop(zhp, pl->pl_prop, property, + sizeof (property), NULL) != 0) + propstr = "-"; + else + propstr = property; + + right_justify = zpool_prop_align_right(pl->pl_prop); + } else { + propstr = "-"; + } + + width = pl->pl_width; + + /* + * If this is being called in scripted mode, or if this is the + * last column and it is left-justified, don't include a width + * format specifier. + */ + if (scripted || (pl->pl_next == NULL && !right_justify)) + (void) printf("%s", propstr); + else if (right_justify) + (void) printf("%*s", width, propstr); + else + (void) printf("%-*s", width, propstr); + } + + (void) printf("\n"); +} + +/* + * Generic callback function to list a pool. + */ +int +list_callback(zpool_handle_t *zhp, void *data) +{ + list_cbdata_t *cbp = data; + + if (cbp->cb_first) { + if (!cbp->cb_scripted) + print_header(cbp->cb_proplist); + cbp->cb_first = B_FALSE; + } + + print_pool(zhp, cbp->cb_proplist, cbp->cb_scripted); + + return (0); +} + +/* + * zpool list [-H] [-o prop[,prop]*] [pool] ... + * + * -H Scripted mode. Don't display headers, and separate properties + * by a single tab. + * -o List of properties to display. Defaults to + * "name,size,used,available,capacity,health,altroot" + * + * List all pools in the system, whether or not they're healthy. Output space + * statistics for each one, as well as health status summary. 
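+ *
+ * For example, 'zpool list -H -o name,size,health' prints one tab-separated
+ * line per pool containing just those three columns.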
+ */ +int +zpool_do_list(int argc, char **argv) +{ + int c; + int ret; + list_cbdata_t cb = { 0 }; + static char default_props[] = + "name,size,used,available,capacity,health,altroot"; + char *props = default_props; + + /* check options */ + while ((c = getopt(argc, argv, ":Ho:")) != -1) { + switch (c) { + case 'H': + cb.cb_scripted = B_TRUE; + break; + case 'o': + props = optarg; + break; + case ':': + (void) fprintf(stderr, gettext("missing argument for " + "'%c' option\n"), optopt); + usage(B_FALSE); + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0) + usage(B_FALSE); + + cb.cb_first = B_TRUE; + + ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, + list_callback, &cb); + + zprop_free_list(cb.cb_proplist); + + if (argc == 0 && cb.cb_first && !cb.cb_scripted) { + (void) printf(gettext("no pools available\n")); + return (0); + } + + return (ret); +} + +static nvlist_t * +zpool_get_vdev_by_name(nvlist_t *nv, char *name) +{ + nvlist_t **child; + uint_t c, children; + nvlist_t *match; + char *path; + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) { + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); + if (strncmp(name, "/dev/dsk/", 9) == 0) + name += 9; + if (strncmp(path, "/dev/dsk/", 9) == 0) + path += 9; + if (strcmp(name, path) == 0) + return (nv); + return (NULL); + } + + for (c = 0; c < children; c++) + if ((match = zpool_get_vdev_by_name(child[c], name)) != NULL) + return (match); + + return (NULL); +} + +static int +zpool_do_attach_or_replace(int argc, char **argv, int replacing) +{ + boolean_t force = B_FALSE; + int c; + nvlist_t *nvroot; + char *poolname, *old_disk, *new_disk; + zpool_handle_t *zhp; + int ret; + + /* check options */ + while ((c = getopt(argc, argv, "f")) != -1) { + switch (c) { + case 'f': + force = B_TRUE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* get pool name and check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool name argument\n")); + usage(B_FALSE); + } + + poolname = argv[0]; + + if (argc < 2) { + (void) fprintf(stderr, + gettext("missing specification\n")); + usage(B_FALSE); + } + + old_disk = argv[1]; + + if (argc < 3) { + if (!replacing) { + (void) fprintf(stderr, + gettext("missing specification\n")); + usage(B_FALSE); + } + new_disk = old_disk; + argc -= 1; + argv += 1; + } else { + new_disk = argv[2]; + argc -= 2; + argv += 2; + } + + if (argc > 1) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + if ((zhp = zpool_open(g_zfs, poolname)) == NULL) + return (1); + + if (zpool_get_config(zhp, NULL) == NULL) { + (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"), + poolname); + zpool_close(zhp); + return (1); + } + + nvroot = make_root_vdev(zhp, force, B_FALSE, replacing, B_FALSE, + argc, argv); + if (nvroot == NULL) { + zpool_close(zhp); + return (1); + } + + ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing); + + nvlist_free(nvroot); + zpool_close(zhp); + + return (ret); +} + +/* + * zpool replace [-f] + * + * -f Force attach, even if appears to be in use. + * + * Replace with . 
+ */ +/* ARGSUSED */ +int +zpool_do_replace(int argc, char **argv) +{ + return (zpool_do_attach_or_replace(argc, argv, B_TRUE)); +} + +/* + * zpool attach [-f] <pool> <device> <new_device> + * + * -f Force attach, even if <new_device> appears to be in use. + * + * Attach <new_device> to the mirror containing <device>. If <device> is not + * part of a mirror, then <device> will be transformed into a mirror of + * <device> and <new_device>. In either case, <new_device> will begin life + * with a DTL of [0, now], and will immediately begin to resilver itself. + */ +int +zpool_do_attach(int argc, char **argv) +{ + return (zpool_do_attach_or_replace(argc, argv, B_FALSE)); +} + +/* + * zpool detach [-f] <pool> <device> + * + * -f Force detach of <device>, even if DTLs argue against it + * (not supported yet) + * + * Detach a device from a mirror. The operation will be refused if <device> + * is the last device in the mirror, or if the DTLs indicate that this device + * has the only valid copy of some data. + */ +/* ARGSUSED */ +int +zpool_do_detach(int argc, char **argv) +{ + int c; + char *poolname, *path; + zpool_handle_t *zhp; + int ret; + + /* check options */ + while ((c = getopt(argc, argv, "f")) != -1) { + switch (c) { + case 'f': + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* get pool name and check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool name argument\n")); + usage(B_FALSE); + } + + if (argc < 2) { + (void) fprintf(stderr, + gettext("missing <device> specification\n")); + usage(B_FALSE); + } + + poolname = argv[0]; + path = argv[1]; + + if ((zhp = zpool_open(g_zfs, poolname)) == NULL) + return (1); + + ret = zpool_vdev_detach(zhp, path); + + zpool_close(zhp); + + return (ret); +} + +/* + * zpool online <pool> <device> ... + */ +int +zpool_do_online(int argc, char **argv) +{ + int c, i; + char *poolname; + zpool_handle_t *zhp; + int ret = 0; + vdev_state_t newstate; + + /* check options */ + while ((c = getopt(argc, argv, "t")) != -1) { + switch (c) { + case 't': + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* get pool name and check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool name\n")); + usage(B_FALSE); + } + if (argc < 2) { + (void) fprintf(stderr, gettext("missing device name\n")); + usage(B_FALSE); + } + + poolname = argv[0]; + + if ((zhp = zpool_open(g_zfs, poolname)) == NULL) + return (1); + + for (i = 1; i < argc; i++) { + if (zpool_vdev_online(zhp, argv[i], 0, &newstate) == 0) { + if (newstate != VDEV_STATE_HEALTHY) { + (void) printf(gettext("warning: device '%s' " + "onlined, but remains in faulted state\n"), + argv[i]); + if (newstate == VDEV_STATE_FAULTED) + (void) printf(gettext("use 'zpool " + "clear' to restore a faulted " + "device\n")); + else + (void) printf(gettext("use 'zpool " + "replace' to replace devices " + "that are no longer present\n")); + } + } else { + ret = 1; + } + } + + zpool_close(zhp); + + return (ret); +} + +/* + * zpool offline [-ft] <pool> <device> ... + * + * -f Force the device into the offline state, even if doing + * so would appear to compromise pool availability. + * (not supported yet) + * + * -t Only take the device off-line temporarily. The offline + * state will not be persistent across reboots.
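+ *
+ * For example, 'zpool offline -t tank c1t0d0' (illustrative pool and device
+ * names) keeps the device offline only until the next reboot.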
+ */ +/* ARGSUSED */ +int +zpool_do_offline(int argc, char **argv) +{ + int c, i; + char *poolname; + zpool_handle_t *zhp; + int ret = 0; + boolean_t istmp = B_FALSE; + + /* check options */ + while ((c = getopt(argc, argv, "ft")) != -1) { + switch (c) { + case 't': + istmp = B_TRUE; + break; + case 'f': + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + /* get pool name and check number of arguments */ + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool name\n")); + usage(B_FALSE); + } + if (argc < 2) { + (void) fprintf(stderr, gettext("missing device name\n")); + usage(B_FALSE); + } + + poolname = argv[0]; + + if ((zhp = zpool_open(g_zfs, poolname)) == NULL) + return (1); + + for (i = 1; i < argc; i++) { + if (zpool_vdev_offline(zhp, argv[i], istmp) != 0) + ret = 1; + } + + zpool_close(zhp); + + return (ret); +} + +/* + * zpool clear [device] + * + * Clear all errors associated with a pool or a particular device. + */ +int +zpool_do_clear(int argc, char **argv) +{ + int ret = 0; + zpool_handle_t *zhp; + char *pool, *device; + + if (argc < 2) { + (void) fprintf(stderr, gettext("missing pool name\n")); + usage(B_FALSE); + } + + if (argc > 3) { + (void) fprintf(stderr, gettext("too many arguments\n")); + usage(B_FALSE); + } + + pool = argv[1]; + device = argc == 3 ? argv[2] : NULL; + + if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) + return (1); + + if (zpool_clear(zhp, device) != 0) + ret = 1; + + zpool_close(zhp); + + return (ret); +} + +typedef struct scrub_cbdata { + int cb_type; + int cb_argc; + char **cb_argv; +} scrub_cbdata_t; + +int +scrub_callback(zpool_handle_t *zhp, void *data) +{ + scrub_cbdata_t *cb = data; + int err; + + /* + * Ignore faulted pools. + */ + if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { + (void) fprintf(stderr, gettext("cannot scrub '%s': pool is " + "currently unavailable\n"), zpool_get_name(zhp)); + return (1); + } + + err = zpool_scrub(zhp, cb->cb_type); + + return (err != 0); +} + +/* + * zpool scrub [-s] ... + * + * -s Stop. Stops any in-progress scrub. + */ +int +zpool_do_scrub(int argc, char **argv) +{ + int c; + scrub_cbdata_t cb; + + cb.cb_type = POOL_SCRUB_EVERYTHING; + + /* check options */ + while ((c = getopt(argc, argv, "s")) != -1) { + switch (c) { + case 's': + cb.cb_type = POOL_SCRUB_NONE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + cb.cb_argc = argc; + cb.cb_argv = argv; + argc -= optind; + argv += optind; + + if (argc < 1) { + (void) fprintf(stderr, gettext("missing pool name argument\n")); + usage(B_FALSE); + } + + return (for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb)); +} + +typedef struct status_cbdata { + int cb_count; + boolean_t cb_allpools; + boolean_t cb_verbose; + boolean_t cb_explain; + boolean_t cb_first; +} status_cbdata_t; + +/* + * Print out detailed scrub status. + */ +void +print_scrub_status(nvlist_t *nvroot) +{ + vdev_stat_t *vs; + uint_t vsc; + time_t start, end, now; + double fraction_done; + uint64_t examined, total, minutes_left, minutes_taken; + char *scrub_type; + + verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &vsc) == 0); + + /* + * If there's never been a scrub, there's not much to say. 
+ */ + if (vs->vs_scrub_end == 0 && vs->vs_scrub_type == POOL_SCRUB_NONE) { + (void) printf(gettext("none requested\n")); + return; + } + + scrub_type = (vs->vs_scrub_type == POOL_SCRUB_RESILVER) ? + "resilver" : "scrub"; + + start = vs->vs_scrub_start; + end = vs->vs_scrub_end; + now = time(NULL); + examined = vs->vs_scrub_examined; + total = vs->vs_alloc; + + if (end != 0) { + minutes_taken = (uint64_t)((end - start) / 60); + + (void) printf(gettext("%s %s after %lluh%um with %llu errors " + "on %s"), + scrub_type, vs->vs_scrub_complete ? "completed" : "stopped", + (u_longlong_t)(minutes_taken / 60), + (uint_t)(minutes_taken % 60), + (u_longlong_t)vs->vs_scrub_errors, ctime(&end)); + return; + } + + if (examined == 0) + examined = 1; + if (examined > total) + total = examined; + + fraction_done = (double)examined / total; + minutes_left = (uint64_t)((now - start) * + (1 - fraction_done) / fraction_done / 60); + minutes_taken = (uint64_t)((now - start) / 60); + + (void) printf(gettext("%s in progress for %lluh%um, %.2f%% done, " + "%lluh%um to go\n"), + scrub_type, (u_longlong_t)(minutes_taken / 60), + (uint_t)(minutes_taken % 60), 100 * fraction_done, + (u_longlong_t)(minutes_left / 60), (uint_t)(minutes_left % 60)); +} + +typedef struct spare_cbdata { + uint64_t cb_guid; + zpool_handle_t *cb_zhp; +} spare_cbdata_t; + +static boolean_t +find_vdev(nvlist_t *nv, uint64_t search) +{ + uint64_t guid; + nvlist_t **child; + uint_t c, children; + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 && + search == guid) + return (B_TRUE); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0) { + for (c = 0; c < children; c++) + if (find_vdev(child[c], search)) + return (B_TRUE); + } + + return (B_FALSE); +} + +static int +find_spare(zpool_handle_t *zhp, void *data) +{ + spare_cbdata_t *cbp = data; + nvlist_t *config, *nvroot; + + config = zpool_get_config(zhp, NULL); + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + + if (find_vdev(nvroot, cbp->cb_guid)) { + cbp->cb_zhp = zhp; + return (1); + } + + zpool_close(zhp); + return (0); +} + +/* + * Print out configuration state as requested by status_callback. + */ +void +print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv, + int namewidth, int depth, boolean_t isspare, boolean_t print_logs) +{ + nvlist_t **child; + uint_t c, children; + vdev_stat_t *vs; + char rbuf[6], wbuf[6], cbuf[6], repaired[7]; + char *vname; + uint64_t notpresent; + spare_cbdata_t cb; + char *state; + + verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &c) == 0); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) + children = 0; + + state = zpool_state_to_name(vs->vs_state, vs->vs_aux); + if (isspare) { + /* + * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for + * online drives. 
+ */ + if (vs->vs_aux == VDEV_AUX_SPARED) + state = "INUSE"; + else if (vs->vs_state == VDEV_STATE_HEALTHY) + state = "AVAIL"; + } + + (void) printf("\t%*s%-*s %-8s", depth, "", namewidth - depth, + name, state); + + if (!isspare) { + zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf)); + zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf)); + zfs_nicenum(vs->vs_checksum_errors, cbuf, sizeof (cbuf)); + (void) printf(" %5s %5s %5s", rbuf, wbuf, cbuf); + } + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, + &notpresent) == 0) { + char *path; + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); + (void) printf(" was %s", path); + } else if (vs->vs_aux != 0) { + (void) printf(" "); + + switch (vs->vs_aux) { + case VDEV_AUX_OPEN_FAILED: + (void) printf(gettext("cannot open")); + break; + + case VDEV_AUX_BAD_GUID_SUM: + (void) printf(gettext("missing device")); + break; + + case VDEV_AUX_NO_REPLICAS: + (void) printf(gettext("insufficient replicas")); + break; + + case VDEV_AUX_VERSION_NEWER: + (void) printf(gettext("newer version")); + break; + + case VDEV_AUX_SPARED: + verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, + &cb.cb_guid) == 0); + if (zpool_iter(g_zfs, find_spare, &cb) == 1) { + if (strcmp(zpool_get_name(cb.cb_zhp), + zpool_get_name(zhp)) == 0) + (void) printf(gettext("currently in " + "use")); + else + (void) printf(gettext("in use by " + "pool '%s'"), + zpool_get_name(cb.cb_zhp)); + zpool_close(cb.cb_zhp); + } else { + (void) printf(gettext("currently in use")); + } + break; + + case VDEV_AUX_ERR_EXCEEDED: + (void) printf(gettext("too many errors")); + break; + + case VDEV_AUX_IO_FAILURE: + (void) printf(gettext("experienced I/O failures")); + break; + + case VDEV_AUX_BAD_LOG: + (void) printf(gettext("bad intent log")); + break; + + default: + (void) printf(gettext("corrupted data")); + break; + } + } else if (vs->vs_scrub_repaired != 0 && children == 0) { + /* + * Report bytes resilvered/repaired on leaf devices. + */ + zfs_nicenum(vs->vs_scrub_repaired, repaired, sizeof (repaired)); + (void) printf(gettext(" %s %s"), repaired, + (vs->vs_scrub_type == POOL_SCRUB_RESILVER) ?
+ "resilvered" : "repaired"); + } + + (void) printf("\n"); + + for (c = 0; c < children; c++) { + uint64_t is_log = B_FALSE; + + (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, + &is_log); + if ((is_log && !print_logs) || (!is_log && print_logs)) + continue; + vname = zpool_vdev_name(g_zfs, zhp, child[c]); + print_status_config(zhp, vname, child[c], + namewidth, depth + 2, isspare, B_FALSE); + free(vname); + } +} + +static void +print_error_log(zpool_handle_t *zhp) +{ + nvlist_t *nverrlist = NULL; + nvpair_t *elem; + char *pathname; + size_t len = MAXPATHLEN * 2; + + if (zpool_get_errlog(zhp, &nverrlist) != 0) { + (void) printf("errors: List of errors unavailable " + "(insufficient privileges)\n"); + return; + } + + (void) printf("errors: Permanent errors have been " + "detected in the following files:\n\n"); + + pathname = safe_malloc(len); + elem = NULL; + while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) { + nvlist_t *nv; + uint64_t dsobj, obj; + + verify(nvpair_value_nvlist(elem, &nv) == 0); + verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET, + &dsobj) == 0); + verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, + &obj) == 0); + zpool_obj_to_path(zhp, dsobj, obj, pathname, len); + (void) printf("%7s %s\n", "", pathname); + } + free(pathname); + nvlist_free(nverrlist); +} + +static void +print_spares(zpool_handle_t *zhp, nvlist_t **spares, uint_t nspares, + int namewidth) +{ + uint_t i; + char *name; + + if (nspares == 0) + return; + + (void) printf(gettext("\tspares\n")); + + for (i = 0; i < nspares; i++) { + name = zpool_vdev_name(g_zfs, zhp, spares[i]); + print_status_config(zhp, name, spares[i], + namewidth, 2, B_TRUE, B_FALSE); + free(name); + } +} + +static void +print_l2cache(zpool_handle_t *zhp, nvlist_t **l2cache, uint_t nl2cache, + int namewidth) +{ + uint_t i; + char *name; + + if (nl2cache == 0) + return; + + (void) printf(gettext("\tcache\n")); + + for (i = 0; i < nl2cache; i++) { + name = zpool_vdev_name(g_zfs, zhp, l2cache[i]); + print_status_config(zhp, name, l2cache[i], + namewidth, 2, B_FALSE, B_FALSE); + free(name); + } +} + +/* + * Display a summary of pool status. Displays a summary such as: + * + * pool: tank + * status: DEGRADED + * reason: One or more devices ... + * see: http://www.sun.com/msg/ZFS-xxxx-01 + * config: + * mirror DEGRADED + * c1t0d0 OK + * c2t0d0 UNAVAIL + * + * When given the '-v' option, we print out the complete config. If the '-e' + * option is specified, then we print out error rate information as well. + */ +int +status_callback(zpool_handle_t *zhp, void *data) +{ + status_cbdata_t *cbp = data; + nvlist_t *config, *nvroot; + char *msgid; + int reason; + const char *health; + uint_t c; + vdev_stat_t *vs; + + config = zpool_get_config(zhp, NULL); + reason = zpool_get_status(zhp, &msgid); + + cbp->cb_count++; + + /* + * If we were given 'zpool status -x', only report those pools with + * problems. 
+ */ + if (reason == ZPOOL_STATUS_OK && cbp->cb_explain) { + if (!cbp->cb_allpools) { + (void) printf(gettext("pool '%s' is healthy\n"), + zpool_get_name(zhp)); + if (cbp->cb_first) + cbp->cb_first = B_FALSE; + } + return (0); + } + + if (cbp->cb_first) + cbp->cb_first = B_FALSE; + else + (void) printf("\n"); + + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &c) == 0); + health = zpool_state_to_name(vs->vs_state, vs->vs_aux); + + (void) printf(gettext(" pool: %s\n"), zpool_get_name(zhp)); + (void) printf(gettext(" state: %s\n"), health); + + switch (reason) { + case ZPOOL_STATUS_MISSING_DEV_R: + (void) printf(gettext("status: One or more devices could not " + "be opened. Sufficient replicas exist for\n\tthe pool to " + "continue functioning in a degraded state.\n")); + (void) printf(gettext("action: Attach the missing device and " + "online it using 'zpool online'.\n")); + break; + + case ZPOOL_STATUS_MISSING_DEV_NR: + (void) printf(gettext("status: One or more devices could not " + "be opened. There are insufficient\n\treplicas for the " + "pool to continue functioning.\n")); + (void) printf(gettext("action: Attach the missing device and " + "online it using 'zpool online'.\n")); + break; + + case ZPOOL_STATUS_CORRUPT_LABEL_R: + (void) printf(gettext("status: One or more devices could not " + "be used because the label is missing or\n\tinvalid. " + "Sufficient replicas exist for the pool to continue\n\t" + "functioning in a degraded state.\n")); + (void) printf(gettext("action: Replace the device using " + "'zpool replace'.\n")); + break; + + case ZPOOL_STATUS_CORRUPT_LABEL_NR: + (void) printf(gettext("status: One or more devices could not " + "be used because the label is missing \n\tor invalid. " + "There are insufficient replicas for the pool to " + "continue\n\tfunctioning.\n")); + (void) printf(gettext("action: Destroy and re-create the pool " + "from a backup source.\n")); + break; + + case ZPOOL_STATUS_FAILING_DEV: + (void) printf(gettext("status: One or more devices has " + "experienced an unrecoverable error. An\n\tattempt was " + "made to correct the error. Applications are " + "unaffected.\n")); + (void) printf(gettext("action: Determine if the device needs " + "to be replaced, and clear the errors\n\tusing " + "'zpool clear' or replace the device with 'zpool " + "replace'.\n")); + break; + + case ZPOOL_STATUS_OFFLINE_DEV: + (void) printf(gettext("status: One or more devices has " + "been taken offline by the administrator.\n\tSufficient " + "replicas exist for the pool to continue functioning in " + "a\n\tdegraded state.\n")); + (void) printf(gettext("action: Online the device using " + "'zpool online' or replace the device with\n\t'zpool " + "replace'.\n")); + break; + + case ZPOOL_STATUS_RESILVERING: + (void) printf(gettext("status: One or more devices is " + "currently being resilvered. The pool will\n\tcontinue " + "to function, possibly in a degraded state.\n")); + (void) printf(gettext("action: Wait for the resilver to " + "complete.\n")); + break; + + case ZPOOL_STATUS_CORRUPT_DATA: + (void) printf(gettext("status: One or more devices has " + "experienced an error resulting in data\n\tcorruption. " + "Applications may be affected.\n")); + (void) printf(gettext("action: Restore the file in question " + "if possible. 
Otherwise restore the\n\tentire pool from " + "backup.\n")); + break; + + case ZPOOL_STATUS_CORRUPT_POOL: + (void) printf(gettext("status: The pool metadata is corrupted " + "and the pool cannot be opened.\n")); + (void) printf(gettext("action: Destroy and re-create the pool " + "from a backup source.\n")); + break; + + case ZPOOL_STATUS_VERSION_OLDER: + (void) printf(gettext("status: The pool is formatted using an " + "older on-disk format. The pool can\n\tstill be used, but " + "some features are unavailable.\n")); + (void) printf(gettext("action: Upgrade the pool using 'zpool " + "upgrade'. Once this is done, the\n\tpool will no longer " + "be accessible on older software versions.\n")); + break; + + case ZPOOL_STATUS_VERSION_NEWER: + (void) printf(gettext("status: The pool has been upgraded to a " + "newer, incompatible on-disk version.\n\tThe pool cannot " + "be accessed on this system.\n")); + (void) printf(gettext("action: Access the pool from a system " + "running more recent software, or\n\trestore the pool from " + "backup.\n")); + break; + + case ZPOOL_STATUS_FAULTED_DEV_R: + (void) printf(gettext("status: One or more devices are " + "faulted in response to persistent errors.\n\tSufficient " + "replicas exist for the pool to continue functioning " + "in a\n\tdegraded state.\n")); + (void) printf(gettext("action: Replace the faulted device, " + "or use 'zpool clear' to mark the device\n\trepaired.\n")); + break; + + case ZPOOL_STATUS_FAULTED_DEV_NR: + (void) printf(gettext("status: One or more devices are " + "faulted in response to persistent errors. There are " + "insufficient replicas for the pool to\n\tcontinue " + "functioning.\n")); + (void) printf(gettext("action: Destroy and re-create the pool " + "from a backup source. Manually marking the device\n" + "\trepaired using 'zpool clear' may allow some data " + "to be recovered.\n")); + break; + + case ZPOOL_STATUS_IO_FAILURE_WAIT: + case ZPOOL_STATUS_IO_FAILURE_CONTINUE: + (void) printf(gettext("status: One or more devices are " + "faulted in response to IO failures.\n")); + (void) printf(gettext("action: Make sure the affected devices " + "are connected, then run 'zpool clear'.\n")); + break; + + case ZPOOL_STATUS_BAD_LOG: + (void) printf(gettext("status: An intent log record " + "could not be read.\n" + "\tWaiting for adminstrator intervention to fix the " + "faulted pool.\n")); + (void) printf(gettext("action: Either restore the affected " + "device(s) and run 'zpool online',\n" + "\tor ignore the intent log records by running " + "'zpool clear'.\n")); + break; + + default: + /* + * The remaining errors can't actually be generated, yet. 
+ */ + assert(reason == ZPOOL_STATUS_OK); + } + + if (msgid != NULL) + (void) printf(gettext(" see: http://www.sun.com/msg/%s\n"), + msgid); + + if (config != NULL) { + int namewidth; + uint64_t nerr; + nvlist_t **spares, **l2cache; + uint_t nspares, nl2cache; + + + (void) printf(gettext(" scrub: ")); + print_scrub_status(nvroot); + + namewidth = max_width(zhp, nvroot, 0, 0); + if (namewidth < 10) + namewidth = 10; + + (void) printf(gettext("config:\n\n")); + (void) printf(gettext("\t%-*s %-8s %5s %5s %5s\n"), namewidth, + "NAME", "STATE", "READ", "WRITE", "CKSUM"); + print_status_config(zhp, zpool_get_name(zhp), nvroot, + namewidth, 0, B_FALSE, B_FALSE); + if (num_logs(nvroot) > 0) + print_status_config(zhp, "logs", nvroot, namewidth, 0, + B_FALSE, B_TRUE); + + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, + &l2cache, &nl2cache) == 0) + print_l2cache(zhp, l2cache, nl2cache, namewidth); + + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, + &spares, &nspares) == 0) + print_spares(zhp, spares, nspares, namewidth); + + if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, + &nerr) == 0) { + nvlist_t *nverrlist = NULL; + + /* + * If the approximate error count is small, get a + * precise count by fetching the entire log and + * uniquifying the results. + */ + if (nerr > 0 && nerr < 100 && !cbp->cb_verbose && + zpool_get_errlog(zhp, &nverrlist) == 0) { + nvpair_t *elem; + + elem = NULL; + nerr = 0; + while ((elem = nvlist_next_nvpair(nverrlist, + elem)) != NULL) { + nerr++; + } + } + nvlist_free(nverrlist); + + (void) printf("\n"); + + if (nerr == 0) + (void) printf(gettext("errors: No known data " + "errors\n")); + else if (!cbp->cb_verbose) + (void) printf(gettext("errors: %llu data " + "errors, use '-v' for a list\n"), + (u_longlong_t)nerr); + else + print_error_log(zhp); + } + } else { + (void) printf(gettext("config: The configuration cannot be " + "determined.\n")); + } + + return (0); +} + +/* + * zpool status [-vx] [pool] ... + * + * -v Display complete error logs + * -x Display only pools with potential problems + * + * Describes the health status of all pools or some subset. + */ +int +zpool_do_status(int argc, char **argv) +{ + int c; + int ret; + status_cbdata_t cb = { 0 }; + + /* check options */ + while ((c = getopt(argc, argv, "vx")) != -1) { + switch (c) { + case 'v': + cb.cb_verbose = B_TRUE; + break; + case 'x': + cb.cb_explain = B_TRUE; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + argc -= optind; + argv += optind; + + cb.cb_first = B_TRUE; + + if (argc == 0) + cb.cb_allpools = B_TRUE; + + ret = for_each_pool(argc, argv, B_TRUE, NULL, status_callback, &cb); + + if (argc == 0 && cb.cb_count == 0) + (void) printf(gettext("no pools available\n")); + else if (cb.cb_explain && cb.cb_first && cb.cb_allpools) + (void) printf(gettext("all pools are healthy\n")); + + return (ret); +} + +typedef struct upgrade_cbdata { + int cb_all; + int cb_first; + int cb_newer; + int cb_argc; + uint64_t cb_version; + char **cb_argv; +} upgrade_cbdata_t; + +static int +upgrade_cb(zpool_handle_t *zhp, void *arg) +{ + upgrade_cbdata_t *cbp = arg; + nvlist_t *config; + uint64_t version; + int ret = 0; + + config = zpool_get_config(zhp, NULL); + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, + &version) == 0); + + if (!cbp->cb_newer && version < SPA_VERSION) { + if (!cbp->cb_all) { + if (cbp->cb_first) { + (void) printf(gettext("The following pools are " + "out of date, and can be upgraded. 
After " + "being\nupgraded, these pools will no " + "longer be accessible by older software " + "versions.\n\n")); + (void) printf(gettext("VER POOL\n")); + (void) printf(gettext("--- ------------\n")); + cbp->cb_first = B_FALSE; + } + + (void) printf("%2llu %s\n", (u_longlong_t)version, + zpool_get_name(zhp)); + } else { + cbp->cb_first = B_FALSE; + ret = zpool_upgrade(zhp, cbp->cb_version); + if (!ret) { + (void) printf(gettext("Successfully upgraded " + "'%s'\n\n"), zpool_get_name(zhp)); + } + } + } else if (cbp->cb_newer && version > SPA_VERSION) { + assert(!cbp->cb_all); + + if (cbp->cb_first) { + (void) printf(gettext("The following pools are " + "formatted using a newer software version and\n" + "cannot be accessed on the current system.\n\n")); + (void) printf(gettext("VER POOL\n")); + (void) printf(gettext("--- ------------\n")); + cbp->cb_first = B_FALSE; + } + + (void) printf("%2llu %s\n", (u_longlong_t)version, + zpool_get_name(zhp)); + } + + zpool_close(zhp); + return (ret); +} + +/* ARGSUSED */ +static int +upgrade_one(zpool_handle_t *zhp, void *data) +{ + upgrade_cbdata_t *cbp = data; + uint64_t cur_version; + int ret; + + if (strcmp("log", zpool_get_name(zhp)) == 0) { + (void) printf(gettext("'log' is now a reserved word\n" + "Pool 'log' must be renamed using export and import" + " to upgrade.\n")); + return (1); + } + + cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); + if (cur_version > cbp->cb_version) { + (void) printf(gettext("Pool '%s' is already formatted " + "using more current version '%llu'.\n"), + zpool_get_name(zhp), cur_version); + return (0); + } + if (cur_version == cbp->cb_version) { + (void) printf(gettext("Pool '%s' is already formatted " + "using the current version.\n"), zpool_get_name(zhp)); + return (0); + } + + ret = zpool_upgrade(zhp, cbp->cb_version); + + if (!ret) { + (void) printf(gettext("Successfully upgraded '%s' " + "from version %llu to version %llu\n\n"), + zpool_get_name(zhp), (u_longlong_t)cur_version, + (u_longlong_t)cbp->cb_version); + } + + return (ret != 0); +} + +/* + * zpool upgrade + * zpool upgrade -v + * zpool upgrade [-V version] <-a | pool ...> + * + * With no arguments, display downrev'd ZFS pool available for upgrade. + * Individual pools can be upgraded by specifying the pool, and '-a' will + * upgrade all pools. 
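+ *
+ * For example, 'zpool upgrade -V 10 tank' upgrades the pool 'tank' to
+ * on-disk version 10 rather than the latest version this software supports.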
+ */ +int +zpool_do_upgrade(int argc, char **argv) +{ + int c; + upgrade_cbdata_t cb = { 0 }; + int ret = 0; + boolean_t showversions = B_FALSE; + char *end; + + + /* check options */ + while ((c = getopt(argc, argv, "avV:")) != -1) { + switch (c) { + case 'a': + cb.cb_all = B_TRUE; + break; + case 'v': + showversions = B_TRUE; + break; + case 'V': + cb.cb_version = strtoll(optarg, &end, 10); + if (*end != '\0' || cb.cb_version > SPA_VERSION || + cb.cb_version < SPA_VERSION_1) { + (void) fprintf(stderr, + gettext("invalid version '%s'\n"), optarg); + usage(B_FALSE); + } + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + + cb.cb_argc = argc; + cb.cb_argv = argv; + argc -= optind; + argv += optind; + + if (cb.cb_version == 0) { + cb.cb_version = SPA_VERSION; + } else if (!cb.cb_all && argc == 0) { + (void) fprintf(stderr, gettext("-V option is " + "incompatible with other arguments\n")); + usage(B_FALSE); + } + + if (showversions) { + if (cb.cb_all || argc != 0) { + (void) fprintf(stderr, gettext("-v option is " + "incompatible with other arguments\n")); + usage(B_FALSE); + } + } else if (cb.cb_all) { + if (argc != 0) { + (void) fprintf(stderr, gettext("-a option should not " + "be used along with a pool name\n")); + usage(B_FALSE); + } + } + + (void) printf(gettext("This system is currently running " + "ZFS pool version %llu.\n\n"), SPA_VERSION); + cb.cb_first = B_TRUE; + if (showversions) { + (void) printf(gettext("The following versions are " + "supported:\n\n")); + (void) printf(gettext("VER DESCRIPTION\n")); + (void) printf("--- -----------------------------------------" + "---------------\n"); + (void) printf(gettext(" 1 Initial ZFS version\n")); + (void) printf(gettext(" 2 Ditto blocks " + "(replicated metadata)\n")); + (void) printf(gettext(" 3 Hot spares and double parity " + "RAID-Z\n")); + (void) printf(gettext(" 4 zpool history\n")); + (void) printf(gettext(" 5 Compression using the gzip " + "algorithm\n")); + (void) printf(gettext(" 6 bootfs pool property\n")); + (void) printf(gettext(" 7 Separate intent log devices\n")); + (void) printf(gettext(" 8 Delegated administration\n")); + (void) printf(gettext(" 9 refquota and refreservation " + "properties\n")); + (void) printf(gettext(" 10 Cache devices\n")); + (void) printf(gettext(" 11 Improved scrub performance\n")); + (void) printf(gettext(" 12 Snapshot properties\n")); + (void) printf(gettext(" 13 snapused property\n")); + (void) printf(gettext(" 14 passthrough-x aclinherit " + "support\n")); + (void) printf(gettext("For more information on a particular " + "version, including supported releases, see:\n\n")); + (void) printf("http://www.opensolaris.org/os/community/zfs/" + "version/N\n\n"); + (void) printf(gettext("Where 'N' is the version number.\n")); + } else if (argc == 0) { + int notfound; + + ret = zpool_iter(g_zfs, upgrade_cb, &cb); + notfound = cb.cb_first; + + if (!cb.cb_all && ret == 0) { + if (!cb.cb_first) + (void) printf("\n"); + cb.cb_first = B_TRUE; + cb.cb_newer = B_TRUE; + ret = zpool_iter(g_zfs, upgrade_cb, &cb); + if (!cb.cb_first) { + notfound = B_FALSE; + (void) printf("\n"); + } + } + + if (ret == 0) { + if (notfound) + (void) printf(gettext("All pools are formatted " + "using this version.\n")); + else if (!cb.cb_all) + (void) printf(gettext("Use 'zpool upgrade -v' " + "for a list of available versions and " + "their associated\nfeatures.\n")); + } + } else { + ret = for_each_pool(argc, argv, B_FALSE, NULL, + upgrade_one, &cb); + } + + 
return (ret); +} + +typedef struct hist_cbdata { + boolean_t first; + int longfmt; + int internal; +} hist_cbdata_t; + +char *hist_event_table[LOG_END] = { + "invalid event", + "pool create", + "vdev add", + "pool remove", + "pool destroy", + "pool export", + "pool import", + "vdev attach", + "vdev replace", + "vdev detach", + "vdev online", + "vdev offline", + "vdev upgrade", + "pool clear", + "pool scrub", + "pool property set", + "create", + "clone", + "destroy", + "destroy_begin_sync", + "inherit", + "property set", + "quota set", + "permission update", + "permission remove", + "permission who remove", + "promote", + "receive", + "rename", + "reservation set", + "replay_inc_sync", + "replay_full_sync", + "rollback", + "snapshot", + "filesystem version upgrade", + "refquota set", + "refreservation set", + "pool scrub done", +}; + +/* + * Print out the command history for a specific pool. + */ +static int +get_history_one(zpool_handle_t *zhp, void *data) +{ + nvlist_t *nvhis; + nvlist_t **records; + uint_t numrecords; + char *cmdstr; + char *pathstr; + uint64_t dst_time; + time_t tsec; + struct tm t; + char tbuf[30]; + int ret, i; + uint64_t who; + struct passwd *pwd; + char *hostname; + char *zonename; + char internalstr[MAXPATHLEN]; + hist_cbdata_t *cb = (hist_cbdata_t *)data; + uint64_t txg; + uint64_t ievent; + + cb->first = B_FALSE; + + (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp)); + + if ((ret = zpool_get_history(zhp, &nvhis)) != 0) + return (ret); + + verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD, + &records, &numrecords) == 0); + for (i = 0; i < numrecords; i++) { + if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME, + &dst_time) != 0) + continue; + + /* is it an internal event or a standard event? */ + if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD, + &cmdstr) != 0) { + if (cb->internal == 0) + continue; + + if (nvlist_lookup_uint64(records[i], + ZPOOL_HIST_INT_EVENT, &ievent) != 0) + continue; + verify(nvlist_lookup_uint64(records[i], + ZPOOL_HIST_TXG, &txg) == 0); + verify(nvlist_lookup_string(records[i], + ZPOOL_HIST_INT_STR, &pathstr) == 0); + if (ievent >= LOG_END) + continue; + (void) snprintf(internalstr, + sizeof (internalstr), + "[internal %s txg:%lld] %s", + hist_event_table[ievent], txg, + pathstr); + cmdstr = internalstr; + } + tsec = dst_time; + (void) localtime_r(&tsec, &t); + (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); + (void) printf("%s %s", tbuf, cmdstr); + + if (!cb->longfmt) { + (void) printf("\n"); + continue; + } + (void) printf(" ["); + if (nvlist_lookup_uint64(records[i], + ZPOOL_HIST_WHO, &who) == 0) { + pwd = getpwuid((uid_t)who); + if (pwd) + (void) printf("user %s on", + pwd->pw_name); + else + (void) printf("user %d on", + (int)who); + } else { + (void) printf(gettext("no info]\n")); + continue; + } + if (nvlist_lookup_string(records[i], + ZPOOL_HIST_HOST, &hostname) == 0) { + (void) printf(" %s", hostname); + } + if (nvlist_lookup_string(records[i], + ZPOOL_HIST_ZONE, &zonename) == 0) { + (void) printf(":%s", zonename); + } + + (void) printf("]"); + (void) printf("\n"); + } + (void) printf("\n"); + nvlist_free(nvhis); + + return (ret); +} + +/* + * zpool history + * + * Displays the history of commands that modified pools. 
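+ *
+ * For example, 'zpool history -il tank' also shows internally logged events
+ * and appends the user, hostname and zone recorded for each entry.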
+ */ + + +int +zpool_do_history(int argc, char **argv) +{ + hist_cbdata_t cbdata = { 0 }; + int ret; + int c; + + cbdata.first = B_TRUE; + /* check options */ + while ((c = getopt(argc, argv, "li")) != -1) { + switch (c) { + case 'l': + cbdata.longfmt = 1; + break; + case 'i': + cbdata.internal = 1; + break; + case '?': + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + optopt); + usage(B_FALSE); + } + } + argc -= optind; + argv += optind; + + ret = for_each_pool(argc, argv, B_FALSE, NULL, get_history_one, + &cbdata); + + if (argc == 0 && cbdata.first == B_TRUE) { + (void) printf(gettext("no pools available\n")); + return (0); + } + + return (ret); +} + +static int +get_callback(zpool_handle_t *zhp, void *data) +{ + zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; + char value[MAXNAMELEN]; + zprop_source_t srctype; + zprop_list_t *pl; + + for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { + + /* + * Skip the special fake placeholder. This will also skip + * over the name property when 'all' is specified. + */ + if (pl->pl_prop == ZPOOL_PROP_NAME && + pl == cbp->cb_proplist) + continue; + + if (zpool_get_prop(zhp, pl->pl_prop, + value, sizeof (value), &srctype) != 0) + continue; + + zprop_print_one_property(zpool_get_name(zhp), cbp, + zpool_prop_to_name(pl->pl_prop), value, srctype, NULL); + } + return (0); +} + +int +zpool_do_get(int argc, char **argv) +{ + zprop_get_cbdata_t cb = { 0 }; + zprop_list_t fake_name = { 0 }; + int ret; + + if (argc < 3) + usage(B_FALSE); + + cb.cb_first = B_TRUE; + cb.cb_sources = ZPROP_SRC_ALL; + cb.cb_columns[0] = GET_COL_NAME; + cb.cb_columns[1] = GET_COL_PROPERTY; + cb.cb_columns[2] = GET_COL_VALUE; + cb.cb_columns[3] = GET_COL_SOURCE; + cb.cb_type = ZFS_TYPE_POOL; + + if (zprop_get_list(g_zfs, argv[1], &cb.cb_proplist, + ZFS_TYPE_POOL) != 0) + usage(B_FALSE); + + if (cb.cb_proplist != NULL) { + fake_name.pl_prop = ZPOOL_PROP_NAME; + fake_name.pl_width = strlen(gettext("NAME")); + fake_name.pl_next = cb.cb_proplist; + cb.cb_proplist = &fake_name; + } + + ret = for_each_pool(argc - 2, argv + 2, B_TRUE, &cb.cb_proplist, + get_callback, &cb); + + if (cb.cb_proplist == &fake_name) + zprop_free_list(fake_name.pl_next); + else + zprop_free_list(cb.cb_proplist); + + return (ret); +} + +typedef struct set_cbdata { + char *cb_propname; + char *cb_value; + boolean_t cb_any_successful; +} set_cbdata_t; + +int +set_callback(zpool_handle_t *zhp, void *data) +{ + int error; + set_cbdata_t *cb = (set_cbdata_t *)data; + + error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value); + + if (!error) + cb->cb_any_successful = B_TRUE; + + return (error); +} + +int +zpool_do_set(int argc, char **argv) +{ + set_cbdata_t cb = { 0 }; + int error; + + if (argc > 1 && argv[1][0] == '-') { + (void) fprintf(stderr, gettext("invalid option '%c'\n"), + argv[1][1]); + usage(B_FALSE); + } + + if (argc < 2) { + (void) fprintf(stderr, gettext("missing property=value " + "argument\n")); + usage(B_FALSE); + } + + if (argc < 3) { + (void) fprintf(stderr, gettext("missing pool name\n")); + usage(B_FALSE); + } + + if (argc > 3) { + (void) fprintf(stderr, gettext("too many pool names\n")); + usage(B_FALSE); + } + + cb.cb_propname = argv[1]; + cb.cb_value = strchr(cb.cb_propname, '='); + if (cb.cb_value == NULL) { + (void) fprintf(stderr, gettext("missing value in " + "property=value argument\n")); + usage(B_FALSE); + } + + *(cb.cb_value) = '\0'; + cb.cb_value++; + + error = for_each_pool(argc - 2, argv + 2, B_TRUE, NULL, + set_callback, &cb); + + return (error); +} + 
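+/*
+ * Look up 'command' in command_table and store its index in '*idx'.
+ * Returns 0 if the command is found and 1 otherwise.
+ */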
+static int +find_command_idx(char *command, int *idx) +{ + int i; + + for (i = 0; i < NCOMMAND; i++) { + if (command_table[i].name == NULL) + continue; + + if (strcmp(command, command_table[i].name) == 0) { + *idx = i; + return (0); + } + } + return (1); +} + +int +main(int argc, char **argv) +{ + int ret; + int i; + char *cmdname; + + (void) setlocale(LC_ALL, ""); + (void) textdomain(TEXT_DOMAIN); + + if ((g_zfs = libzfs_init()) == NULL) { + (void) fprintf(stderr, gettext("internal error: failed to " + "initialize ZFS library\n")); + return (1); + } + + libzfs_print_on_error(g_zfs, B_TRUE); + + opterr = 0; + + /* + * Make sure the user has specified some command. + */ + if (argc < 2) { + (void) fprintf(stderr, gettext("missing command\n")); + usage(B_FALSE); + } + + cmdname = argv[1]; + + /* + * Special case '-?' + */ + if (strcmp(cmdname, "-?") == 0) + usage(B_TRUE); + + zpool_set_history_str("zpool", argc, argv, history_str); + verify(zpool_stage_history(g_zfs, history_str) == 0); + + /* + * Run the appropriate command. + */ + if (find_command_idx(cmdname, &i) == 0) { + current_command = &command_table[i]; + ret = command_table[i].func(argc - 1, argv + 1); + } else if (strchr(cmdname, '=')) { + verify(find_command_idx("set", &i) == 0); + current_command = &command_table[i]; + ret = command_table[i].func(argc, argv); + } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) { + /* + * 'freeze' is a vile debugging abomination, so we treat + * it as such. + */ + char buf[16384]; + int fd = open(ZFS_DEV, O_RDWR); + (void) strcpy((void *)buf, argv[2]); + return (!!ioctl(fd, ZFS_IOC_POOL_FREEZE, buf)); + } else { + (void) fprintf(stderr, gettext("unrecognized " + "command '%s'\n"), cmdname); + usage(B_FALSE); + } + + libzfs_fini(g_zfs); + + /* + * The 'ZFS_ABORT' environment variable causes us to dump core on exit + * for the purposes of running ::findleaks. + */ + if (getenv("ZFS_ABORT") != NULL) { + (void) printf("dumping core by request\n"); + abort(); + } + + return (ret); +} diff --git a/external/cddl/osnet/dist/cmd/zpool/zpool_util.c b/external/cddl/osnet/dist/cmd/zpool/zpool_util.c new file mode 100644 index 000000000000..f44da4ff60f5 --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zpool/zpool_util.c @@ -0,0 +1,104 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include + +#include "zpool_util.h" + +/* + * Utility function to guarantee malloc() success. 
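+ * The memory returned is zeroed (allocated with calloc()) and must be
+ * released by the caller with free().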
+ */ +void * +safe_malloc(size_t size) +{ + void *data; + + if ((data = calloc(1, size)) == NULL) { + (void) fprintf(stderr, "internal error: out of memory\n"); + exit(1); + } + + return (data); +} + +/* + * Same as above, but for strdup() + */ +char * +safe_strdup(const char *str) +{ + char *ret; + + if ((ret = strdup(str)) == NULL) { + (void) fprintf(stderr, "internal error: out of memory\n"); + exit(1); + } + + return (ret); +} + +/* + * Display an out of memory error message and abort the current program. + */ +void +zpool_no_memory(void) +{ + assert(errno == ENOMEM); + (void) fprintf(stderr, + gettext("internal error: out of memory\n")); + exit(1); +} + +/* + * Return the number of logs in supplied nvlist + */ +uint_t +num_logs(nvlist_t *nv) +{ + uint_t nlogs = 0; + uint_t c, children; + nvlist_t **child; + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) + return (0); + + for (c = 0; c < children; c++) { + uint64_t is_log = B_FALSE; + + (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, + &is_log); + if (is_log) + nlogs++; + } + return (nlogs); +} diff --git a/external/cddl/osnet/dist/cmd/zpool/zpool_util.h b/external/cddl/osnet/dist/cmd/zpool/zpool_util.h new file mode 100644 index 000000000000..e82f3202af2a --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zpool/zpool_util.h @@ -0,0 +1,72 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef ZPOOL_UTIL_H +#define ZPOOL_UTIL_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Basic utility functions + */ +void *safe_malloc(size_t); +char *safe_strdup(const char *); +void zpool_no_memory(void); +uint_t num_logs(nvlist_t *nv); + +/* + * Virtual device functions + */ + +nvlist_t *make_root_vdev(zpool_handle_t *zhp, int force, int check_rep, + boolean_t isreplace, boolean_t dryrun, int argc, char **argv); + +/* + * Pool list functions + */ +int for_each_pool(int, char **, boolean_t unavail, zprop_list_t **, + zpool_iter_f, void *); + +typedef struct zpool_list zpool_list_t; + +zpool_list_t *pool_list_get(int, char **, zprop_list_t **, int *); +void pool_list_update(zpool_list_t *); +int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *); +void pool_list_free(zpool_list_t *); +int pool_list_count(zpool_list_t *); +void pool_list_remove(zpool_list_t *, zpool_handle_t *); + +libzfs_handle_t *g_zfs; + +#ifdef __cplusplus +} +#endif + +#endif /* ZPOOL_UTIL_H */ diff --git a/external/cddl/osnet/dist/cmd/zpool/zpool_vdev.c b/external/cddl/osnet/dist/cmd/zpool/zpool_vdev.c new file mode 100644 index 000000000000..10007c14927f --- /dev/null +++ b/external/cddl/osnet/dist/cmd/zpool/zpool_vdev.c @@ -0,0 +1,1401 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Functions to convert between a list of vdevs and an nvlist representing the + * configuration. Each entry in the list can be one of: + * + * Device vdevs + * disk=(path=..., devid=...) + * file=(path=...) + * + * Group vdevs + * raidz[1|2]=(...) + * mirror=(...) + * + * Hot spares + * + * While the underlying implementation supports it, group vdevs cannot contain + * other group vdevs. All userland verification of devices is contained within + * this file. If successful, the nvlist returned can be passed directly to the + * kernel; we've done as much verification as possible in userland. + * + * Hot spares are a special case, and passed down as an array of disk vdevs, at + * the same level as the root of the vdev tree. + * + * The only function exported by this file is 'make_root_vdev'. The + * function performs several passes: + * + * 1. Construct the vdev specification. Performs syntax validation and + * makes sure each device is valid. + * 2. Check for devices in use. Using libdiskmgt, makes sure that no + * devices are also in use. Some can be overridden using the 'force' + * flag, others cannot. + * 3. Check for replication errors if the 'force' flag is not specified. + * validates that the replication level is consistent across the + * entire pool. + * 4. 
Call libzfs to label any whole disks with an EFI label. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "zpool_util.h" + +#define DISK_ROOT "/dev/dsk" +#define RDISK_ROOT "/dev/rdsk" +#define BACKUP_SLICE "s2" + +/* + * For any given vdev specification, we can have multiple errors. The + * vdev_error() function keeps track of whether we have seen an error yet, and + * prints out a header if its the first error we've seen. + */ +boolean_t error_seen; +boolean_t is_force; + +/*PRINTFLIKE1*/ +static void +vdev_error(const char *fmt, ...) +{ + va_list ap; + + if (!error_seen) { + (void) fprintf(stderr, gettext("invalid vdev specification\n")); + if (!is_force) + (void) fprintf(stderr, gettext("use '-f' to override " + "the following errors:\n")); + else + (void) fprintf(stderr, gettext("the following errors " + "must be manually repaired:\n")); + error_seen = B_TRUE; + } + + va_start(ap, fmt); + (void) vfprintf(stderr, fmt, ap); + va_end(ap); +} + +static void +libdiskmgt_error(int error) +{ + /* + * ENXIO/ENODEV is a valid error message if the device doesn't live in + * /dev/dsk. Don't bother printing an error message in this case. + */ + if (error == ENXIO || error == ENODEV) + return; + + (void) fprintf(stderr, gettext("warning: device in use checking " + "failed: %s\n"), strerror(error)); +} + +/* + * Validate a device, passing the bulk of the work off to libdiskmgt. + */ +static int +check_slice(const char *path, int force, boolean_t wholedisk, boolean_t isspare) +{ + char *msg; + int error = 0; + dm_who_type_t who; + + if (force) + who = DM_WHO_ZPOOL_FORCE; + else if (isspare) + who = DM_WHO_ZPOOL_SPARE; + else + who = DM_WHO_ZPOOL; + + if (dm_inuse((char *)path, &msg, who, &error) || error) { + if (error != 0) { + libdiskmgt_error(error); + return (0); + } else { + vdev_error("%s", msg); + free(msg); + return (-1); + } + } + + /* + * If we're given a whole disk, ignore overlapping slices since we're + * about to label it anyway. + */ + error = 0; + if (!wholedisk && !force && + (dm_isoverlapping((char *)path, &msg, &error) || error)) { + if (error == 0) { + /* dm_isoverlapping returned -1 */ + vdev_error(gettext("%s overlaps with %s\n"), path, msg); + free(msg); + return (-1); + } else if (error != ENODEV) { + /* libdiskmgt's devcache only handles physical drives */ + libdiskmgt_error(error); + return (0); + } + } + + return (0); +} + + +/* + * Validate a whole disk. Iterate over all slices on the disk and make sure + * that none is in use by calling check_slice(). + */ +static int +check_disk(const char *name, dm_descriptor_t disk, int force, int isspare) +{ + dm_descriptor_t *drive, *media, *slice; + int err = 0; + int i; + int ret; + + /* + * Get the drive associated with this disk. This should never fail, + * because we already have an alias handle open for the device. + */ + if ((drive = dm_get_associated_descriptors(disk, DM_DRIVE, + &err)) == NULL || *drive == NULL) { + if (err) + libdiskmgt_error(err); + return (0); + } + + if ((media = dm_get_associated_descriptors(*drive, DM_MEDIA, + &err)) == NULL) { + dm_free_descriptors(drive); + if (err) + libdiskmgt_error(err); + return (0); + } + + dm_free_descriptors(drive); + + /* + * It is possible that the user has specified a removable media drive, + * and the media is not present. 
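[Editor's sketch] The vdev_error()/error_seen idiom above (print a one-time "invalid vdev specification" header, then stream the individual problems) is reusable on its own. A minimal standalone sketch of that idiom, using hypothetical names (report_error, saw_error, allow_force) rather than the ones in this file:

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical names; mirrors the vdev_error()/error_seen idiom above. */
static int saw_error;       /* have we printed the header yet? */
static int allow_force;     /* was -f given? */

static void
report_error(const char *fmt, ...)
{
    va_list ap;

    if (!saw_error) {
        (void) fprintf(stderr, "invalid vdev specification\n");
        (void) fprintf(stderr, allow_force ?
            "the following errors must be manually repaired:\n" :
            "use '-f' to override the following errors:\n");
        saw_error = 1;
    }

    va_start(ap, fmt);
    (void) vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int
main(void)
{
    report_error("%s is part of active pool '%s'\n",
        "/dev/dsk/c0t0d0s0", "tank");
    report_error("%s overlaps with %s\n", "c0t0d0s0", "c0t0d0s2");
    return (saw_error != 0);
}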
+ */ + if (*media == NULL) { + dm_free_descriptors(media); + vdev_error(gettext("'%s' has no media in drive\n"), name); + return (-1); + } + + if ((slice = dm_get_associated_descriptors(*media, DM_SLICE, + &err)) == NULL) { + dm_free_descriptors(media); + if (err) + libdiskmgt_error(err); + return (0); + } + + dm_free_descriptors(media); + + ret = 0; + + /* + * Iterate over all slices and report any errors. We don't care about + * overlapping slices because we are using the whole disk. + */ + for (i = 0; slice[i] != NULL; i++) { + char *name = dm_get_name(slice[i], &err); + + if (check_slice(name, force, B_TRUE, isspare) != 0) + ret = -1; + + dm_free_name(name); + } + + dm_free_descriptors(slice); + return (ret); +} + +/* + * Validate a device. + */ +static int +check_device(const char *path, boolean_t force, boolean_t isspare) +{ + dm_descriptor_t desc; + int err; + char *dev; + + /* + * For whole disks, libdiskmgt does not include the leading dev path. + */ + dev = strrchr(path, '/'); + assert(dev != NULL); + dev++; + if ((desc = dm_get_descriptor_by_name(DM_ALIAS, dev, &err)) != NULL) { + err = check_disk(path, desc, force, isspare); + dm_free_descriptor(desc); + return (err); + } + + return (check_slice(path, force, B_FALSE, isspare)); +} + +/* + * Check that a file is valid. All we can do in this case is check that it's + * not in use by another pool, and not in use by swap. + */ +static int +check_file(const char *file, boolean_t force, boolean_t isspare) +{ + char *name; + int fd; + int ret = 0; + int err; + pool_state_t state; + boolean_t inuse; + + if (dm_inuse_swap(file, &err)) { + if (err) + libdiskmgt_error(err); + else + vdev_error(gettext("%s is currently used by swap. " + "Please see swap(1M).\n"), file); + return (-1); + } + + if ((fd = open(file, O_RDONLY)) < 0) + return (0); + + if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) { + const char *desc; + + switch (state) { + case POOL_STATE_ACTIVE: + desc = gettext("active"); + break; + + case POOL_STATE_EXPORTED: + desc = gettext("exported"); + break; + + case POOL_STATE_POTENTIALLY_ACTIVE: + desc = gettext("potentially active"); + break; + + default: + desc = gettext("unknown"); + break; + } + + /* + * Allow hot spares to be shared between pools. + */ + if (state == POOL_STATE_SPARE && isspare) + return (0); + + if (state == POOL_STATE_ACTIVE || + state == POOL_STATE_SPARE || !force) { + switch (state) { + case POOL_STATE_SPARE: + vdev_error(gettext("%s is reserved as a hot " + "spare for pool %s\n"), file, name); + break; + default: + vdev_error(gettext("%s is part of %s pool " + "'%s'\n"), file, desc, name); + break; + } + ret = -1; + } + + free(name); + } + + (void) close(fd); + return (ret); +} + + +/* + * By "whole disk" we mean an entire physical disk (something we can + * label, toggle the write cache on, etc.) as opposed to the full + * capacity of a pseudo-device such as lofi or did. We act as if we + * are labeling the disk, which should be a pretty good test of whether + * it's a viable device or not. Returns B_TRUE if it is and B_FALSE if + * it isn't. 
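[Editor's sketch] check_file() ultimately applies a small rule: a device that belongs to an active pool, or one reserved as a hot spare, can never be reused, while anything else may be overridden with -f (the spare-for-spare sharing special case is ignored here for brevity). A standalone sketch of that rule; the enum and helper names are hypothetical and simply mirror the POOL_STATE_* values consulted above:

#include <stdio.h>

/* Hypothetical mirror of the pool states check_file() cares about. */
typedef enum {
    STATE_ACTIVE,
    STATE_EXPORTED,
    STATE_POTENTIALLY_ACTIVE,
    STATE_SPARE,
    STATE_UNKNOWN
} pool_state;

static const char *
pool_state_name(pool_state s)
{
    switch (s) {
    case STATE_ACTIVE:              return ("active");
    case STATE_EXPORTED:            return ("exported");
    case STATE_POTENTIALLY_ACTIVE:  return ("potentially active");
    case STATE_SPARE:               return ("reserved as a hot spare");
    default:                        return ("unknown");
    }
}

/* Active pools and hot spares always block reuse; -f overrides the rest. */
static int
may_reuse(pool_state s, int force)
{
    if (s == STATE_ACTIVE || s == STATE_SPARE)
        return (0);
    return (force);
}

int
main(void)
{
    (void) printf("exported + -f : %s\n",
        may_reuse(STATE_EXPORTED, 1) ? "ok" : "refused");
    (void) printf("active   + -f : %s (%s)\n",
        may_reuse(STATE_ACTIVE, 1) ? "ok" : "refused",
        pool_state_name(STATE_ACTIVE));
    return (0);
}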
+ */ +static boolean_t +is_whole_disk(const char *arg) +{ + struct dk_gpt *label; + int fd; + char path[MAXPATHLEN]; + + (void) snprintf(path, sizeof (path), "%s%s%s", + RDISK_ROOT, strrchr(arg, '/'), BACKUP_SLICE); + if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) + return (B_FALSE); + if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) { + (void) close(fd); + return (B_FALSE); + } + efi_free(label); + (void) close(fd); + return (B_TRUE); +} + +/* + * Create a leaf vdev. Determine if this is a file or a device. If it's a + * device, fill in the device id to make a complete nvlist. Valid forms for a + * leaf vdev are: + * + * /dev/dsk/xxx Complete disk path + * /xxx Full path to file + * xxx Shorthand for /dev/dsk/xxx + */ +static nvlist_t * +make_leaf_vdev(const char *arg, uint64_t is_log) +{ + char path[MAXPATHLEN]; + struct stat64 statbuf; + nvlist_t *vdev = NULL; + char *type = NULL; + boolean_t wholedisk = B_FALSE; + + /* + * Determine what type of vdev this is, and put the full path into + * 'path'. We detect whether this is a device of file afterwards by + * checking the st_mode of the file. + */ + if (arg[0] == '/') { + /* + * Complete device or file path. Exact type is determined by + * examining the file descriptor afterwards. + */ + wholedisk = is_whole_disk(arg); + if (!wholedisk && (stat64(arg, &statbuf) != 0)) { + (void) fprintf(stderr, + gettext("cannot open '%s': %s\n"), + arg, strerror(errno)); + return (NULL); + } + + (void) strlcpy(path, arg, sizeof (path)); + } else { + /* + * This may be a short path for a device, or it could be total + * gibberish. Check to see if it's a known device in + * /dev/dsk/. As part of this check, see if we've been given a + * an entire disk (minus the slice number). + */ + (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, + arg); + wholedisk = is_whole_disk(path); + if (!wholedisk && (stat64(path, &statbuf) != 0)) { + /* + * If we got ENOENT, then the user gave us + * gibberish, so try to direct them with a + * reasonable error message. Otherwise, + * regurgitate strerror() since it's the best we + * can do. + */ + if (errno == ENOENT) { + (void) fprintf(stderr, + gettext("cannot open '%s': no such " + "device in %s\n"), arg, DISK_ROOT); + (void) fprintf(stderr, + gettext("must be a full path or " + "shorthand device name\n")); + return (NULL); + } else { + (void) fprintf(stderr, + gettext("cannot open '%s': %s\n"), + path, strerror(errno)); + return (NULL); + } + } + } + + /* + * Determine whether this is a device or a file. + */ + if (wholedisk || S_ISBLK(statbuf.st_mode)) { + type = VDEV_TYPE_DISK; + } else if (S_ISREG(statbuf.st_mode)) { + type = VDEV_TYPE_FILE; + } else { + (void) fprintf(stderr, gettext("cannot use '%s': must be a " + "block device or regular file\n"), path); + return (NULL); + } + + /* + * Finally, we have the complete device or file, and we know that it is + * acceptable to use. Construct the nvlist to describe this vdev. All + * vdevs have a 'path' element, and devices also have a 'devid' element. + */ + verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0); + verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0); + verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0); + verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_LOG, is_log) == 0); + if (strcmp(type, VDEV_TYPE_DISK) == 0) + verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, + (uint64_t)wholedisk) == 0); + + /* + * For a whole disk, defer getting its devid until after labeling it. 
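[Editor's sketch] make_leaf_vdev() classifies its argument by expanding shorthand names under DISK_ROOT and then testing st_mode. A standalone sketch of just that classification (no whole-disk probe, no devid handling); classify_leaf() is a hypothetical name:

#include <stdio.h>
#include <sys/stat.h>

#define DISK_ROOT   "/dev/dsk"

/* Hypothetical sketch of make_leaf_vdev()'s disk-vs-file decision. */
static const char *
classify_leaf(const char *arg, char *path, size_t pathlen)
{
    struct stat st;

    if (arg[0] == '/')
        (void) snprintf(path, pathlen, "%s", arg);
    else    /* shorthand: look for it under /dev/dsk */
        (void) snprintf(path, pathlen, "%s/%s", DISK_ROOT, arg);

    if (stat(path, &st) != 0)
        return (NULL);      /* caller prints strerror(errno) */

    if (S_ISBLK(st.st_mode))
        return ("disk");
    if (S_ISREG(st.st_mode))
        return ("file");
    return (NULL);          /* neither a block device nor a regular file */
}

int
main(int argc, char **argv)
{
    char path[1024];
    const char *type;
    int i;

    for (i = 1; i < argc; i++) {
        type = classify_leaf(argv[i], path, sizeof (path));
        (void) printf("%s -> %s (%s)\n", argv[i], path,
            type ? type : "unusable");
    }
    return (0);
}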
+ */ + if (S_ISBLK(statbuf.st_mode) && !wholedisk) { + /* + * Get the devid for the device. + */ + int fd; + ddi_devid_t devid; + char *minor = NULL, *devid_str = NULL; + + if ((fd = open(path, O_RDONLY)) < 0) { + (void) fprintf(stderr, gettext("cannot open '%s': " + "%s\n"), path, strerror(errno)); + nvlist_free(vdev); + return (NULL); + } + + if (devid_get(fd, &devid) == 0) { + if (devid_get_minor_name(fd, &minor) == 0 && + (devid_str = devid_str_encode(devid, minor)) != + NULL) { + verify(nvlist_add_string(vdev, + ZPOOL_CONFIG_DEVID, devid_str) == 0); + } + if (devid_str != NULL) + devid_str_free(devid_str); + if (minor != NULL) + devid_str_free(minor); + devid_free(devid); + } + + (void) close(fd); + } + + return (vdev); +} + +/* + * Go through and verify the replication level of the pool is consistent. + * Performs the following checks: + * + * For the new spec, verifies that devices in mirrors and raidz are the + * same size. + * + * If the current configuration already has inconsistent replication + * levels, ignore any other potential problems in the new spec. + * + * Otherwise, make sure that the current spec (if there is one) and the new + * spec have consistent replication levels. + */ +typedef struct replication_level { + char *zprl_type; + uint64_t zprl_children; + uint64_t zprl_parity; +} replication_level_t; + +#define ZPOOL_FUZZ (16 * 1024 * 1024) + +/* + * Given a list of toplevel vdevs, return the current replication level. If + * the config is inconsistent, then NULL is returned. If 'fatal' is set, then + * an error message will be displayed for each self-inconsistent vdev. + */ +static replication_level_t * +get_replication(nvlist_t *nvroot, boolean_t fatal) +{ + nvlist_t **top; + uint_t t, toplevels; + nvlist_t **child; + uint_t c, children; + nvlist_t *nv; + char *type; + replication_level_t lastrep, rep, *ret; + boolean_t dontreport; + + ret = safe_malloc(sizeof (replication_level_t)); + + verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + &top, &toplevels) == 0); + + lastrep.zprl_type = NULL; + for (t = 0; t < toplevels; t++) { + uint64_t is_log = B_FALSE; + + nv = top[t]; + + /* + * For separate logs we ignore the top level vdev replication + * constraints. + */ + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log); + if (is_log) + continue; + + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, + &type) == 0); + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) { + /* + * This is a 'file' or 'disk' vdev. + */ + rep.zprl_type = type; + rep.zprl_children = 1; + rep.zprl_parity = 0; + } else { + uint64_t vdev_size; + + /* + * This is a mirror or RAID-Z vdev. Go through and make + * sure the contents are all the same (files vs. disks), + * keeping track of the number of elements in the + * process. + * + * We also check that the size of each vdev (if it can + * be determined) is the same. + */ + rep.zprl_type = type; + rep.zprl_children = 0; + + if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) { + verify(nvlist_lookup_uint64(nv, + ZPOOL_CONFIG_NPARITY, + &rep.zprl_parity) == 0); + assert(rep.zprl_parity != 0); + } else { + rep.zprl_parity = 0; + } + + /* + * The 'dontreport' variable indicates that we've + * already reported an error for this spec, so don't + * bother doing it again. 
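[Editor's sketch] get_replication() boils every top-level vdev down to a (type, children, parity) triple and then requires the triples to agree. A standalone sketch of that comparison over the same three fields; rep_t and rep_matches() are hypothetical names:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Same shape as replication_level_t above. */
typedef struct rep {
    const char *type;       /* "mirror", "raidz", "disk", ... */
    uint64_t children;
    uint64_t parity;
} rep_t;

/* Returns 1 if two top-level vdevs have the same replication, else 0. */
static int
rep_matches(const rep_t *a, const rep_t *b)
{
    if (strcmp(a->type, b->type) != 0) {
        (void) printf("mismatch: both %s and %s vdevs present\n",
            a->type, b->type);
        return (0);
    }
    if (a->parity != b->parity) {
        (void) printf("mismatch: %" PRIu64 " vs %" PRIu64
            " device parity\n", a->parity, b->parity);
        return (0);
    }
    if (a->children != b->children) {
        (void) printf("mismatch: %" PRIu64 "-way vs %" PRIu64
            "-way %s vdevs\n", a->children, b->children, a->type);
        return (0);
    }
    return (1);
}

int
main(void)
{
    rep_t mirror2 = { "mirror", 2, 0 };
    rep_t mirror3 = { "mirror", 3, 0 };
    rep_t raidz1  = { "raidz", 4, 1 };

    (void) rep_matches(&mirror2, &mirror3);     /* 2-way vs 3-way */
    (void) rep_matches(&mirror2, &raidz1);      /* mirror vs raidz */
    return (0);
}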
+ */ + type = NULL; + dontreport = 0; + vdev_size = -1ULL; + for (c = 0; c < children; c++) { + nvlist_t *cnv = child[c]; + char *path; + struct stat64 statbuf; + uint64_t size = -1ULL; + char *childtype; + int fd, err; + + rep.zprl_children++; + + verify(nvlist_lookup_string(cnv, + ZPOOL_CONFIG_TYPE, &childtype) == 0); + + /* + * If this is a replacing or spare vdev, then + * get the real first child of the vdev. + */ + if (strcmp(childtype, + VDEV_TYPE_REPLACING) == 0 || + strcmp(childtype, VDEV_TYPE_SPARE) == 0) { + nvlist_t **rchild; + uint_t rchildren; + + verify(nvlist_lookup_nvlist_array(cnv, + ZPOOL_CONFIG_CHILDREN, &rchild, + &rchildren) == 0); + assert(rchildren == 2); + cnv = rchild[0]; + + verify(nvlist_lookup_string(cnv, + ZPOOL_CONFIG_TYPE, + &childtype) == 0); + } + + verify(nvlist_lookup_string(cnv, + ZPOOL_CONFIG_PATH, &path) == 0); + + /* + * If we have a raidz/mirror that combines disks + * with files, report it as an error. + */ + if (!dontreport && type != NULL && + strcmp(type, childtype) != 0) { + if (ret != NULL) + free(ret); + ret = NULL; + if (fatal) + vdev_error(gettext( + "mismatched replication " + "level: %s contains both " + "files and devices\n"), + rep.zprl_type); + else + return (NULL); + dontreport = B_TRUE; + } + + /* + * According to stat(2), the value of 'st_size' + * is undefined for block devices and character + * devices. But there is no effective way to + * determine the real size in userland. + * + * Instead, we'll take advantage of an + * implementation detail of spec_size(). If the + * device is currently open, then we (should) + * return a valid size. + * + * If we still don't get a valid size (indicated + * by a size of 0 or MAXOFFSET_T), then ignore + * this device altogether. + */ + if ((fd = open(path, O_RDONLY)) >= 0) { + err = fstat64(fd, &statbuf); + (void) close(fd); + } else { + err = stat64(path, &statbuf); + } + + if (err != 0 || + statbuf.st_size == 0 || + statbuf.st_size == MAXOFFSET_T) + continue; + + size = statbuf.st_size; + + /* + * Also make sure that devices and + * slices have a consistent size. If + * they differ by a significant amount + * (~16MB) then report an error. + */ + if (!dontreport && + (vdev_size != -1ULL && + (labs(size - vdev_size) > + ZPOOL_FUZZ))) { + if (ret != NULL) + free(ret); + ret = NULL; + if (fatal) + vdev_error(gettext( + "%s contains devices of " + "different sizes\n"), + rep.zprl_type); + else + return (NULL); + dontreport = B_TRUE; + } + + type = childtype; + vdev_size = size; + } + } + + /* + * At this point, we have the replication of the last toplevel + * vdev in 'rep'. Compare it to 'lastrep' to see if its + * different. 
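[Editor's sketch] The size check in the child loop above tolerates small differences: devices whose sizes are within ZPOOL_FUZZ (16 MB) of each other count as equal, and devices whose size cannot be determined (0 or MAXOFFSET_T) are skipped. A standalone sketch of just that tolerance test; sizes_consistent() is a hypothetical name, and it uses an explicit unsigned difference so full 64-bit sizes cannot be truncated:

#include <stdint.h>
#include <stdio.h>

#define FUZZ    (16ULL * 1024 * 1024)   /* same 16 MB slop as ZPOOL_FUZZ */

/*
 * Returns 1 if the two sizes are close enough to live in the same
 * mirror/raidz, 0 otherwise.
 */
static int
sizes_consistent(uint64_t a, uint64_t b)
{
    uint64_t diff = (a > b) ? a - b : b - a;

    return (diff <= FUZZ);
}

int
main(void)
{
    uint64_t gig = 1024ULL * 1024 * 1024;

    (void) printf("1G vs 1G+8M : %s\n",
        sizes_consistent(gig, gig + (8ULL << 20)) ? "ok" : "mismatch");
    (void) printf("1G vs 2G    : %s\n",
        sizes_consistent(gig, 2 * gig) ? "ok" : "mismatch");
    return (0);
}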
+ */ + if (lastrep.zprl_type != NULL) { + if (strcmp(lastrep.zprl_type, rep.zprl_type) != 0) { + if (ret != NULL) + free(ret); + ret = NULL; + if (fatal) + vdev_error(gettext( + "mismatched replication level: " + "both %s and %s vdevs are " + "present\n"), + lastrep.zprl_type, rep.zprl_type); + else + return (NULL); + } else if (lastrep.zprl_parity != rep.zprl_parity) { + if (ret) + free(ret); + ret = NULL; + if (fatal) + vdev_error(gettext( + "mismatched replication level: " + "both %llu and %llu device parity " + "%s vdevs are present\n"), + lastrep.zprl_parity, + rep.zprl_parity, + rep.zprl_type); + else + return (NULL); + } else if (lastrep.zprl_children != rep.zprl_children) { + if (ret) + free(ret); + ret = NULL; + if (fatal) + vdev_error(gettext( + "mismatched replication level: " + "both %llu-way and %llu-way %s " + "vdevs are present\n"), + lastrep.zprl_children, + rep.zprl_children, + rep.zprl_type); + else + return (NULL); + } + } + lastrep = rep; + } + + if (ret != NULL) + *ret = rep; + + return (ret); +} + +/* + * Check the replication level of the vdev spec against the current pool. Calls + * get_replication() to make sure the new spec is self-consistent. If the pool + * has a consistent replication level, then we ignore any errors. Otherwise, + * report any difference between the two. + */ +static int +check_replication(nvlist_t *config, nvlist_t *newroot) +{ + nvlist_t **child; + uint_t children; + replication_level_t *current = NULL, *new; + int ret; + + /* + * If we have a current pool configuration, check to see if it's + * self-consistent. If not, simply return success. + */ + if (config != NULL) { + nvlist_t *nvroot; + + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + if ((current = get_replication(nvroot, B_FALSE)) == NULL) + return (0); + } + /* + * for spares there may be no children, and therefore no + * replication level to check + */ + if ((nvlist_lookup_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) || (children == 0)) { + free(current); + return (0); + } + + /* + * If all we have is logs then there's no replication level to check. + */ + if (num_logs(newroot) == children) { + free(current); + return (0); + } + + /* + * Get the replication level of the new vdev spec, reporting any + * inconsistencies found. + */ + if ((new = get_replication(newroot, B_TRUE)) == NULL) { + free(current); + return (-1); + } + + /* + * Check to see if the new vdev spec matches the replication level of + * the current pool. + */ + ret = 0; + if (current != NULL) { + if (strcmp(current->zprl_type, new->zprl_type) != 0) { + vdev_error(gettext( + "mismatched replication level: pool uses %s " + "and new vdev is %s\n"), + current->zprl_type, new->zprl_type); + ret = -1; + } else if (current->zprl_parity != new->zprl_parity) { + vdev_error(gettext( + "mismatched replication level: pool uses %llu " + "device parity and new vdev uses %llu\n"), + current->zprl_parity, new->zprl_parity); + ret = -1; + } else if (current->zprl_children != new->zprl_children) { + vdev_error(gettext( + "mismatched replication level: pool uses %llu-way " + "%s and new vdev uses %llu-way %s\n"), + current->zprl_children, current->zprl_type, + new->zprl_children, new->zprl_type); + ret = -1; + } + } + + free(new); + if (current != NULL) + free(current); + + return (ret); +} + +/* + * Go through and find any whole disks in the vdev specification, labelling them + * as appropriate. 
When constructing the vdev spec, we were unable to open this + * device in order to provide a devid. Now that we have labelled the disk and + * know that slice 0 is valid, we can construct the devid now. + * + * If the disk was already labeled with an EFI label, we will have gotten the + * devid already (because we were able to open the whole disk). Otherwise, we + * need to get the devid after we label the disk. + */ +static int +make_disks(zpool_handle_t *zhp, nvlist_t *nv) +{ + nvlist_t **child; + uint_t c, children; + char *type, *path, *diskname; + char buf[MAXPATHLEN]; + uint64_t wholedisk; + int fd; + int ret; + ddi_devid_t devid; + char *minor = NULL, *devid_str = NULL; + + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) { + + if (strcmp(type, VDEV_TYPE_DISK) != 0) + return (0); + + /* + * We have a disk device. Get the path to the device + * and see if it's a whole disk by appending the backup + * slice and stat()ing the device. + */ + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, + &wholedisk) != 0 || !wholedisk) + return (0); + + diskname = strrchr(path, '/'); + assert(diskname != NULL); + diskname++; + if (zpool_label_disk(g_zfs, zhp, diskname) == -1) + return (-1); + + /* + * Fill in the devid, now that we've labeled the disk. + */ + (void) snprintf(buf, sizeof (buf), "%ss0", path); + if ((fd = open(buf, O_RDONLY)) < 0) { + (void) fprintf(stderr, + gettext("cannot open '%s': %s\n"), + buf, strerror(errno)); + return (-1); + } + + if (devid_get(fd, &devid) == 0) { + if (devid_get_minor_name(fd, &minor) == 0 && + (devid_str = devid_str_encode(devid, minor)) != + NULL) { + verify(nvlist_add_string(nv, + ZPOOL_CONFIG_DEVID, devid_str) == 0); + } + if (devid_str != NULL) + devid_str_free(devid_str); + if (minor != NULL) + devid_str_free(minor); + devid_free(devid); + } + + /* + * Update the path to refer to the 's0' slice. The presence of + * the 'whole_disk' field indicates to the CLI that we should + * chop off the slice number when displaying the device in + * future output. + */ + verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, buf) == 0); + + (void) close(fd); + + return (0); + } + + for (c = 0; c < children; c++) + if ((ret = make_disks(zhp, child[c])) != 0) + return (ret); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, + &child, &children) == 0) + for (c = 0; c < children; c++) + if ((ret = make_disks(zhp, child[c])) != 0) + return (ret); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, + &child, &children) == 0) + for (c = 0; c < children; c++) + if ((ret = make_disks(zhp, child[c])) != 0) + return (ret); + + return (0); +} + +/* + * Determine if the given path is a hot spare within the given configuration. 
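[Editor's sketch] make_disks() relies on a naming convention: after labeling, the stored path points at slice 0 (the "%ss0" suffix), and the whole_disk flag tells display code to chop that suffix back off. A small standalone sketch of both halves of that convention; the helper names are hypothetical:

#include <stdio.h>
#include <string.h>

/* Build the slice-0 path stored in the config, as make_disks() does. */
static void
slice0_path(const char *disk, char *buf, size_t len)
{
    (void) snprintf(buf, len, "%ss0", disk);
}

/* Display side: hide the slice suffix when whole_disk is set. */
static void
display_name(const char *path, int whole_disk, char *buf, size_t len)
{
    size_t n = strlen(path);

    if (whole_disk && n > 2 && strcmp(path + n - 2, "s0") == 0)
        n -= 2;                     /* chop the slice number */
    (void) snprintf(buf, len, "%.*s", (int)n, path);
}

int
main(void)
{
    char path[64], name[64];

    slice0_path("/dev/dsk/c1t0d0", path, sizeof (path));
    display_name(path, 1, name, sizeof (name));
    (void) printf("stored: %s  displayed: %s\n", path, name);
    return (0);
}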
+ */ +static boolean_t +is_spare(nvlist_t *config, const char *path) +{ + int fd; + pool_state_t state; + char *name = NULL; + nvlist_t *label; + uint64_t guid, spareguid; + nvlist_t *nvroot; + nvlist_t **spares; + uint_t i, nspares; + boolean_t inuse; + + if ((fd = open(path, O_RDONLY)) < 0) + return (B_FALSE); + + if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 || + !inuse || + state != POOL_STATE_SPARE || + zpool_read_label(fd, &label) != 0) { + free(name); + (void) close(fd); + return (B_FALSE); + } + free(name); + + (void) close(fd); + verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0); + nvlist_free(label); + + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, + &spares, &nspares) == 0) { + for (i = 0; i < nspares; i++) { + verify(nvlist_lookup_uint64(spares[i], + ZPOOL_CONFIG_GUID, &spareguid) == 0); + if (spareguid == guid) + return (B_TRUE); + } + } + + return (B_FALSE); +} + +/* + * Go through and find any devices that are in use. We rely on libdiskmgt for + * the majority of this task. + */ +static int +check_in_use(nvlist_t *config, nvlist_t *nv, int force, int isreplacing, + int isspare) +{ + nvlist_t **child; + uint_t c, children; + char *type, *path; + int ret; + char buf[MAXPATHLEN]; + uint64_t wholedisk; + + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) { + + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); + + /* + * As a generic check, we look to see if this is a replace of a + * hot spare within the same pool. If so, we allow it + * regardless of what libdiskmgt or zpool_in_use() says. + */ + if (isreplacing) { + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, + &wholedisk) == 0 && wholedisk) + (void) snprintf(buf, sizeof (buf), "%ss0", + path); + else + (void) strlcpy(buf, path, sizeof (buf)); + if (is_spare(config, buf)) + return (0); + } + + if (strcmp(type, VDEV_TYPE_DISK) == 0) + ret = check_device(path, force, isspare); + + if (strcmp(type, VDEV_TYPE_FILE) == 0) + ret = check_file(path, force, isspare); + + return (ret); + } + + for (c = 0; c < children; c++) + if ((ret = check_in_use(config, child[c], force, + isreplacing, B_FALSE)) != 0) + return (ret); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, + &child, &children) == 0) + for (c = 0; c < children; c++) + if ((ret = check_in_use(config, child[c], force, + isreplacing, B_TRUE)) != 0) + return (ret); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, + &child, &children) == 0) + for (c = 0; c < children; c++) + if ((ret = check_in_use(config, child[c], force, + isreplacing, B_FALSE)) != 0) + return (ret); + + return (0); +} + +static const char * +is_grouping(const char *type, int *mindev) +{ + if (strcmp(type, "raidz") == 0 || strcmp(type, "raidz1") == 0) { + if (mindev != NULL) + *mindev = 2; + return (VDEV_TYPE_RAIDZ); + } + + if (strcmp(type, "raidz2") == 0) { + if (mindev != NULL) + *mindev = 3; + return (VDEV_TYPE_RAIDZ); + } + + if (strcmp(type, "mirror") == 0) { + if (mindev != NULL) + *mindev = 2; + return (VDEV_TYPE_MIRROR); + } + + if (strcmp(type, "spare") == 0) { + if (mindev != NULL) + *mindev = 1; + return (VDEV_TYPE_SPARE); + } + + if (strcmp(type, "log") == 0) { + if (mindev != NULL) + *mindev = 1; + return (VDEV_TYPE_LOG); + } + + if (strcmp(type, "cache") == 0) { + if (mindev != NULL) + *mindev = 1; + return (VDEV_TYPE_L2CACHE); 
+ } + + return (NULL); +} + +/* + * Construct a syntactically valid vdev specification, + * and ensure that all devices and files exist and can be opened. + * Note: we don't bother freeing anything in the error paths + * because the program is just going to exit anyway. + */ +nvlist_t * +construct_spec(int argc, char **argv) +{ + nvlist_t *nvroot, *nv, **top, **spares, **l2cache; + int t, toplevels, mindev, nspares, nlogs, nl2cache; + const char *type; + uint64_t is_log; + boolean_t seen_logs; + + top = NULL; + toplevels = 0; + spares = NULL; + l2cache = NULL; + nspares = 0; + nlogs = 0; + nl2cache = 0; + is_log = B_FALSE; + seen_logs = B_FALSE; + + while (argc > 0) { + nv = NULL; + + /* + * If it's a mirror or raidz, the subsequent arguments are + * its leaves -- until we encounter the next mirror or raidz. + */ + if ((type = is_grouping(argv[0], &mindev)) != NULL) { + nvlist_t **child = NULL; + int c, children = 0; + + if (strcmp(type, VDEV_TYPE_SPARE) == 0) { + if (spares != NULL) { + (void) fprintf(stderr, + gettext("invalid vdev " + "specification: 'spare' can be " + "specified only once\n")); + return (NULL); + } + is_log = B_FALSE; + } + + if (strcmp(type, VDEV_TYPE_LOG) == 0) { + if (seen_logs) { + (void) fprintf(stderr, + gettext("invalid vdev " + "specification: 'log' can be " + "specified only once\n")); + return (NULL); + } + seen_logs = B_TRUE; + is_log = B_TRUE; + argc--; + argv++; + /* + * A log is not a real grouping device. + * We just set is_log and continue. + */ + continue; + } + + if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) { + if (l2cache != NULL) { + (void) fprintf(stderr, + gettext("invalid vdev " + "specification: 'cache' can be " + "specified only once\n")); + return (NULL); + } + is_log = B_FALSE; + } + + if (is_log) { + if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { + (void) fprintf(stderr, + gettext("invalid vdev " + "specification: unsupported 'log' " + "device: %s\n"), type); + return (NULL); + } + nlogs++; + } + + for (c = 1; c < argc; c++) { + if (is_grouping(argv[c], NULL) != NULL) + break; + children++; + child = realloc(child, + children * sizeof (nvlist_t *)); + if (child == NULL) + zpool_no_memory(); + if ((nv = make_leaf_vdev(argv[c], B_FALSE)) + == NULL) + return (NULL); + child[children - 1] = nv; + } + + if (children < mindev) { + (void) fprintf(stderr, gettext("invalid vdev " + "specification: %s requires at least %d " + "devices\n"), argv[0], mindev); + return (NULL); + } + + argc -= c; + argv += c; + + if (strcmp(type, VDEV_TYPE_SPARE) == 0) { + spares = child; + nspares = children; + continue; + } else if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) { + l2cache = child; + nl2cache = children; + continue; + } else { + verify(nvlist_alloc(&nv, NV_UNIQUE_NAME, + 0) == 0); + verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE, + type) == 0); + verify(nvlist_add_uint64(nv, + ZPOOL_CONFIG_IS_LOG, is_log) == 0); + if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) { + verify(nvlist_add_uint64(nv, + ZPOOL_CONFIG_NPARITY, + mindev - 1) == 0); + } + verify(nvlist_add_nvlist_array(nv, + ZPOOL_CONFIG_CHILDREN, child, + children) == 0); + + for (c = 0; c < children; c++) + nvlist_free(child[c]); + free(child); + } + } else { + /* + * We have a device. Pass off to make_leaf_vdev() to + * construct the appropriate nvlist describing the vdev. 
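[Editor's sketch] construct_spec() walks argv with one rule: a grouping keyword (mirror, raidz, spare, log, cache) opens a new top-level vdev and consumes every following argument up to the next keyword, while anything else becomes a single-device top level. A standalone sketch of just that walk, printing the groupings instead of building nvlists and omitting the log and raidz-parity special cases; group_mindev() is a hypothetical name:

#include <stdio.h>
#include <string.h>

/* Returns the minimum device count for a grouping keyword, or 0 if none. */
static int
group_mindev(const char *arg)
{
    if (strcmp(arg, "mirror") == 0 || strcmp(arg, "raidz") == 0 ||
        strcmp(arg, "raidz1") == 0)
        return (2);
    if (strcmp(arg, "raidz2") == 0)
        return (3);
    if (strcmp(arg, "spare") == 0 || strcmp(arg, "log") == 0 ||
        strcmp(arg, "cache") == 0)
        return (1);
    return (0);
}

int
main(int argc, char **argv)
{
    argc--; argv++;
    while (argc > 0) {
        int mindev = group_mindev(argv[0]);
        int c, children = 0;

        if (mindev == 0) {
            (void) printf("leaf: %s\n", argv[0]);
            argc--; argv++;
            continue;
        }

        /* Consume leaves until the next grouping keyword. */
        for (c = 1; c < argc && group_mindev(argv[c]) == 0; c++)
            children++;

        if (children < mindev)
            (void) printf("%s requires at least %d devices\n",
                argv[0], mindev);
        else
            (void) printf("%s with %d children\n", argv[0], children);
        argc -= c;
        argv += c;
    }
    return (0);
}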
+ */ + if ((nv = make_leaf_vdev(argv[0], is_log)) == NULL) + return (NULL); + if (is_log) + nlogs++; + argc--; + argv++; + } + + toplevels++; + top = realloc(top, toplevels * sizeof (nvlist_t *)); + if (top == NULL) + zpool_no_memory(); + top[toplevels - 1] = nv; + } + + if (toplevels == 0 && nspares == 0 && nl2cache == 0) { + (void) fprintf(stderr, gettext("invalid vdev " + "specification: at least one toplevel vdev must be " + "specified\n")); + return (NULL); + } + + if (seen_logs && nlogs == 0) { + (void) fprintf(stderr, gettext("invalid vdev specification: " + "log requires at least 1 device\n")); + return (NULL); + } + + /* + * Finally, create nvroot and add all top-level vdevs to it. + */ + verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0); + verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, + VDEV_TYPE_ROOT) == 0); + verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + top, toplevels) == 0); + if (nspares != 0) + verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, + spares, nspares) == 0); + if (nl2cache != 0) + verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, + l2cache, nl2cache) == 0); + + for (t = 0; t < toplevels; t++) + nvlist_free(top[t]); + for (t = 0; t < nspares; t++) + nvlist_free(spares[t]); + for (t = 0; t < nl2cache; t++) + nvlist_free(l2cache[t]); + if (spares) + free(spares); + if (l2cache) + free(l2cache); + free(top); + + return (nvroot); +} + + +/* + * Get and validate the contents of the given vdev specification. This ensures + * that the nvlist returned is well-formed, that all the devices exist, and that + * they are not currently in use by any other known consumer. The 'poolconfig' + * parameter is the current configuration of the pool when adding devices + * existing pool, and is used to perform additional checks, such as changing the + * replication level of the pool. It can be 'NULL' to indicate that this is a + * new pool. The 'force' flag controls whether devices should be forcefully + * added, even if they appear in use. + */ +nvlist_t * +make_root_vdev(zpool_handle_t *zhp, int force, int check_rep, + boolean_t isreplacing, boolean_t dryrun, int argc, char **argv) +{ + nvlist_t *newroot; + nvlist_t *poolconfig = NULL; + is_force = force; + + /* + * Construct the vdev specification. If this is successful, we know + * that we have a valid specification, and that all devices can be + * opened. + */ + if ((newroot = construct_spec(argc, argv)) == NULL) + return (NULL); + + if (zhp && ((poolconfig = zpool_get_config(zhp, NULL)) == NULL)) + return (NULL); + + /* + * Validate each device to make sure that its not shared with another + * subsystem. We do this even if 'force' is set, because there are some + * uses (such as a dedicated dump device) that even '-f' cannot + * override. + */ + if (check_in_use(poolconfig, newroot, force, isreplacing, + B_FALSE) != 0) { + nvlist_free(newroot); + return (NULL); + } + + /* + * Check the replication level of the given vdevs and report any errors + * found. We include the existing pool spec, if any, as we need to + * catch changes against the existing replication level. + */ + if (check_rep && check_replication(poolconfig, newroot) != 0) { + nvlist_free(newroot); + return (NULL); + } + + /* + * Run through the vdev specification and label any whole disks found. 
+ */ + if (!dryrun && make_disks(zhp, newroot) != 0) { + nvlist_free(newroot); + return (NULL); + } + + return (newroot); +} diff --git a/external/cddl/osnet/dist/cmd/ztest/ztest.c b/external/cddl/osnet/dist/cmd/ztest/ztest.c new file mode 100644 index 000000000000..53cc6c7093b7 --- /dev/null +++ b/external/cddl/osnet/dist/cmd/ztest/ztest.c @@ -0,0 +1,3475 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * The objective of this program is to provide a DMU/ZAP/SPA stress test + * that runs entirely in userland, is easy to use, and easy to extend. + * + * The overall design of the ztest program is as follows: + * + * (1) For each major functional area (e.g. adding vdevs to a pool, + * creating and destroying datasets, reading and writing objects, etc) + * we have a simple routine to test that functionality. These + * individual routines do not have to do anything "stressful". + * + * (2) We turn these simple functionality tests into a stress test by + * running them all in parallel, with as many threads as desired, + * and spread across as many datasets, objects, and vdevs as desired. + * + * (3) While all this is happening, we inject faults into the pool to + * verify that self-healing data really works. + * + * (4) Every time we open a dataset, we change its checksum and compression + * functions. Thus even individual objects vary from block to block + * in which checksum they use and whether they're compressed. + * + * (5) To verify that we never lose on-disk consistency after a crash, + * we run the entire test in a child of the main process. + * At random times, the child self-immolates with a SIGKILL. + * This is the software equivalent of pulling the power cord. + * The parent then runs the test again, using the existing + * storage pool, as many times as desired. + * + * (6) To verify that we don't have future leaks or temporal incursions, + * many of the functional tests record the transaction group number + * as part of their data. When reading old data, they verify that + * the transaction group number is less than the current, open txg. + * If you add a new test, please do this if applicable. + * + * When run with no arguments, ztest runs for about five minutes and + * produces no output if successful. To get a little bit of information, + * specify -V. To get more information, specify -VV, and so on. + * + * To turn this into an overnight stress test, use -T to specify run time. + * + * You can ask more more vdevs [-v], datasets [-d], or threads [-t] + * to increase the pool capacity, fanout, and overall stress level. 
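[Editor's sketch] Design point (5) above, running the workload in a child and "pulling the power cord" at a random moment, reduces to a fork/kill/waitpid loop. A minimal self-contained sketch of that harness; run_pass(), the pass count, and the timings are made-up stand-ins, not values taken from ztest:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for one pass of the real workload. */
static void
run_pass(void)
{
    (void) sleep(5);
    _exit(0);
}

int
main(void)
{
    int pass, status;
    pid_t pid;

    srandom((unsigned)time(NULL));

    for (pass = 0; pass < 3; pass++) {
        if ((pid = fork()) == -1) {
            perror("fork");
            return (1);
        }
        if (pid == 0)
            run_pass();             /* child never returns */

        /* Parent: maybe kill the child at a random instant. */
        if (random() % 100 < 70) {  /* ~70% kill rate */
            (void) sleep((unsigned)(random() % 5));
            (void) kill(pid, SIGKILL);
        }
        (void) waitpid(pid, &status, 0);
        (void) printf("pass %d: child %s\n", pass,
            WIFSIGNALED(status) ? "killed" : "exited");
    }
    return (0);
}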
+ * + * The -N(okill) option will suppress kills, so each child runs to completion. + * This can be useful when you're trying to distinguish temporal incursions + * from plain old race conditions. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char cmdname[] = "ztest"; +static char *zopt_pool = cmdname; + +static uint64_t zopt_vdevs = 5; +static uint64_t zopt_vdevtime; +static int zopt_ashift = SPA_MINBLOCKSHIFT; +static int zopt_mirrors = 2; +static int zopt_raidz = 4; +static int zopt_raidz_parity = 1; +static size_t zopt_vdev_size = SPA_MINDEVSIZE; +static int zopt_datasets = 7; +static int zopt_threads = 23; +static uint64_t zopt_passtime = 60; /* 60 seconds */ +static uint64_t zopt_killrate = 70; /* 70% kill rate */ +static int zopt_verbose = 0; +static int zopt_init = 1; +static char *zopt_dir = "/tmp"; +static uint64_t zopt_time = 300; /* 5 minutes */ +static int zopt_maxfaults; + +typedef struct ztest_block_tag { + uint64_t bt_objset; + uint64_t bt_object; + uint64_t bt_offset; + uint64_t bt_txg; + uint64_t bt_thread; + uint64_t bt_seq; +} ztest_block_tag_t; + +typedef struct ztest_args { + char za_pool[MAXNAMELEN]; + spa_t *za_spa; + objset_t *za_os; + zilog_t *za_zilog; + thread_t za_thread; + uint64_t za_instance; + uint64_t za_random; + uint64_t za_diroff; + uint64_t za_diroff_shared; + uint64_t za_zil_seq; + hrtime_t za_start; + hrtime_t za_stop; + hrtime_t za_kill; + /* + * Thread-local variables can go here to aid debugging. + */ + ztest_block_tag_t za_rbt; + ztest_block_tag_t za_wbt; + dmu_object_info_t za_doi; + dmu_buf_t *za_dbuf; +} ztest_args_t; + +typedef void ztest_func_t(ztest_args_t *); + +/* + * Note: these aren't static because we want dladdr() to work. 
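[Editor's sketch] The bt_txg field of ztest_block_tag_t carries design point (6): writers stamp each block with the txg it was written in, and readers assert that stamp is older than the currently open txg. A standalone sketch of that stamp-and-verify step, with a hypothetical open-txg value:

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

/* Trimmed-down version of ztest_block_tag_t: just the txg stamp. */
typedef struct block_tag {
    uint64_t bt_object;
    uint64_t bt_offset;
    uint64_t bt_txg;
} block_tag_t;

static void
stamp_tag(block_tag_t *bt, uint64_t object, uint64_t offset, uint64_t txg)
{
    bt->bt_object = object;
    bt->bt_offset = offset;
    bt->bt_txg = txg;
}

/* Old data must have been written in an already-synced txg. */
static void
verify_tag(const block_tag_t *bt, uint64_t open_txg)
{
    assert(bt->bt_txg < open_txg);      /* no "future leaks" */
}

int
main(void)
{
    block_tag_t bt;
    uint64_t open_txg = 42;             /* hypothetical open txg */

    stamp_tag(&bt, 1, 0, open_txg - 3);
    verify_tag(&bt, open_txg);
    (void) printf("object %" PRIu64 " written in txg %" PRIu64 "\n",
        bt.bt_object, bt.bt_txg);
    return (0);
}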
+ */ +ztest_func_t ztest_dmu_read_write; +ztest_func_t ztest_dmu_write_parallel; +ztest_func_t ztest_dmu_object_alloc_free; +ztest_func_t ztest_zap; +ztest_func_t ztest_zap_parallel; +ztest_func_t ztest_traverse; +ztest_func_t ztest_dsl_prop_get_set; +ztest_func_t ztest_dmu_objset_create_destroy; +ztest_func_t ztest_dmu_snapshot_create_destroy; +ztest_func_t ztest_spa_create_destroy; +ztest_func_t ztest_fault_inject; +ztest_func_t ztest_spa_rename; +ztest_func_t ztest_vdev_attach_detach; +ztest_func_t ztest_vdev_LUN_growth; +ztest_func_t ztest_vdev_add_remove; +ztest_func_t ztest_vdev_aux_add_remove; +ztest_func_t ztest_scrub; + +typedef struct ztest_info { + ztest_func_t *zi_func; /* test function */ + uint64_t zi_iters; /* iterations per execution */ + uint64_t *zi_interval; /* execute every seconds */ + uint64_t zi_calls; /* per-pass count */ + uint64_t zi_call_time; /* per-pass time */ + uint64_t zi_call_total; /* cumulative total */ + uint64_t zi_call_target; /* target cumulative total */ +} ztest_info_t; + +uint64_t zopt_always = 0; /* all the time */ +uint64_t zopt_often = 1; /* every second */ +uint64_t zopt_sometimes = 10; /* every 10 seconds */ +uint64_t zopt_rarely = 60; /* every 60 seconds */ + +ztest_info_t ztest_info[] = { + { ztest_dmu_read_write, 1, &zopt_always }, + { ztest_dmu_write_parallel, 30, &zopt_always }, + { ztest_dmu_object_alloc_free, 1, &zopt_always }, + { ztest_zap, 30, &zopt_always }, + { ztest_zap_parallel, 100, &zopt_always }, + { ztest_dsl_prop_get_set, 1, &zopt_sometimes }, + { ztest_dmu_objset_create_destroy, 1, &zopt_sometimes }, + { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes }, + { ztest_spa_create_destroy, 1, &zopt_sometimes }, + { ztest_fault_inject, 1, &zopt_sometimes }, + { ztest_spa_rename, 1, &zopt_rarely }, + { ztest_vdev_attach_detach, 1, &zopt_rarely }, + { ztest_vdev_LUN_growth, 1, &zopt_rarely }, + { ztest_vdev_add_remove, 1, &zopt_vdevtime }, + { ztest_vdev_aux_add_remove, 1, &zopt_vdevtime }, + { ztest_scrub, 1, &zopt_vdevtime }, +}; + +#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t)) + +#define ZTEST_SYNC_LOCKS 16 + +/* + * Stuff we need to share writably between parent and child. + */ +typedef struct ztest_shared { + mutex_t zs_vdev_lock; + rwlock_t zs_name_lock; + uint64_t zs_vdev_primaries; + uint64_t zs_vdev_aux; + uint64_t zs_enospc_count; + hrtime_t zs_start_time; + hrtime_t zs_stop_time; + uint64_t zs_alloc; + uint64_t zs_space; + ztest_info_t zs_info[ZTEST_FUNCS]; + mutex_t zs_sync_lock[ZTEST_SYNC_LOCKS]; + uint64_t zs_seq[ZTEST_SYNC_LOCKS]; +} ztest_shared_t; + +static char ztest_dev_template[] = "%s/%s.%llua"; +static char ztest_aux_template[] = "%s/%s.%s.%llu"; +static ztest_shared_t *ztest_shared; + +static int ztest_random_fd; +static int ztest_dump_core = 1; + +static boolean_t ztest_exiting; + +extern uint64_t metaslab_gang_bang; + +#define ZTEST_DIROBJ 1 +#define ZTEST_MICROZAP_OBJ 2 +#define ZTEST_FATZAP_OBJ 3 + +#define ZTEST_DIROBJ_BLOCKSIZE (1 << 10) +#define ZTEST_DIRSIZE 256 + +static void usage(boolean_t) __NORETURN; + +/* + * These libumem hooks provide a reasonable set of defaults for the allocator's + * debugging facilities. + */ +const char * +_umem_debug_init() +{ + return ("default,verbose"); /* $UMEM_DEBUG setting */ +} + +const char * +_umem_logging_init(void) +{ + return ("fail,contents"); /* $UMEM_LOGGING setting */ +} + +#define FATAL_MSG_SZ 1024 + +char *fatal_msg; + +static void +fatal(int do_perror, char *message, ...) 
+{ + va_list args; + int save_errno = errno; + char buf[FATAL_MSG_SZ]; + + (void) fflush(stdout); + + va_start(args, message); + (void) sprintf(buf, "ztest: "); + /* LINTED */ + (void) vsprintf(buf + strlen(buf), message, args); + va_end(args); + if (do_perror) { + (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf), + ": %s", strerror(save_errno)); + } + (void) fprintf(stderr, "%s\n", buf); + fatal_msg = buf; /* to ease debugging */ + if (ztest_dump_core) + abort(); + exit(3); +} + +static int +str2shift(const char *buf) +{ + const char *ends = "BKMGTPEZ"; + int i; + + if (buf[0] == '\0') + return (0); + for (i = 0; i < strlen(ends); i++) { + if (toupper(buf[0]) == ends[i]) + break; + } + if (i == strlen(ends)) { + (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", + buf); + usage(B_FALSE); + } + if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) { + return (10*i); + } + (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf); + usage(B_FALSE); + /* NOTREACHED */ +} + +static uint64_t +nicenumtoull(const char *buf) +{ + char *end; + uint64_t val; + + val = strtoull(buf, &end, 0); + if (end == buf) { + (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf); + usage(B_FALSE); + } else if (end[0] == '.') { + double fval = strtod(buf, &end); + fval *= pow(2, str2shift(end)); + if (fval > UINT64_MAX) { + (void) fprintf(stderr, "ztest: value too large: %s\n", + buf); + usage(B_FALSE); + } + val = (uint64_t)fval; + } else { + int shift = str2shift(end); + if (shift >= 64 || (val << shift) >> shift != val) { + (void) fprintf(stderr, "ztest: value too large: %s\n", + buf); + usage(B_FALSE); + } + val <<= shift; + } + return (val); +} + +static void +usage(boolean_t requested) +{ + char nice_vdev_size[10]; + char nice_gang_bang[10]; + FILE *fp = requested ? stdout : stderr; + + nicenum(zopt_vdev_size, nice_vdev_size); + nicenum(metaslab_gang_bang, nice_gang_bang); + + (void) fprintf(fp, "Usage: %s\n" + "\t[-v vdevs (default: %llu)]\n" + "\t[-s size_of_each_vdev (default: %s)]\n" + "\t[-a alignment_shift (default: %d) (use 0 for random)]\n" + "\t[-m mirror_copies (default: %d)]\n" + "\t[-r raidz_disks (default: %d)]\n" + "\t[-R raidz_parity (default: %d)]\n" + "\t[-d datasets (default: %d)]\n" + "\t[-t threads (default: %d)]\n" + "\t[-g gang_block_threshold (default: %s)]\n" + "\t[-i initialize pool i times (default: %d)]\n" + "\t[-k kill percentage (default: %llu%%)]\n" + "\t[-p pool_name (default: %s)]\n" + "\t[-f file directory for vdev files (default: %s)]\n" + "\t[-V(erbose)] (use multiple times for ever more blather)\n" + "\t[-E(xisting)] (use existing pool instead of creating new one)\n" + "\t[-T time] total run time (default: %llu sec)\n" + "\t[-P passtime] time per pass (default: %llu sec)\n" + "\t[-h] (print help)\n" + "", + cmdname, + (u_longlong_t)zopt_vdevs, /* -v */ + nice_vdev_size, /* -s */ + zopt_ashift, /* -a */ + zopt_mirrors, /* -m */ + zopt_raidz, /* -r */ + zopt_raidz_parity, /* -R */ + zopt_datasets, /* -d */ + zopt_threads, /* -t */ + nice_gang_bang, /* -g */ + zopt_init, /* -i */ + (u_longlong_t)zopt_killrate, /* -k */ + zopt_pool, /* -p */ + zopt_dir, /* -f */ + (u_longlong_t)zopt_time, /* -T */ + (u_longlong_t)zopt_passtime); /* -P */ + exit(requested ? 
0 : 1); +} + +static uint64_t +ztest_random(uint64_t range) +{ + uint64_t r; + + if (range == 0) + return (0); + + if (read(ztest_random_fd, &r, sizeof (r)) != sizeof (r)) + fatal(1, "short read from /dev/urandom"); + + return (r % range); +} + +static void +ztest_record_enospc(char *s) +{ + dprintf("ENOSPC doing: %s\n", s ? s : ""); + ztest_shared->zs_enospc_count++; +} + +static void +process_options(int argc, char **argv) +{ + int opt; + uint64_t value; + + /* By default, test gang blocks for blocks 32K and greater */ + metaslab_gang_bang = 32 << 10; + + while ((opt = getopt(argc, argv, + "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:h")) != EOF) { + value = 0; + switch (opt) { + case 'v': + case 's': + case 'a': + case 'm': + case 'r': + case 'R': + case 'd': + case 't': + case 'g': + case 'i': + case 'k': + case 'T': + case 'P': + value = nicenumtoull(optarg); + } + switch (opt) { + case 'v': + zopt_vdevs = value; + break; + case 's': + zopt_vdev_size = MAX(SPA_MINDEVSIZE, value); + break; + case 'a': + zopt_ashift = value; + break; + case 'm': + zopt_mirrors = value; + break; + case 'r': + zopt_raidz = MAX(1, value); + break; + case 'R': + zopt_raidz_parity = MIN(MAX(value, 1), 2); + break; + case 'd': + zopt_datasets = MAX(1, value); + break; + case 't': + zopt_threads = MAX(1, value); + break; + case 'g': + metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1, value); + break; + case 'i': + zopt_init = value; + break; + case 'k': + zopt_killrate = value; + break; + case 'p': + zopt_pool = strdup(optarg); + break; + case 'f': + zopt_dir = strdup(optarg); + break; + case 'V': + zopt_verbose++; + break; + case 'E': + zopt_init = 0; + break; + case 'T': + zopt_time = value; + break; + case 'P': + zopt_passtime = MAX(1, value); + break; + case 'h': + usage(B_TRUE); + break; + case '?': + default: + usage(B_FALSE); + break; + } + } + + zopt_raidz_parity = MIN(zopt_raidz_parity, zopt_raidz - 1); + + zopt_vdevtime = (zopt_vdevs > 0 ? 
zopt_time / zopt_vdevs : UINT64_MAX); + zopt_maxfaults = MAX(zopt_mirrors, 1) * (zopt_raidz_parity + 1) - 1; +} + +static uint64_t +ztest_get_ashift(void) +{ + if (zopt_ashift == 0) + return (SPA_MINBLOCKSHIFT + ztest_random(3)); + return (zopt_ashift); +} + +static nvlist_t * +make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift) +{ + char pathbuf[MAXPATHLEN]; + uint64_t vdev; + nvlist_t *file; + + if (ashift == 0) + ashift = ztest_get_ashift(); + + if (path == NULL) { + path = pathbuf; + + if (aux != NULL) { + vdev = ztest_shared->zs_vdev_aux; + (void) sprintf(path, ztest_aux_template, + zopt_dir, zopt_pool, aux, vdev); + } else { + vdev = ztest_shared->zs_vdev_primaries++; + (void) sprintf(path, ztest_dev_template, + zopt_dir, zopt_pool, vdev); + } + } + + if (size != 0) { + int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666); + if (fd == -1) + fatal(1, "can't open %s", path); + if (ftruncate(fd, size) != 0) + fatal(1, "can't ftruncate %s", path); + (void) close(fd); + } + + VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0); + VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0); + VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0); + VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0); + + return (file); +} + +static nvlist_t * +make_vdev_raidz(char *path, char *aux, size_t size, uint64_t ashift, int r) +{ + nvlist_t *raidz, **child; + int c; + + if (r < 2) + return (make_vdev_file(path, aux, size, ashift)); + child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL); + + for (c = 0; c < r; c++) + child[c] = make_vdev_file(path, aux, size, ashift); + + VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0); + VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE, + VDEV_TYPE_RAIDZ) == 0); + VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY, + zopt_raidz_parity) == 0); + VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN, + child, r) == 0); + + for (c = 0; c < r; c++) + nvlist_free(child[c]); + + umem_free(child, r * sizeof (nvlist_t *)); + + return (raidz); +} + +static nvlist_t * +make_vdev_mirror(char *path, char *aux, size_t size, uint64_t ashift, + int r, int m) +{ + nvlist_t *mirror, **child; + int c; + + if (m < 1) + return (make_vdev_raidz(path, aux, size, ashift, r)); + + child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL); + + for (c = 0; c < m; c++) + child[c] = make_vdev_raidz(path, aux, size, ashift, r); + + VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0); + VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, + VDEV_TYPE_MIRROR) == 0); + VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN, + child, m) == 0); + + for (c = 0; c < m; c++) + nvlist_free(child[c]); + + umem_free(child, m * sizeof (nvlist_t *)); + + return (mirror); +} + +static nvlist_t * +make_vdev_root(char *path, char *aux, size_t size, uint64_t ashift, + int log, int r, int m, int t) +{ + nvlist_t *root, **child; + int c; + + ASSERT(t > 0); + + child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL); + + for (c = 0; c < t; c++) { + child[c] = make_vdev_mirror(path, aux, size, ashift, r, m); + VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG, + log) == 0); + } + + VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0); + VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0); + VERIFY(nvlist_add_nvlist_array(root, aux ? 
aux : ZPOOL_CONFIG_CHILDREN, + child, t) == 0); + + for (c = 0; c < t; c++) + nvlist_free(child[c]); + + umem_free(child, t * sizeof (nvlist_t *)); + + return (root); +} + +static void +ztest_set_random_blocksize(objset_t *os, uint64_t object, dmu_tx_t *tx) +{ + int bs = SPA_MINBLOCKSHIFT + + ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1); + int ibs = DN_MIN_INDBLKSHIFT + + ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1); + int error; + + error = dmu_object_set_blocksize(os, object, 1ULL << bs, ibs, tx); + if (error) { + char osname[300]; + dmu_objset_name(os, osname); + fatal(0, "dmu_object_set_blocksize('%s', %llu, %d, %d) = %d", + osname, object, 1 << bs, ibs, error); + } +} + +static uint8_t +ztest_random_checksum(void) +{ + uint8_t checksum; + + do { + checksum = ztest_random(ZIO_CHECKSUM_FUNCTIONS); + } while (zio_checksum_table[checksum].ci_zbt); + + if (checksum == ZIO_CHECKSUM_OFF) + checksum = ZIO_CHECKSUM_ON; + + return (checksum); +} + +static uint8_t +ztest_random_compress(void) +{ + return ((uint8_t)ztest_random(ZIO_COMPRESS_FUNCTIONS)); +} + +typedef struct ztest_replay { + objset_t *zr_os; + uint64_t zr_assign; +} ztest_replay_t; + +static int +ztest_replay_create(ztest_replay_t *zr, lr_create_t *lr, boolean_t byteswap) +{ + objset_t *os = zr->zr_os; + dmu_tx_t *tx; + int error; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + tx = dmu_tx_create(os); + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + error = dmu_tx_assign(tx, zr->zr_assign); + if (error) { + dmu_tx_abort(tx); + return (error); + } + + error = dmu_object_claim(os, lr->lr_doid, lr->lr_mode, 0, + DMU_OT_NONE, 0, tx); + ASSERT3U(error, ==, 0); + dmu_tx_commit(tx); + + if (zopt_verbose >= 5) { + char osname[MAXNAMELEN]; + dmu_objset_name(os, osname); + (void) printf("replay create of %s object %llu" + " in txg %llu = %d\n", + osname, (u_longlong_t)lr->lr_doid, + (u_longlong_t)zr->zr_assign, error); + } + + return (error); +} + +static int +ztest_replay_remove(ztest_replay_t *zr, lr_remove_t *lr, boolean_t byteswap) +{ + objset_t *os = zr->zr_os; + dmu_tx_t *tx; + int error; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + tx = dmu_tx_create(os); + dmu_tx_hold_free(tx, lr->lr_doid, 0, DMU_OBJECT_END); + error = dmu_tx_assign(tx, zr->zr_assign); + if (error) { + dmu_tx_abort(tx); + return (error); + } + + error = dmu_object_free(os, lr->lr_doid, tx); + dmu_tx_commit(tx); + + return (error); +} + +zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = { + NULL, /* 0 no such transaction type */ + ztest_replay_create, /* TX_CREATE */ + NULL, /* TX_MKDIR */ + NULL, /* TX_MKXATTR */ + NULL, /* TX_SYMLINK */ + ztest_replay_remove, /* TX_REMOVE */ + NULL, /* TX_RMDIR */ + NULL, /* TX_LINK */ + NULL, /* TX_RENAME */ + NULL, /* TX_WRITE */ + NULL, /* TX_TRUNCATE */ + NULL, /* TX_SETATTR */ + NULL, /* TX_ACL */ +}; + +/* + * Verify that we can't destroy an active pool, create an existing pool, + * or create a pool with a bad vdev spec. + */ +void +ztest_spa_create_destroy(ztest_args_t *za) +{ + int error; + spa_t *spa; + nvlist_t *nvroot; + + /* + * Attempt to create using a bad file. + */ + nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); + error = spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL); + nvlist_free(nvroot); + if (error != ENOENT) + fatal(0, "spa_create(bad_file) = %d", error); + + /* + * Attempt to create using a bad mirror. 
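[Editor's sketch] The make_vdev_file/raidz/mirror/root helpers above all follow the same libnvpair recipe: allocate a UNIQUE_NAME nvlist, set its type, and attach the children as an nvlist array. A compact sketch of that recipe building one two-way mirror of file vdevs; it assumes the libnvpair and sys/fs/zfs.h headers this patch already depends on, and build_file_vdev()/build_mirror() are hypothetical names:

#include <stdlib.h>
#include <libnvpair.h>
#include <sys/fs/zfs.h>

/* Mirrors the verify() idiom used in the patch: these calls only fail on ENOMEM. */
static void
chk(int err)
{
    if (err != 0)
        abort();
}

static nvlist_t *
build_file_vdev(const char *path)
{
    nvlist_t *file;

    chk(nvlist_alloc(&file, NV_UNIQUE_NAME, 0));
    chk(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE));
    chk(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path));
    return (file);
}

static nvlist_t *
build_mirror(const char *path_a, const char *path_b)
{
    nvlist_t *mirror, *child[2];
    int c;

    child[0] = build_file_vdev(path_a);
    child[1] = build_file_vdev(path_b);

    chk(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0));
    chk(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE, VDEV_TYPE_MIRROR));
    chk(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN, child, 2));

    /* The children were copied into 'mirror'; free the originals. */
    for (c = 0; c < 2; c++)
        nvlist_free(child[c]);

    return (mirror);
}

int
main(void)
{
    nvlist_t *mirror = build_mirror("/var/tmp/vdev.a", "/var/tmp/vdev.b");

    nvlist_free(mirror);
    return (0);
}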
+ */ + nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1); + error = spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL); + nvlist_free(nvroot); + if (error != ENOENT) + fatal(0, "spa_create(bad_mirror) = %d", error); + + /* + * Attempt to create an existing pool. It shouldn't matter + * what's in the nvroot; we should fail with EEXIST. + */ + (void) rw_rdlock(&ztest_shared->zs_name_lock); + nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1); + error = spa_create(za->za_pool, nvroot, NULL, NULL, NULL); + nvlist_free(nvroot); + if (error != EEXIST) + fatal(0, "spa_create(whatever) = %d", error); + + error = spa_open(za->za_pool, &spa, FTAG); + if (error) + fatal(0, "spa_open() = %d", error); + + error = spa_destroy(za->za_pool); + if (error != EBUSY) + fatal(0, "spa_destroy() = %d", error); + + spa_close(spa, FTAG); + (void) rw_unlock(&ztest_shared->zs_name_lock); +} + +static vdev_t * +vdev_lookup_by_path(vdev_t *vd, const char *path) +{ + vdev_t *mvd; + + if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0) + return (vd); + + for (int c = 0; c < vd->vdev_children; c++) + if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) != + NULL) + return (mvd); + + return (NULL); +} + +/* + * Verify that vdev_add() works as expected. + */ +void +ztest_vdev_add_remove(ztest_args_t *za) +{ + spa_t *spa = za->za_spa; + uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz; + nvlist_t *nvroot; + int error; + + (void) mutex_lock(&ztest_shared->zs_vdev_lock); + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + + ztest_shared->zs_vdev_primaries = + spa->spa_root_vdev->vdev_children * leaves; + + spa_config_exit(spa, SCL_VDEV, FTAG); + + /* + * Make 1/4 of the devices be log devices. + */ + nvroot = make_vdev_root(NULL, NULL, zopt_vdev_size, 0, + ztest_random(4) == 0, zopt_raidz, zopt_mirrors, 1); + + error = spa_vdev_add(spa, nvroot); + nvlist_free(nvroot); + + (void) mutex_unlock(&ztest_shared->zs_vdev_lock); + + if (error == ENOSPC) + ztest_record_enospc("spa_vdev_add"); + else if (error != 0) + fatal(0, "spa_vdev_add() = %d", error); +} + +/* + * Verify that adding/removing aux devices (l2arc, hot spare) works as expected. + */ +void +ztest_vdev_aux_add_remove(ztest_args_t *za) +{ + spa_t *spa = za->za_spa; + vdev_t *rvd = spa->spa_root_vdev; + spa_aux_vdev_t *sav; + char *aux; + uint64_t guid = 0; + int error; + + if (ztest_random(2) == 0) { + sav = &spa->spa_spares; + aux = ZPOOL_CONFIG_SPARES; + } else { + sav = &spa->spa_l2cache; + aux = ZPOOL_CONFIG_L2CACHE; + } + + (void) mutex_lock(&ztest_shared->zs_vdev_lock); + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + + if (sav->sav_count != 0 && ztest_random(4) == 0) { + /* + * Pick a random device to remove. + */ + guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid; + } else { + /* + * Find an unused device we can add. + */ + ztest_shared->zs_vdev_aux = 0; + for (;;) { + char path[MAXPATHLEN]; + int c; + (void) sprintf(path, ztest_aux_template, zopt_dir, + zopt_pool, aux, ztest_shared->zs_vdev_aux); + for (c = 0; c < sav->sav_count; c++) + if (strcmp(sav->sav_vdevs[c]->vdev_path, + path) == 0) + break; + if (c == sav->sav_count && + vdev_lookup_by_path(rvd, path) == NULL) + break; + ztest_shared->zs_vdev_aux++; + } + } + + spa_config_exit(spa, SCL_VDEV, FTAG); + + if (guid == 0) { + /* + * Add a new device. 
+ */ + nvlist_t *nvroot = make_vdev_root(NULL, aux, + (zopt_vdev_size * 5) / 4, 0, 0, 0, 0, 1); + error = spa_vdev_add(spa, nvroot); + if (error != 0) + fatal(0, "spa_vdev_add(%p) = %d", nvroot, error); + nvlist_free(nvroot); + } else { + /* + * Remove an existing device. Sometimes, dirty its + * vdev state first to make sure we handle removal + * of devices that have pending state changes. + */ + if (ztest_random(2) == 0) + (void) vdev_online(spa, guid, B_FALSE, NULL); + + error = spa_vdev_remove(spa, guid, B_FALSE); + if (error != 0 && error != EBUSY) + fatal(0, "spa_vdev_remove(%llu) = %d", guid, error); + } + + (void) mutex_unlock(&ztest_shared->zs_vdev_lock); +} + +/* + * Verify that we can attach and detach devices. + */ +void +ztest_vdev_attach_detach(ztest_args_t *za) +{ + spa_t *spa = za->za_spa; + spa_aux_vdev_t *sav = &spa->spa_spares; + vdev_t *rvd = spa->spa_root_vdev; + vdev_t *oldvd, *newvd, *pvd; + nvlist_t *root; + uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz; + uint64_t leaf, top; + uint64_t ashift = ztest_get_ashift(); + uint64_t oldguid; + size_t oldsize, newsize; + char oldpath[MAXPATHLEN], newpath[MAXPATHLEN]; + int replacing; + int oldvd_has_siblings = B_FALSE; + int newvd_is_spare = B_FALSE; + int oldvd_is_log; + int error, expected_error; + + (void) mutex_lock(&ztest_shared->zs_vdev_lock); + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + + /* + * Decide whether to do an attach or a replace. + */ + replacing = ztest_random(2); + + /* + * Pick a random top-level vdev. + */ + top = ztest_random(rvd->vdev_children); + + /* + * Pick a random leaf within it. + */ + leaf = ztest_random(leaves); + + /* + * Locate this vdev. + */ + oldvd = rvd->vdev_child[top]; + if (zopt_mirrors >= 1) + oldvd = oldvd->vdev_child[leaf / zopt_raidz]; + if (zopt_raidz > 1) + oldvd = oldvd->vdev_child[leaf % zopt_raidz]; + + /* + * If we're already doing an attach or replace, oldvd may be a + * mirror vdev -- in which case, pick a random child. + */ + while (oldvd->vdev_children != 0) { + oldvd_has_siblings = B_TRUE; + ASSERT(oldvd->vdev_children == 2); + oldvd = oldvd->vdev_child[ztest_random(2)]; + } + + oldguid = oldvd->vdev_guid; + oldsize = vdev_get_rsize(oldvd); + oldvd_is_log = oldvd->vdev_top->vdev_islog; + (void) strcpy(oldpath, oldvd->vdev_path); + pvd = oldvd->vdev_parent; + + /* + * If oldvd has siblings, then half of the time, detach it. + */ + if (oldvd_has_siblings && ztest_random(2) == 0) { + spa_config_exit(spa, SCL_VDEV, FTAG); + error = spa_vdev_detach(spa, oldguid, B_FALSE); + if (error != 0 && error != ENODEV && error != EBUSY) + fatal(0, "detach (%s) returned %d", + oldpath, error); + (void) mutex_unlock(&ztest_shared->zs_vdev_lock); + return; + } + + /* + * For the new vdev, choose with equal probability between the two + * standard paths (ending in either 'a' or 'b') or a random hot spare. + */ + if (sav->sav_count != 0 && ztest_random(3) == 0) { + newvd = sav->sav_vdevs[ztest_random(sav->sav_count)]; + newvd_is_spare = B_TRUE; + (void) strcpy(newpath, newvd->vdev_path); + } else { + (void) snprintf(newpath, sizeof (newpath), ztest_dev_template, + zopt_dir, zopt_pool, top * leaves + leaf); + if (ztest_random(2) == 0) + newpath[strlen(newpath) - 1] = 'b'; + newvd = vdev_lookup_by_path(rvd, newpath); + } + + if (newvd) { + newsize = vdev_get_rsize(newvd); + } else { + /* + * Make newsize a little bigger or smaller than oldsize. + * If it's smaller, the attach should fail. 
+ * If it's larger, and we're doing a replace, + * we should get dynamic LUN growth when we're done. + */ + newsize = 10 * oldsize / (9 + ztest_random(3)); + } + + /* + * If pvd is not a mirror or root, the attach should fail with ENOTSUP, + * unless it's a replace; in that case any non-replacing parent is OK. + * + * If newvd is already part of the pool, it should fail with EBUSY. + * + * If newvd is too small, it should fail with EOVERFLOW. + */ + if (pvd->vdev_ops != &vdev_mirror_ops && + pvd->vdev_ops != &vdev_root_ops && (!replacing || + pvd->vdev_ops == &vdev_replacing_ops || + pvd->vdev_ops == &vdev_spare_ops)) + expected_error = ENOTSUP; + else if (newvd_is_spare && (!replacing || oldvd_is_log)) + expected_error = ENOTSUP; + else if (newvd == oldvd) + expected_error = replacing ? 0 : EBUSY; + else if (vdev_lookup_by_path(rvd, newpath) != NULL) + expected_error = EBUSY; + else if (newsize < oldsize) + expected_error = EOVERFLOW; + else if (ashift > oldvd->vdev_top->vdev_ashift) + expected_error = EDOM; + else + expected_error = 0; + + spa_config_exit(spa, SCL_VDEV, FTAG); + + /* + * Build the nvlist describing newpath. + */ + root = make_vdev_root(newpath, NULL, newvd == NULL ? newsize : 0, + ashift, 0, 0, 0, 1); + + error = spa_vdev_attach(spa, oldguid, root, replacing); + + nvlist_free(root); + + /* + * If our parent was the replacing vdev, but the replace completed, + * then instead of failing with ENOTSUP we may either succeed, + * fail with ENODEV, or fail with EOVERFLOW. + */ + if (expected_error == ENOTSUP && + (error == 0 || error == ENODEV || error == EOVERFLOW)) + expected_error = error; + + /* + * If someone grew the LUN, the replacement may be too small. + */ + if (error == EOVERFLOW || error == EBUSY) + expected_error = error; + + /* XXX workaround 6690467 */ + if (error != expected_error && expected_error != EBUSY) { + fatal(0, "attach (%s %llu, %s %llu, %d) " + "returned %d, expected %d", + oldpath, (longlong_t)oldsize, newpath, + (longlong_t)newsize, replacing, error, expected_error); + } + + (void) mutex_unlock(&ztest_shared->zs_vdev_lock); +} + +/* + * Verify that dynamic LUN growth works as expected. + */ +/* ARGSUSED */ +void +ztest_vdev_LUN_growth(ztest_args_t *za) +{ + spa_t *spa = za->za_spa; + char dev_name[MAXPATHLEN]; + uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz; + uint64_t vdev; + size_t fsize; + int fd; + + (void) mutex_lock(&ztest_shared->zs_vdev_lock); + + /* + * Pick a random leaf vdev. + */ + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + vdev = ztest_random(spa->spa_root_vdev->vdev_children * leaves); + spa_config_exit(spa, SCL_VDEV, FTAG); + + (void) sprintf(dev_name, ztest_dev_template, zopt_dir, zopt_pool, vdev); + + if ((fd = open(dev_name, O_RDWR)) != -1) { + /* + * Determine the size. + */ + fsize = lseek(fd, 0, SEEK_END); + + /* + * If it's less than 2x the original size, grow by around 3%. + */ + if (fsize < 2 * zopt_vdev_size) { + size_t newsize = fsize + ztest_random(fsize / 32); + (void) ftruncate(fd, newsize); + if (zopt_verbose >= 6) { + (void) printf("%s grew from %lu to %lu bytes\n", + dev_name, (ulong_t)fsize, (ulong_t)newsize); + } + } + (void) close(fd); + } + + (void) mutex_unlock(&ztest_shared->zs_vdev_lock); +} + +/* ARGSUSED */ +static void +ztest_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) +{ + /* + * Create the directory object. 
+ */ + VERIFY(dmu_object_claim(os, ZTEST_DIROBJ, + DMU_OT_UINT64_OTHER, ZTEST_DIROBJ_BLOCKSIZE, + DMU_OT_UINT64_OTHER, 5 * sizeof (ztest_block_tag_t), tx) == 0); + + VERIFY(zap_create_claim(os, ZTEST_MICROZAP_OBJ, + DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); + + VERIFY(zap_create_claim(os, ZTEST_FATZAP_OBJ, + DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0); +} + +static int +ztest_destroy_cb(char *name, void *arg) +{ + ztest_args_t *za = arg; + objset_t *os; + dmu_object_info_t *doi = &za->za_doi; + int error; + + /* + * Verify that the dataset contains a directory object. + */ + error = dmu_objset_open(name, DMU_OST_OTHER, + DS_MODE_USER | DS_MODE_READONLY, &os); + ASSERT3U(error, ==, 0); + error = dmu_object_info(os, ZTEST_DIROBJ, doi); + if (error != ENOENT) { + /* We could have crashed in the middle of destroying it */ + ASSERT3U(error, ==, 0); + ASSERT3U(doi->doi_type, ==, DMU_OT_UINT64_OTHER); + ASSERT3S(doi->doi_physical_blks, >=, 0); + } + dmu_objset_close(os); + + /* + * Destroy the dataset. + */ + error = dmu_objset_destroy(name); + if (error) { + (void) dmu_objset_open(name, DMU_OST_OTHER, + DS_MODE_USER | DS_MODE_READONLY, &os); + fatal(0, "dmu_objset_destroy(os=%p) = %d\n", &os, error); + } + return (0); +} + +/* + * Verify that dmu_objset_{create,destroy,open,close} work as expected. + */ +static uint64_t +ztest_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t object, int mode) +{ + itx_t *itx; + lr_create_t *lr; + size_t namesize; + char name[24]; + + (void) sprintf(name, "ZOBJ_%llu", (u_longlong_t)object); + namesize = strlen(name) + 1; + + itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize + + ztest_random(ZIL_MAX_BLKSZ)); + lr = (lr_create_t *)&itx->itx_lr; + bzero(lr + 1, lr->lr_common.lrc_reclen - sizeof (*lr)); + lr->lr_doid = object; + lr->lr_foid = 0; + lr->lr_mode = mode; + lr->lr_uid = 0; + lr->lr_gid = 0; + lr->lr_gen = dmu_tx_get_txg(tx); + lr->lr_crtime[0] = time(NULL); + lr->lr_crtime[1] = 0; + lr->lr_rdev = 0; + bcopy(name, (char *)(lr + 1), namesize); + + return (zil_itx_assign(zilog, itx, tx)); +} + +void +ztest_dmu_objset_create_destroy(ztest_args_t *za) +{ + int error; + objset_t *os, *os2; + char name[100]; + int basemode, expected_error; + zilog_t *zilog; + uint64_t seq; + uint64_t objects; + ztest_replay_t zr; + + (void) rw_rdlock(&ztest_shared->zs_name_lock); + (void) snprintf(name, 100, "%s/%s_temp_%llu", za->za_pool, za->za_pool, + (u_longlong_t)za->za_instance); + + basemode = DS_MODE_TYPE(za->za_instance); + if (basemode != DS_MODE_USER && basemode != DS_MODE_OWNER) + basemode = DS_MODE_USER; + + /* + * If this dataset exists from a previous run, process its replay log + * half of the time. If we don't replay it, then dmu_objset_destroy() + * (invoked from ztest_destroy_cb() below) should just throw it away. + */ + if (ztest_random(2) == 0 && + dmu_objset_open(name, DMU_OST_OTHER, DS_MODE_OWNER, &os) == 0) { + zr.zr_os = os; + zil_replay(os, &zr, &zr.zr_assign, ztest_replay_vector, NULL); + dmu_objset_close(os); + } + + /* + * There may be an old instance of the dataset we're about to + * create lying around from a previous run. If so, destroy it + * and all of its snapshots. + */ + (void) dmu_objset_find(name, ztest_destroy_cb, za, + DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); + + /* + * Verify that the destroyed dataset is no longer in the namespace. 
+ */ + error = dmu_objset_open(name, DMU_OST_OTHER, basemode, &os); + if (error != ENOENT) + fatal(1, "dmu_objset_open(%s) found destroyed dataset %p", + name, os); + + /* + * Verify that we can create a new dataset. + */ + error = dmu_objset_create(name, DMU_OST_OTHER, NULL, 0, + ztest_create_cb, NULL); + if (error) { + if (error == ENOSPC) { + ztest_record_enospc("dmu_objset_create"); + (void) rw_unlock(&ztest_shared->zs_name_lock); + return; + } + fatal(0, "dmu_objset_create(%s) = %d", name, error); + } + + error = dmu_objset_open(name, DMU_OST_OTHER, basemode, &os); + if (error) { + fatal(0, "dmu_objset_open(%s) = %d", name, error); + } + + /* + * Open the intent log for it. + */ + zilog = zil_open(os, NULL); + + /* + * Put a random number of objects in there. + */ + objects = ztest_random(20); + seq = 0; + while (objects-- != 0) { + uint64_t object; + dmu_tx_t *tx = dmu_tx_create(os); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, sizeof (name)); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + dmu_tx_abort(tx); + } else { + object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0, + DMU_OT_NONE, 0, tx); + ztest_set_random_blocksize(os, object, tx); + seq = ztest_log_create(zilog, tx, object, + DMU_OT_UINT64_OTHER); + dmu_write(os, object, 0, sizeof (name), name, tx); + dmu_tx_commit(tx); + } + if (ztest_random(5) == 0) { + zil_commit(zilog, seq, object); + } + if (ztest_random(100) == 0) { + error = zil_suspend(zilog); + if (error == 0) { + zil_resume(zilog); + } + } + } + + /* + * Verify that we cannot create an existing dataset. + */ + error = dmu_objset_create(name, DMU_OST_OTHER, NULL, 0, NULL, NULL); + if (error != EEXIST) + fatal(0, "created existing dataset, error = %d", error); + + /* + * Verify that multiple dataset holds are allowed, but only when + * the new access mode is compatible with the base mode. + */ + if (basemode == DS_MODE_OWNER) { + error = dmu_objset_open(name, DMU_OST_OTHER, DS_MODE_USER, + &os2); + if (error) + fatal(0, "dmu_objset_open('%s') = %d", name, error); + else + dmu_objset_close(os2); + } + error = dmu_objset_open(name, DMU_OST_OTHER, DS_MODE_OWNER, &os2); + expected_error = (basemode == DS_MODE_OWNER) ? EBUSY : 0; + if (error != expected_error) + fatal(0, "dmu_objset_open('%s') = %d, expected %d", + name, error, expected_error); + if (error == 0) + dmu_objset_close(os2); + + zil_close(zilog); + dmu_objset_close(os); + + error = dmu_objset_destroy(name); + if (error) + fatal(0, "dmu_objset_destroy(%s) = %d", name, error); + + (void) rw_unlock(&ztest_shared->zs_name_lock); +} + +/* + * Verify that dmu_snapshot_{create,destroy,open,close} work as expected. + */ +void +ztest_dmu_snapshot_create_destroy(ztest_args_t *za) +{ + int error; + objset_t *os = za->za_os; + char snapname[100]; + char osname[MAXNAMELEN]; + + (void) rw_rdlock(&ztest_shared->zs_name_lock); + dmu_objset_name(os, osname); + (void) snprintf(snapname, 100, "%s@%llu", osname, + (u_longlong_t)za->za_instance); + + error = dmu_objset_destroy(snapname); + if (error != 0 && error != ENOENT) + fatal(0, "dmu_objset_destroy() = %d", error); + error = dmu_objset_snapshot(osname, strchr(snapname, '@')+1, FALSE); + if (error == ENOSPC) + ztest_record_enospc("dmu_take_snapshot"); + else if (error != 0 && error != EEXIST) + fatal(0, "dmu_take_snapshot() = %d", error); + (void) rw_unlock(&ztest_shared->zs_name_lock); +} + +/* + * Verify that dmu_object_{alloc,free} work as expected. 
+ */ +void +ztest_dmu_object_alloc_free(ztest_args_t *za) +{ + objset_t *os = za->za_os; + dmu_buf_t *db; + dmu_tx_t *tx; + uint64_t batchobj, object, batchsize, endoff, temp; + int b, c, error, bonuslen; + dmu_object_info_t *doi = &za->za_doi; + char osname[MAXNAMELEN]; + + dmu_objset_name(os, osname); + + endoff = -8ULL; + batchsize = 2; + + /* + * Create a batch object if necessary, and record it in the directory. + */ + VERIFY3U(0, ==, dmu_read(os, ZTEST_DIROBJ, za->za_diroff, + sizeof (uint64_t), &batchobj)); + if (batchobj == 0) { + tx = dmu_tx_create(os); + dmu_tx_hold_write(tx, ZTEST_DIROBJ, za->za_diroff, + sizeof (uint64_t)); + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("create a batch object"); + dmu_tx_abort(tx); + return; + } + batchobj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0, + DMU_OT_NONE, 0, tx); + ztest_set_random_blocksize(os, batchobj, tx); + dmu_write(os, ZTEST_DIROBJ, za->za_diroff, + sizeof (uint64_t), &batchobj, tx); + dmu_tx_commit(tx); + } + + /* + * Destroy the previous batch of objects. + */ + for (b = 0; b < batchsize; b++) { + VERIFY3U(0, ==, dmu_read(os, batchobj, b * sizeof (uint64_t), + sizeof (uint64_t), &object)); + if (object == 0) + continue; + /* + * Read and validate contents. + * We expect the nth byte of the bonus buffer to be n. + */ + VERIFY(0 == dmu_bonus_hold(os, object, FTAG, &db)); + za->za_dbuf = db; + + dmu_object_info_from_db(db, doi); + ASSERT(doi->doi_type == DMU_OT_UINT64_OTHER); + ASSERT(doi->doi_bonus_type == DMU_OT_PLAIN_OTHER); + ASSERT3S(doi->doi_physical_blks, >=, 0); + + bonuslen = doi->doi_bonus_size; + + for (c = 0; c < bonuslen; c++) { + if (((uint8_t *)db->db_data)[c] != + (uint8_t)(c + bonuslen)) { + fatal(0, + "bad bonus: %s, obj %llu, off %d: %u != %u", + osname, object, c, + ((uint8_t *)db->db_data)[c], + (uint8_t)(c + bonuslen)); + } + } + + dmu_buf_rele(db, FTAG); + za->za_dbuf = NULL; + + /* + * We expect the word at endoff to be our object number. + */ + VERIFY(0 == dmu_read(os, object, endoff, + sizeof (uint64_t), &temp)); + + if (temp != object) { + fatal(0, "bad data in %s, got %llu, expected %llu", + osname, temp, object); + } + + /* + * Destroy old object and clear batch entry. + */ + tx = dmu_tx_create(os); + dmu_tx_hold_write(tx, batchobj, + b * sizeof (uint64_t), sizeof (uint64_t)); + dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("free object"); + dmu_tx_abort(tx); + return; + } + error = dmu_object_free(os, object, tx); + if (error) { + fatal(0, "dmu_object_free('%s', %llu) = %d", + osname, object, error); + } + object = 0; + + dmu_object_set_checksum(os, batchobj, + ztest_random_checksum(), tx); + dmu_object_set_compress(os, batchobj, + ztest_random_compress(), tx); + + dmu_write(os, batchobj, b * sizeof (uint64_t), + sizeof (uint64_t), &object, tx); + + dmu_tx_commit(tx); + } + + /* + * Before creating the new batch of objects, generate a bunch of churn. 
+ */ + for (b = ztest_random(100); b > 0; b--) { + tx = dmu_tx_create(os); + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("churn objects"); + dmu_tx_abort(tx); + return; + } + object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0, + DMU_OT_NONE, 0, tx); + ztest_set_random_blocksize(os, object, tx); + error = dmu_object_free(os, object, tx); + if (error) { + fatal(0, "dmu_object_free('%s', %llu) = %d", + osname, object, error); + } + dmu_tx_commit(tx); + } + + /* + * Create a new batch of objects with randomly chosen + * blocksizes and record them in the batch directory. + */ + for (b = 0; b < batchsize; b++) { + uint32_t va_blksize; + u_longlong_t va_nblocks; + + tx = dmu_tx_create(os); + dmu_tx_hold_write(tx, batchobj, b * sizeof (uint64_t), + sizeof (uint64_t)); + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, endoff, + sizeof (uint64_t)); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("create batchobj"); + dmu_tx_abort(tx); + return; + } + bonuslen = (int)ztest_random(dmu_bonus_max()) + 1; + + object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0, + DMU_OT_PLAIN_OTHER, bonuslen, tx); + + ztest_set_random_blocksize(os, object, tx); + + dmu_object_set_checksum(os, object, + ztest_random_checksum(), tx); + dmu_object_set_compress(os, object, + ztest_random_compress(), tx); + + dmu_write(os, batchobj, b * sizeof (uint64_t), + sizeof (uint64_t), &object, tx); + + /* + * Write to both the bonus buffer and the regular data. + */ + VERIFY(dmu_bonus_hold(os, object, FTAG, &db) == 0); + za->za_dbuf = db; + ASSERT3U(bonuslen, <=, db->db_size); + + dmu_object_size_from_db(db, &va_blksize, &va_nblocks); + ASSERT3S(va_nblocks, >=, 0); + + dmu_buf_will_dirty(db, tx); + + /* + * See comments above regarding the contents of + * the bonus buffer and the word at endoff. + */ + for (c = 0; c < bonuslen; c++) + ((uint8_t *)db->db_data)[c] = (uint8_t)(c + bonuslen); + + dmu_buf_rele(db, FTAG); + za->za_dbuf = NULL; + + /* + * Write to a large offset to increase indirection. + */ + dmu_write(os, object, endoff, sizeof (uint64_t), &object, tx); + + dmu_tx_commit(tx); + } +} + +/* + * Verify that dmu_{read,write} work as expected. + */ +typedef struct bufwad { + uint64_t bw_index; + uint64_t bw_txg; + uint64_t bw_data; +} bufwad_t; + +typedef struct dmu_read_write_dir { + uint64_t dd_packobj; + uint64_t dd_bigobj; + uint64_t dd_chunk; +} dmu_read_write_dir_t; + +void +ztest_dmu_read_write(ztest_args_t *za) +{ + objset_t *os = za->za_os; + dmu_read_write_dir_t dd; + dmu_tx_t *tx; + int i, freeit, error; + uint64_t n, s, txg; + bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT; + uint64_t packoff, packsize, bigoff, bigsize; + uint64_t regions = 997; + uint64_t stride = 123456789ULL; + uint64_t width = 40; + int free_percent = 5; + + /* + * This test uses two objects, packobj and bigobj, that are always + * updated together (i.e. in the same tx) so that their contents are + * in sync and can be compared. Their contents relate to each other + * in a simple way: packobj is a dense array of 'bufwad' structures, + * while bigobj is a sparse array of the same bufwads. Specifically, + * for any index n, there are three bufwads that should be identical: + * + * packobj, at offset n * sizeof (bufwad_t) + * bigobj, at the head of the nth chunk + * bigobj, at the tail of the nth chunk + * + * The chunk size is arbitrary. 
It doesn't have to be a power of two, + * and it doesn't have any relation to the object blocksize. + * The only requirement is that it can hold at least two bufwads. + * + * Normally, we write the bufwad to each of these locations. + * However, free_percent of the time we instead write zeroes to + * packobj and perform a dmu_free_range() on bigobj. By comparing + * bigobj to packobj, we can verify that the DMU is correctly + * tracking which parts of an object are allocated and free, + * and that the contents of the allocated blocks are correct. + */ + + /* + * Read the directory info. If it's the first time, set things up. + */ + VERIFY(0 == dmu_read(os, ZTEST_DIROBJ, za->za_diroff, + sizeof (dd), &dd)); + if (dd.dd_chunk == 0) { + ASSERT(dd.dd_packobj == 0); + ASSERT(dd.dd_bigobj == 0); + tx = dmu_tx_create(os); + dmu_tx_hold_write(tx, ZTEST_DIROBJ, za->za_diroff, sizeof (dd)); + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("create r/w directory"); + dmu_tx_abort(tx); + return; + } + + dd.dd_packobj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0, + DMU_OT_NONE, 0, tx); + dd.dd_bigobj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0, + DMU_OT_NONE, 0, tx); + dd.dd_chunk = (1000 + ztest_random(1000)) * sizeof (uint64_t); + + ztest_set_random_blocksize(os, dd.dd_packobj, tx); + ztest_set_random_blocksize(os, dd.dd_bigobj, tx); + + dmu_write(os, ZTEST_DIROBJ, za->za_diroff, sizeof (dd), &dd, + tx); + dmu_tx_commit(tx); + } + + /* + * Prefetch a random chunk of the big object. + * Our aim here is to get some async reads in flight + * for blocks that we may free below; the DMU should + * handle this race correctly. + */ + n = ztest_random(regions) * stride + ztest_random(width); + s = 1 + ztest_random(2 * width - 1); + dmu_prefetch(os, dd.dd_bigobj, n * dd.dd_chunk, s * dd.dd_chunk); + + /* + * Pick a random index and compute the offsets into packobj and bigobj. + */ + n = ztest_random(regions) * stride + ztest_random(width); + s = 1 + ztest_random(width - 1); + + packoff = n * sizeof (bufwad_t); + packsize = s * sizeof (bufwad_t); + + bigoff = n * dd.dd_chunk; + bigsize = s * dd.dd_chunk; + + packbuf = umem_alloc(packsize, UMEM_NOFAIL); + bigbuf = umem_alloc(bigsize, UMEM_NOFAIL); + + /* + * free_percent of the time, free a range of bigobj rather than + * overwriting it. + */ + freeit = (ztest_random(100) < free_percent); + + /* + * Read the current contents of our objects. + */ + error = dmu_read(os, dd.dd_packobj, packoff, packsize, packbuf); + ASSERT3U(error, ==, 0); + error = dmu_read(os, dd.dd_bigobj, bigoff, bigsize, bigbuf); + ASSERT3U(error, ==, 0); + + /* + * Get a tx for the mods to both packobj and bigobj. + */ + tx = dmu_tx_create(os); + + dmu_tx_hold_write(tx, dd.dd_packobj, packoff, packsize); + + if (freeit) + dmu_tx_hold_free(tx, dd.dd_bigobj, bigoff, bigsize); + else + dmu_tx_hold_write(tx, dd.dd_bigobj, bigoff, bigsize); + + error = dmu_tx_assign(tx, TXG_WAIT); + + if (error) { + ztest_record_enospc("dmu r/w range"); + dmu_tx_abort(tx); + umem_free(packbuf, packsize); + umem_free(bigbuf, bigsize); + return; + } + + txg = dmu_tx_get_txg(tx); + + /* + * For each index from n to n + s, verify that the existing bufwad + * in packobj matches the bufwads at the head and tail of the + * corresponding chunk in bigobj. Then update all three bufwads + * with the new values we want to write out. 
+ */ + for (i = 0; i < s; i++) { + /* LINTED */ + pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t)); + /* LINTED */ + bigH = (bufwad_t *)((char *)bigbuf + i * dd.dd_chunk); + /* LINTED */ + bigT = (bufwad_t *)((char *)bigH + dd.dd_chunk) - 1; + + ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize); + ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize); + + if (pack->bw_txg > txg) + fatal(0, "future leak: got %llx, open txg is %llx", + pack->bw_txg, txg); + + if (pack->bw_data != 0 && pack->bw_index != n + i) + fatal(0, "wrong index: got %llx, wanted %llx+%llx", + pack->bw_index, n, i); + + if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0) + fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH); + + if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0) + fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT); + + if (freeit) { + bzero(pack, sizeof (bufwad_t)); + } else { + pack->bw_index = n + i; + pack->bw_txg = txg; + pack->bw_data = 1 + ztest_random(-2ULL); + } + *bigH = *pack; + *bigT = *pack; + } + + /* + * We've verified all the old bufwads, and made new ones. + * Now write them out. + */ + dmu_write(os, dd.dd_packobj, packoff, packsize, packbuf, tx); + + if (freeit) { + if (zopt_verbose >= 6) { + (void) printf("freeing offset %llx size %llx" + " txg %llx\n", + (u_longlong_t)bigoff, + (u_longlong_t)bigsize, + (u_longlong_t)txg); + } + VERIFY(0 == dmu_free_range(os, dd.dd_bigobj, bigoff, + bigsize, tx)); + } else { + if (zopt_verbose >= 6) { + (void) printf("writing offset %llx size %llx" + " txg %llx\n", + (u_longlong_t)bigoff, + (u_longlong_t)bigsize, + (u_longlong_t)txg); + } + dmu_write(os, dd.dd_bigobj, bigoff, bigsize, bigbuf, tx); + } + + dmu_tx_commit(tx); + + /* + * Sanity check the stuff we just wrote. + */ + { + void *packcheck = umem_alloc(packsize, UMEM_NOFAIL); + void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL); + + VERIFY(0 == dmu_read(os, dd.dd_packobj, packoff, + packsize, packcheck)); + VERIFY(0 == dmu_read(os, dd.dd_bigobj, bigoff, + bigsize, bigcheck)); + + ASSERT(bcmp(packbuf, packcheck, packsize) == 0); + ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0); + + umem_free(packcheck, packsize); + umem_free(bigcheck, bigsize); + } + + umem_free(packbuf, packsize); + umem_free(bigbuf, bigsize); +} + +void +ztest_dmu_check_future_leak(ztest_args_t *za) +{ + objset_t *os = za->za_os; + dmu_buf_t *db; + ztest_block_tag_t *bt; + dmu_object_info_t *doi = &za->za_doi; + + /* + * Make sure that, if there is a write record in the bonus buffer + * of the ZTEST_DIROBJ, that the txg for this record is <= the + * last synced txg of the pool. 
+ */ + VERIFY(dmu_bonus_hold(os, ZTEST_DIROBJ, FTAG, &db) == 0); + za->za_dbuf = db; + VERIFY(dmu_object_info(os, ZTEST_DIROBJ, doi) == 0); + ASSERT3U(doi->doi_bonus_size, >=, sizeof (*bt)); + ASSERT3U(doi->doi_bonus_size, <=, db->db_size); + ASSERT3U(doi->doi_bonus_size % sizeof (*bt), ==, 0); + bt = (void *)((char *)db->db_data + doi->doi_bonus_size - sizeof (*bt)); + if (bt->bt_objset != 0) { + ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os)); + ASSERT3U(bt->bt_object, ==, ZTEST_DIROBJ); + ASSERT3U(bt->bt_offset, ==, -1ULL); + ASSERT3U(bt->bt_txg, <, spa_first_txg(za->za_spa)); + } + dmu_buf_rele(db, FTAG); + za->za_dbuf = NULL; +} + +void +ztest_dmu_write_parallel(ztest_args_t *za) +{ + objset_t *os = za->za_os; + ztest_block_tag_t *rbt = &za->za_rbt; + ztest_block_tag_t *wbt = &za->za_wbt; + const size_t btsize = sizeof (ztest_block_tag_t); + dmu_buf_t *db; + int b, error; + int bs = ZTEST_DIROBJ_BLOCKSIZE; + int do_free = 0; + uint64_t off, txg, txg_how; + mutex_t *lp; + char osname[MAXNAMELEN]; + char iobuf[SPA_MAXBLOCKSIZE]; + blkptr_t blk = { 0 }; + uint64_t blkoff; + zbookmark_t zb; + dmu_tx_t *tx = dmu_tx_create(os); + + dmu_objset_name(os, osname); + + /* + * Have multiple threads write to large offsets in ZTEST_DIROBJ + * to verify that having multiple threads writing to the same object + * in parallel doesn't cause any trouble. + */ + if (ztest_random(4) == 0) { + /* + * Do the bonus buffer instead of a regular block. + * We need a lock to serialize resize vs. others, + * so we hash on the objset ID. + */ + b = dmu_objset_id(os) % ZTEST_SYNC_LOCKS; + off = -1ULL; + dmu_tx_hold_bonus(tx, ZTEST_DIROBJ); + } else { + b = ztest_random(ZTEST_SYNC_LOCKS); + off = za->za_diroff_shared + (b << SPA_MAXBLOCKSHIFT); + if (ztest_random(4) == 0) { + do_free = 1; + dmu_tx_hold_free(tx, ZTEST_DIROBJ, off, bs); + } else { + dmu_tx_hold_write(tx, ZTEST_DIROBJ, off, bs); + } + } + + txg_how = ztest_random(2) == 0 ? TXG_WAIT : TXG_NOWAIT; + error = dmu_tx_assign(tx, txg_how); + if (error) { + if (error == ERESTART) { + ASSERT(txg_how == TXG_NOWAIT); + dmu_tx_wait(tx); + } else { + ztest_record_enospc("dmu write parallel"); + } + dmu_tx_abort(tx); + return; + } + txg = dmu_tx_get_txg(tx); + + lp = &ztest_shared->zs_sync_lock[b]; + (void) mutex_lock(lp); + + wbt->bt_objset = dmu_objset_id(os); + wbt->bt_object = ZTEST_DIROBJ; + wbt->bt_offset = off; + wbt->bt_txg = txg; + wbt->bt_thread = za->za_instance; + wbt->bt_seq = ztest_shared->zs_seq[b]++; /* protected by lp */ + + /* + * Occasionally, write an all-zero block to test the behavior + * of blocks that compress into holes. 
+ */ + if (off != -1ULL && ztest_random(8) == 0) + bzero(wbt, btsize); + + if (off == -1ULL) { + dmu_object_info_t *doi = &za->za_doi; + char *dboff; + + VERIFY(dmu_bonus_hold(os, ZTEST_DIROBJ, FTAG, &db) == 0); + za->za_dbuf = db; + dmu_object_info_from_db(db, doi); + ASSERT3U(doi->doi_bonus_size, <=, db->db_size); + ASSERT3U(doi->doi_bonus_size, >=, btsize); + ASSERT3U(doi->doi_bonus_size % btsize, ==, 0); + dboff = (char *)db->db_data + doi->doi_bonus_size - btsize; + bcopy(dboff, rbt, btsize); + if (rbt->bt_objset != 0) { + ASSERT3U(rbt->bt_objset, ==, wbt->bt_objset); + ASSERT3U(rbt->bt_object, ==, wbt->bt_object); + ASSERT3U(rbt->bt_offset, ==, wbt->bt_offset); + ASSERT3U(rbt->bt_txg, <=, wbt->bt_txg); + } + if (ztest_random(10) == 0) { + int newsize = (ztest_random(db->db_size / + btsize) + 1) * btsize; + + ASSERT3U(newsize, >=, btsize); + ASSERT3U(newsize, <=, db->db_size); + VERIFY3U(dmu_set_bonus(db, newsize, tx), ==, 0); + dboff = (char *)db->db_data + newsize - btsize; + } + dmu_buf_will_dirty(db, tx); + bcopy(wbt, dboff, btsize); + dmu_buf_rele(db, FTAG); + za->za_dbuf = NULL; + } else if (do_free) { + VERIFY(dmu_free_range(os, ZTEST_DIROBJ, off, bs, tx) == 0); + } else { + dmu_write(os, ZTEST_DIROBJ, off, btsize, wbt, tx); + } + + (void) mutex_unlock(lp); + + if (ztest_random(1000) == 0) + (void) poll(NULL, 0, 1); /* open dn_notxholds window */ + + dmu_tx_commit(tx); + + if (ztest_random(10000) == 0) + txg_wait_synced(dmu_objset_pool(os), txg); + + if (off == -1ULL || do_free) + return; + + if (ztest_random(2) != 0) + return; + + /* + * dmu_sync() the block we just wrote. + */ + (void) mutex_lock(lp); + + blkoff = P2ALIGN_TYPED(off, bs, uint64_t); + error = dmu_buf_hold(os, ZTEST_DIROBJ, blkoff, FTAG, &db); + za->za_dbuf = db; + if (error) { + dprintf("dmu_buf_hold(%s, %d, %llx) = %d\n", + osname, ZTEST_DIROBJ, blkoff, error); + (void) mutex_unlock(lp); + return; + } + blkoff = off - blkoff; + error = dmu_sync(NULL, db, &blk, txg, NULL, NULL); + dmu_buf_rele(db, FTAG); + za->za_dbuf = NULL; + + (void) mutex_unlock(lp); + + if (error) { + dprintf("dmu_sync(%s, %d, %llx) = %d\n", + osname, ZTEST_DIROBJ, off, error); + return; + } + + if (blk.blk_birth == 0) /* concurrent free */ + return; + + txg_suspend(dmu_objset_pool(os)); + + ASSERT(blk.blk_fill == 1); + ASSERT3U(BP_GET_TYPE(&blk), ==, DMU_OT_UINT64_OTHER); + ASSERT3U(BP_GET_LEVEL(&blk), ==, 0); + ASSERT3U(BP_GET_LSIZE(&blk), ==, bs); + + /* + * Read the block that dmu_sync() returned to make sure its contents + * match what we wrote. We do this while still txg_suspend()ed + * to ensure that the block can't be reused before we read it. + */ + zb.zb_objset = dmu_objset_id(os); + zb.zb_object = ZTEST_DIROBJ; + zb.zb_level = 0; + zb.zb_blkid = off / bs; + error = zio_wait(zio_read(NULL, za->za_spa, &blk, iobuf, bs, + NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_MUSTSUCCEED, &zb)); + ASSERT3U(error, ==, 0); + + txg_resume(dmu_objset_pool(os)); + + bcopy(&iobuf[blkoff], rbt, btsize); + + if (rbt->bt_objset == 0) /* concurrent free */ + return; + + if (wbt->bt_objset == 0) /* all-zero overwrite */ + return; + + ASSERT3U(rbt->bt_objset, ==, wbt->bt_objset); + ASSERT3U(rbt->bt_object, ==, wbt->bt_object); + ASSERT3U(rbt->bt_offset, ==, wbt->bt_offset); + + /* + * The semantic of dmu_sync() is that we always push the most recent + * version of the data, so in the face of concurrent updates we may + * see a newer version of the block. That's OK. 
+ */ + ASSERT3U(rbt->bt_txg, >=, wbt->bt_txg); + if (rbt->bt_thread == wbt->bt_thread) + ASSERT3U(rbt->bt_seq, ==, wbt->bt_seq); + else + ASSERT3U(rbt->bt_seq, >, wbt->bt_seq); +} + +/* + * Verify that zap_{create,destroy,add,remove,update} work as expected. + */ +#define ZTEST_ZAP_MIN_INTS 1 +#define ZTEST_ZAP_MAX_INTS 4 +#define ZTEST_ZAP_MAX_PROPS 1000 + +void +ztest_zap(ztest_args_t *za) +{ + objset_t *os = za->za_os; + uint64_t object; + uint64_t txg, last_txg; + uint64_t value[ZTEST_ZAP_MAX_INTS]; + uint64_t zl_ints, zl_intsize, prop; + int i, ints; + dmu_tx_t *tx; + char propname[100], txgname[100]; + int error; + char osname[MAXNAMELEN]; + char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" }; + + dmu_objset_name(os, osname); + + /* + * Create a new object if necessary, and record it in the directory. + */ + VERIFY(0 == dmu_read(os, ZTEST_DIROBJ, za->za_diroff, + sizeof (uint64_t), &object)); + + if (object == 0) { + tx = dmu_tx_create(os); + dmu_tx_hold_write(tx, ZTEST_DIROBJ, za->za_diroff, + sizeof (uint64_t)); + dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, TRUE, NULL); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("create zap test obj"); + dmu_tx_abort(tx); + return; + } + object = zap_create(os, DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx); + if (error) { + fatal(0, "zap_create('%s', %llu) = %d", + osname, object, error); + } + ASSERT(object != 0); + dmu_write(os, ZTEST_DIROBJ, za->za_diroff, + sizeof (uint64_t), &object, tx); + /* + * Generate a known hash collision, and verify that + * we can lookup and remove both entries. + */ + for (i = 0; i < 2; i++) { + value[i] = i; + error = zap_add(os, object, hc[i], sizeof (uint64_t), + 1, &value[i], tx); + ASSERT3U(error, ==, 0); + } + for (i = 0; i < 2; i++) { + error = zap_add(os, object, hc[i], sizeof (uint64_t), + 1, &value[i], tx); + ASSERT3U(error, ==, EEXIST); + error = zap_length(os, object, hc[i], + &zl_intsize, &zl_ints); + ASSERT3U(error, ==, 0); + ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); + ASSERT3U(zl_ints, ==, 1); + } + for (i = 0; i < 2; i++) { + error = zap_remove(os, object, hc[i], tx); + ASSERT3U(error, ==, 0); + } + + dmu_tx_commit(tx); + } + + ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS); + + prop = ztest_random(ZTEST_ZAP_MAX_PROPS); + (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); + (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); + bzero(value, sizeof (value)); + last_txg = 0; + + /* + * If these zap entries already exist, validate their contents. + */ + error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); + if (error == 0) { + ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); + ASSERT3U(zl_ints, ==, 1); + + VERIFY(zap_lookup(os, object, txgname, zl_intsize, + zl_ints, &last_txg) == 0); + + VERIFY(zap_length(os, object, propname, &zl_intsize, + &zl_ints) == 0); + + ASSERT3U(zl_intsize, ==, sizeof (uint64_t)); + ASSERT3U(zl_ints, ==, ints); + + VERIFY(zap_lookup(os, object, propname, zl_intsize, + zl_ints, value) == 0); + + for (i = 0; i < ints; i++) { + ASSERT3U(value[i], ==, last_txg + object + i); + } + } else { + ASSERT3U(error, ==, ENOENT); + } + + /* + * Atomically update two entries in our zap object. + * The first is named txg_%llu, and contains the txg + * in which the property was last updated. The second + * is named prop_%llu, and the nth element of its value + * should be txg + object + n. 
+ */ + tx = dmu_tx_create(os); + dmu_tx_hold_zap(tx, object, TRUE, NULL); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("create zap entry"); + dmu_tx_abort(tx); + return; + } + txg = dmu_tx_get_txg(tx); + + if (last_txg > txg) + fatal(0, "zap future leak: old %llu new %llu", last_txg, txg); + + for (i = 0; i < ints; i++) + value[i] = txg + object + i; + + error = zap_update(os, object, txgname, sizeof (uint64_t), 1, &txg, tx); + if (error) + fatal(0, "zap_update('%s', %llu, '%s') = %d", + osname, object, txgname, error); + + error = zap_update(os, object, propname, sizeof (uint64_t), + ints, value, tx); + if (error) + fatal(0, "zap_update('%s', %llu, '%s') = %d", + osname, object, propname, error); + + dmu_tx_commit(tx); + + /* + * Remove a random pair of entries. + */ + prop = ztest_random(ZTEST_ZAP_MAX_PROPS); + (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop); + (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop); + + error = zap_length(os, object, txgname, &zl_intsize, &zl_ints); + + if (error == ENOENT) + return; + + ASSERT3U(error, ==, 0); + + tx = dmu_tx_create(os); + dmu_tx_hold_zap(tx, object, TRUE, NULL); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("remove zap entry"); + dmu_tx_abort(tx); + return; + } + error = zap_remove(os, object, txgname, tx); + if (error) + fatal(0, "zap_remove('%s', %llu, '%s') = %d", + osname, object, txgname, error); + + error = zap_remove(os, object, propname, tx); + if (error) + fatal(0, "zap_remove('%s', %llu, '%s') = %d", + osname, object, propname, error); + + dmu_tx_commit(tx); + + /* + * Once in a while, destroy the object. + */ + if (ztest_random(1000) != 0) + return; + + tx = dmu_tx_create(os); + dmu_tx_hold_write(tx, ZTEST_DIROBJ, za->za_diroff, sizeof (uint64_t)); + dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("destroy zap object"); + dmu_tx_abort(tx); + return; + } + error = zap_destroy(os, object, tx); + if (error) + fatal(0, "zap_destroy('%s', %llu) = %d", + osname, object, error); + object = 0; + dmu_write(os, ZTEST_DIROBJ, za->za_diroff, sizeof (uint64_t), + &object, tx); + dmu_tx_commit(tx); +} + +void +ztest_zap_parallel(ztest_args_t *za) +{ + objset_t *os = za->za_os; + uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc; + dmu_tx_t *tx; + int i, namelen, error; + char name[20], string_value[20]; + void *data; + + /* + * Generate a random name of the form 'xxx.....' where each + * x is a random printable character and the dots are dots. + * There are 94 such characters, and the name length goes from + * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names. + */ + namelen = ztest_random(sizeof (name) - 5) + 5 + 1; + + for (i = 0; i < 3; i++) + name[i] = '!' + ztest_random('~' - '!' + 1); + for (; i < namelen - 1; i++) + name[i] = '.'; + name[i] = '\0'; + + if (ztest_random(2) == 0) + object = ZTEST_MICROZAP_OBJ; + else + object = ZTEST_FATZAP_OBJ; + + if ((namelen & 1) || object == ZTEST_MICROZAP_OBJ) { + wsize = sizeof (txg); + wc = 1; + data = &txg; + } else { + wsize = 1; + wc = namelen; + data = string_value; + } + + count = -1ULL; + VERIFY(zap_count(os, object, &count) == 0); + ASSERT(count != -1ULL); + + /* + * Select an operation: length, lookup, add, update, remove. 
+ */ + i = ztest_random(5); + + if (i >= 2) { + tx = dmu_tx_create(os); + dmu_tx_hold_zap(tx, object, TRUE, NULL); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + ztest_record_enospc("zap parallel"); + dmu_tx_abort(tx); + return; + } + txg = dmu_tx_get_txg(tx); + bcopy(name, string_value, namelen); + } else { + tx = NULL; + txg = 0; + bzero(string_value, namelen); + } + + switch (i) { + + case 0: + error = zap_length(os, object, name, &zl_wsize, &zl_wc); + if (error == 0) { + ASSERT3U(wsize, ==, zl_wsize); + ASSERT3U(wc, ==, zl_wc); + } else { + ASSERT3U(error, ==, ENOENT); + } + break; + + case 1: + error = zap_lookup(os, object, name, wsize, wc, data); + if (error == 0) { + if (data == string_value && + bcmp(name, data, namelen) != 0) + fatal(0, "name '%s' != val '%s' len %d", + name, data, namelen); + } else { + ASSERT3U(error, ==, ENOENT); + } + break; + + case 2: + error = zap_add(os, object, name, wsize, wc, data, tx); + ASSERT(error == 0 || error == EEXIST); + break; + + case 3: + VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0); + break; + + case 4: + error = zap_remove(os, object, name, tx); + ASSERT(error == 0 || error == ENOENT); + break; + } + + if (tx != NULL) + dmu_tx_commit(tx); +} + +void +ztest_dsl_prop_get_set(ztest_args_t *za) +{ + objset_t *os = za->za_os; + int i, inherit; + uint64_t value; + const char *prop, *valname; + char setpoint[MAXPATHLEN]; + char osname[MAXNAMELEN]; + int error; + + (void) rw_rdlock(&ztest_shared->zs_name_lock); + + dmu_objset_name(os, osname); + + for (i = 0; i < 2; i++) { + if (i == 0) { + prop = "checksum"; + value = ztest_random_checksum(); + inherit = (value == ZIO_CHECKSUM_INHERIT); + } else { + prop = "compression"; + value = ztest_random_compress(); + inherit = (value == ZIO_COMPRESS_INHERIT); + } + + error = dsl_prop_set(osname, prop, sizeof (value), + !inherit, &value); + + if (error == ENOSPC) { + ztest_record_enospc("dsl_prop_set"); + break; + } + + ASSERT3U(error, ==, 0); + + VERIFY3U(dsl_prop_get(osname, prop, sizeof (value), + 1, &value, setpoint), ==, 0); + + if (i == 0) + valname = zio_checksum_table[value].ci_name; + else + valname = zio_compress_table[value].ci_name; + + if (zopt_verbose >= 6) { + (void) printf("%s %s = %s for '%s'\n", + osname, prop, valname, setpoint); + } + } + + (void) rw_unlock(&ztest_shared->zs_name_lock); +} + +/* + * Inject random faults into the on-disk data. + */ +void +ztest_fault_inject(ztest_args_t *za) +{ + int fd; + uint64_t offset; + uint64_t leaves = MAX(zopt_mirrors, 1) * zopt_raidz; + uint64_t bad = 0x1990c0ffeedecade; + uint64_t top, leaf; + char path0[MAXPATHLEN]; + char pathrand[MAXPATHLEN]; + size_t fsize; + spa_t *spa = za->za_spa; + int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */ + int iters = 1000; + int maxfaults = zopt_maxfaults; + vdev_t *vd0 = NULL; + uint64_t guid0 = 0; + + ASSERT(leaves >= 1); + + /* + * We need SCL_STATE here because we're going to look at vd0->vdev_tsd. + */ + spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); + + if (ztest_random(2) == 0) { + /* + * Inject errors on a normal data device. + */ + top = ztest_random(spa->spa_root_vdev->vdev_children); + leaf = ztest_random(leaves); + + /* + * Generate paths to the first leaf in this top-level vdev, + * and to the random leaf we selected. We'll induce transient + * write failures and random online/offline activity on leaf 0, + * and we'll write random garbage to the randomly chosen leaf. 
+ */ + (void) snprintf(path0, sizeof (path0), ztest_dev_template, + zopt_dir, zopt_pool, top * leaves + 0); + (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template, + zopt_dir, zopt_pool, top * leaves + leaf); + + vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0); + if (vd0 != NULL && maxfaults != 1) { + /* + * Make vd0 explicitly claim to be unreadable, + * or unwriteable, or reach behind its back + * and close the underlying fd. We can do this if + * maxfaults == 0 because we'll fail and reexecute, + * and we can do it if maxfaults >= 2 because we'll + * have enough redundancy. If maxfaults == 1, the + * combination of this with injection of random data + * corruption below exceeds the pool's fault tolerance. + */ + vdev_file_t *vf = vd0->vdev_tsd; + + if (vf != NULL && ztest_random(3) == 0) { + (void) close(vf->vf_vnode->v_fd); + vf->vf_vnode->v_fd = -1; + } else if (ztest_random(2) == 0) { + vd0->vdev_cant_read = B_TRUE; + } else { + vd0->vdev_cant_write = B_TRUE; + } + guid0 = vd0->vdev_guid; + } + } else { + /* + * Inject errors on an l2cache device. + */ + spa_aux_vdev_t *sav = &spa->spa_l2cache; + + if (sav->sav_count == 0) { + spa_config_exit(spa, SCL_STATE, FTAG); + return; + } + vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)]; + guid0 = vd0->vdev_guid; + (void) strcpy(path0, vd0->vdev_path); + (void) strcpy(pathrand, vd0->vdev_path); + + leaf = 0; + leaves = 1; + maxfaults = INT_MAX; /* no limit on cache devices */ + } + + dprintf("damaging %s and %s\n", path0, pathrand); + + spa_config_exit(spa, SCL_STATE, FTAG); + + if (maxfaults == 0) + return; + + /* + * If we can tolerate two or more faults, randomly online/offline vd0. + */ + if (maxfaults >= 2 && guid0 != 0) { + if (ztest_random(10) < 6) + (void) vdev_offline(spa, guid0, B_TRUE); + else + (void) vdev_online(spa, guid0, B_FALSE, NULL); + } + + /* + * We have at least single-fault tolerance, so inject data corruption. + */ + fd = open(pathrand, O_RDWR); + + if (fd == -1) /* we hit a gap in the device namespace */ + return; + + fsize = lseek(fd, 0, SEEK_END); + + while (--iters != 0) { + offset = ztest_random(fsize / (leaves << bshift)) * + (leaves << bshift) + (leaf << bshift) + + (ztest_random(1ULL << (bshift - 1)) & -8ULL); + + if (offset >= fsize) + continue; + + if (zopt_verbose >= 6) + (void) printf("injecting bad word into %s," + " offset 0x%llx\n", pathrand, (u_longlong_t)offset); + + if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad)) + fatal(1, "can't inject bad word at 0x%llx in %s", + offset, pathrand); + } + + (void) close(fd); +} + +/* + * Scrub the pool. + */ +void +ztest_scrub(ztest_args_t *za) +{ + spa_t *spa = za->za_spa; + + (void) spa_scrub(spa, POOL_SCRUB_EVERYTHING); + (void) poll(NULL, 0, 1000); /* wait a second, then force a restart */ + (void) spa_scrub(spa, POOL_SCRUB_EVERYTHING); +} + +/* + * Rename the pool to a different name and then rename it back. 
+ */ +void +ztest_spa_rename(ztest_args_t *za) +{ + char *oldname, *newname; + int error; + spa_t *spa; + + (void) rw_wrlock(&ztest_shared->zs_name_lock); + + oldname = za->za_pool; + newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL); + (void) strcpy(newname, oldname); + (void) strcat(newname, "_tmp"); + + /* + * Do the rename + */ + error = spa_rename(oldname, newname); + if (error) + fatal(0, "spa_rename('%s', '%s') = %d", oldname, + newname, error); + + /* + * Try to open it under the old name, which shouldn't exist + */ + error = spa_open(oldname, &spa, FTAG); + if (error != ENOENT) + fatal(0, "spa_open('%s') = %d", oldname, error); + + /* + * Open it under the new name and make sure it's still the same spa_t. + */ + error = spa_open(newname, &spa, FTAG); + if (error != 0) + fatal(0, "spa_open('%s') = %d", newname, error); + + ASSERT(spa == za->za_spa); + spa_close(spa, FTAG); + + /* + * Rename it back to the original + */ + error = spa_rename(newname, oldname); + if (error) + fatal(0, "spa_rename('%s', '%s') = %d", newname, + oldname, error); + + /* + * Make sure it can still be opened + */ + error = spa_open(oldname, &spa, FTAG); + if (error != 0) + fatal(0, "spa_open('%s') = %d", oldname, error); + + ASSERT(spa == za->za_spa); + spa_close(spa, FTAG); + + umem_free(newname, strlen(newname) + 1); + + (void) rw_unlock(&ztest_shared->zs_name_lock); +} + + +/* + * Completely obliterate one disk. + */ +static void +ztest_obliterate_one_disk(uint64_t vdev) +{ + int fd; + char dev_name[MAXPATHLEN], copy_name[MAXPATHLEN]; + size_t fsize; + + if (zopt_maxfaults < 2) + return; + + (void) sprintf(dev_name, ztest_dev_template, zopt_dir, zopt_pool, vdev); + (void) snprintf(copy_name, MAXPATHLEN, "%s.old", dev_name); + + fd = open(dev_name, O_RDWR); + + if (fd == -1) + fatal(1, "can't open %s", dev_name); + + /* + * Determine the size. + */ + fsize = lseek(fd, 0, SEEK_END); + + (void) close(fd); + + /* + * Rename the old device to dev_name.old (useful for debugging). + */ + VERIFY(rename(dev_name, copy_name) == 0); + + /* + * Create a new one. + */ + VERIFY((fd = open(dev_name, O_RDWR | O_CREAT | O_TRUNC, 0666)) >= 0); + VERIFY(ftruncate(fd, fsize) == 0); + (void) close(fd); +} + +static void +ztest_replace_one_disk(spa_t *spa, uint64_t vdev) +{ + char dev_name[MAXPATHLEN]; + nvlist_t *root; + int error; + uint64_t guid; + vdev_t *vd; + + (void) sprintf(dev_name, ztest_dev_template, zopt_dir, zopt_pool, vdev); + + /* + * Build the nvlist describing dev_name. 
+ */ + root = make_vdev_root(dev_name, NULL, 0, 0, 0, 0, 0, 1); + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + if ((vd = vdev_lookup_by_path(spa->spa_root_vdev, dev_name)) == NULL) + guid = 0; + else + guid = vd->vdev_guid; + spa_config_exit(spa, SCL_VDEV, FTAG); + error = spa_vdev_attach(spa, guid, root, B_TRUE); + if (error != 0 && + error != EBUSY && + error != ENOTSUP && + error != ENODEV && + error != EDOM) + fatal(0, "spa_vdev_attach(in-place) = %d", error); + + nvlist_free(root); +} + +static void +ztest_verify_blocks(char *pool) +{ + int status; + char zdb[MAXPATHLEN + MAXNAMELEN + 20]; + char zbuf[1024]; + char *bin; + char *ztest; + char *isa; + int isalen; + FILE *fp; + + (void) realpath(getexecname(), zdb); + + /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */ + bin = strstr(zdb, "/usr/bin/"); + ztest = strstr(bin, "/ztest"); + isa = bin + 8; + isalen = ztest - isa; + isa = strdup(isa); + /* LINTED */ + (void) sprintf(bin, + "/usr/sbin%.*s/zdb -bc%s%s -U /tmp/zpool.cache %s", + isalen, + isa, + zopt_verbose >= 3 ? "s" : "", + zopt_verbose >= 4 ? "v" : "", + pool); + free(isa); + + if (zopt_verbose >= 5) + (void) printf("Executing %s\n", strstr(zdb, "zdb ")); + + fp = popen(zdb, "r"); + + while (fgets(zbuf, sizeof (zbuf), fp) != NULL) + if (zopt_verbose >= 3) + (void) printf("%s", zbuf); + + status = pclose(fp); + + if (status == 0) + return; + + ztest_dump_core = 0; + if (WIFEXITED(status)) + fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status)); + else + fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status)); +} + +static void +ztest_walk_pool_directory(char *header) +{ + spa_t *spa = NULL; + + if (zopt_verbose >= 6) + (void) printf("%s\n", header); + + mutex_enter(&spa_namespace_lock); + while ((spa = spa_next(spa)) != NULL) + if (zopt_verbose >= 6) + (void) printf("\t%s\n", spa_name(spa)); + mutex_exit(&spa_namespace_lock); +} + +static void +ztest_spa_import_export(char *oldname, char *newname) +{ + nvlist_t *config; + uint64_t pool_guid; + spa_t *spa; + int error; + + if (zopt_verbose >= 4) { + (void) printf("import/export: old = %s, new = %s\n", + oldname, newname); + } + + /* + * Clean up from previous runs. + */ + (void) spa_destroy(newname); + + /* + * Get the pool's configuration and guid. + */ + error = spa_open(oldname, &spa, FTAG); + if (error) + fatal(0, "spa_open('%s') = %d", oldname, error); + + pool_guid = spa_guid(spa); + spa_close(spa, FTAG); + + ztest_walk_pool_directory("pools before export"); + + /* + * Export it. + */ + error = spa_export(oldname, &config, B_FALSE); + if (error) + fatal(0, "spa_export('%s') = %d", oldname, error); + + ztest_walk_pool_directory("pools after export"); + + /* + * Import it under the new name. + */ + error = spa_import(newname, config, NULL); + if (error) + fatal(0, "spa_import('%s') = %d", newname, error); + + ztest_walk_pool_directory("pools after import"); + + /* + * Try to import it again -- should fail with EEXIST. + */ + error = spa_import(newname, config, NULL); + if (error != EEXIST) + fatal(0, "spa_import('%s') twice", newname); + + /* + * Try to import it under a different name -- should fail with EEXIST. + */ + error = spa_import(oldname, config, NULL); + if (error != EEXIST) + fatal(0, "spa_import('%s') under multiple names", newname); + + /* + * Verify that the pool is no longer visible under the old name. 
+ */ + error = spa_open(oldname, &spa, FTAG); + if (error != ENOENT) + fatal(0, "spa_open('%s') = %d", newname, error); + + /* + * Verify that we can open and close the pool using the new name. + */ + error = spa_open(newname, &spa, FTAG); + if (error) + fatal(0, "spa_open('%s') = %d", newname, error); + ASSERT(pool_guid == spa_guid(spa)); + spa_close(spa, FTAG); + + nvlist_free(config); +} + +static void * +ztest_resume(void *arg) +{ + spa_t *spa = arg; + + while (!ztest_exiting) { + (void) poll(NULL, 0, 1000); + + if (!spa_suspended(spa)) + continue; + + spa_vdev_state_enter(spa); + vdev_clear(spa, NULL); + (void) spa_vdev_state_exit(spa, NULL, 0); + + zio_resume(spa); + } + return (NULL); +} + +static void * +ztest_thread(void *arg) +{ + ztest_args_t *za = arg; + ztest_shared_t *zs = ztest_shared; + hrtime_t now, functime; + ztest_info_t *zi; + int f, i; + + while ((now = gethrtime()) < za->za_stop) { + /* + * See if it's time to force a crash. + */ + if (now > za->za_kill) { + zs->zs_alloc = spa_get_alloc(za->za_spa); + zs->zs_space = spa_get_space(za->za_spa); + (void) kill(getpid(), SIGKILL); + } + + /* + * Pick a random function. + */ + f = ztest_random(ZTEST_FUNCS); + zi = &zs->zs_info[f]; + + /* + * Decide whether to call it, based on the requested frequency. + */ + if (zi->zi_call_target == 0 || + (double)zi->zi_call_total / zi->zi_call_target > + (double)(now - zs->zs_start_time) / (zopt_time * NANOSEC)) + continue; + + atomic_add_64(&zi->zi_calls, 1); + atomic_add_64(&zi->zi_call_total, 1); + + za->za_diroff = (za->za_instance * ZTEST_FUNCS + f) * + ZTEST_DIRSIZE; + za->za_diroff_shared = (1ULL << 63); + + for (i = 0; i < zi->zi_iters; i++) + zi->zi_func(za); + + functime = gethrtime() - now; + + atomic_add_64(&zi->zi_call_time, functime); + + if (zopt_verbose >= 4) { + Dl_info dli; + (void) dladdr((void *)zi->zi_func, &dli); + (void) printf("%6.2f sec in %s\n", + (double)functime / NANOSEC, dli.dli_sname); + } + + /* + * If we're getting ENOSPC with some regularity, stop. + */ + if (zs->zs_enospc_count > 10) + break; + } + + return (NULL); +} + +/* + * Kick off threads to run tests on all datasets in parallel. + */ +static void +ztest_run(char *pool) +{ + int t, d, error; + ztest_shared_t *zs = ztest_shared; + ztest_args_t *za; + spa_t *spa; + char name[100]; + thread_t resume_tid; + + ztest_exiting = B_FALSE; + + (void) _mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL); + (void) rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL); + + for (t = 0; t < ZTEST_SYNC_LOCKS; t++) + (void) _mutex_init(&zs->zs_sync_lock[t], USYNC_THREAD, NULL); + + /* + * Destroy one disk before we even start. + * It's mirrored, so everything should work just fine. + * This makes us exercise fault handling very early in spa_load(). + */ + ztest_obliterate_one_disk(0); + + /* + * Verify that the sum of the sizes of all blocks in the pool + * equals the SPA's allocated space total. + */ + ztest_verify_blocks(pool); + + /* + * Kick off a replacement of the disk we just obliterated. + */ + kernel_init(FREAD | FWRITE); + VERIFY(spa_open(pool, &spa, FTAG) == 0); + ztest_replace_one_disk(spa, 0); + if (zopt_verbose >= 5) + show_pool_stats(spa); + spa_close(spa, FTAG); + kernel_fini(); + + kernel_init(FREAD | FWRITE); + + /* + * Verify that we can export the pool and reimport it under a + * different name. 
+ */ + if (ztest_random(2) == 0) { + (void) snprintf(name, 100, "%s_import", pool); + ztest_spa_import_export(pool, name); + ztest_spa_import_export(name, pool); + } + + /* + * Verify that we can loop over all pools. + */ + mutex_enter(&spa_namespace_lock); + for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) { + if (zopt_verbose > 3) { + (void) printf("spa_next: found %s\n", spa_name(spa)); + } + } + mutex_exit(&spa_namespace_lock); + + /* + * Open our pool. + */ + VERIFY(spa_open(pool, &spa, FTAG) == 0); + + /* + * Create a thread to periodically resume suspended I/O. + */ + VERIFY(thr_create(0, 0, ztest_resume, spa, THR_BOUND, + &resume_tid) == 0); + + /* + * Verify that we can safely inquire about about any object, + * whether it's allocated or not. To make it interesting, + * we probe a 5-wide window around each power of two. + * This hits all edge cases, including zero and the max. + */ + for (t = 0; t < 64; t++) { + for (d = -5; d <= 5; d++) { + error = dmu_object_info(spa->spa_meta_objset, + (1ULL << t) + d, NULL); + ASSERT(error == 0 || error == ENOENT || + error == EINVAL); + } + } + + /* + * Now kick off all the tests that run in parallel. + */ + zs->zs_enospc_count = 0; + + za = umem_zalloc(zopt_threads * sizeof (ztest_args_t), UMEM_NOFAIL); + + if (zopt_verbose >= 4) + (void) printf("starting main threads...\n"); + + za[0].za_start = gethrtime(); + za[0].za_stop = za[0].za_start + zopt_passtime * NANOSEC; + za[0].za_stop = MIN(za[0].za_stop, zs->zs_stop_time); + za[0].za_kill = za[0].za_stop; + if (ztest_random(100) < zopt_killrate) + za[0].za_kill -= ztest_random(zopt_passtime * NANOSEC); + + for (t = 0; t < zopt_threads; t++) { + d = t % zopt_datasets; + + (void) strcpy(za[t].za_pool, pool); + za[t].za_os = za[d].za_os; + za[t].za_spa = spa; + za[t].za_zilog = za[d].za_zilog; + za[t].za_instance = t; + za[t].za_random = ztest_random(-1ULL); + za[t].za_start = za[0].za_start; + za[t].za_stop = za[0].za_stop; + za[t].za_kill = za[0].za_kill; + + if (t < zopt_datasets) { + ztest_replay_t zr; + int test_future = FALSE; + (void) rw_rdlock(&ztest_shared->zs_name_lock); + (void) snprintf(name, 100, "%s/%s_%d", pool, pool, d); + error = dmu_objset_create(name, DMU_OST_OTHER, NULL, 0, + ztest_create_cb, NULL); + if (error == EEXIST) { + test_future = TRUE; + } else if (error == ENOSPC) { + zs->zs_enospc_count++; + (void) rw_unlock(&ztest_shared->zs_name_lock); + break; + } else if (error != 0) { + fatal(0, "dmu_objset_create(%s) = %d", + name, error); + } + error = dmu_objset_open(name, DMU_OST_OTHER, + DS_MODE_USER, &za[d].za_os); + if (error) + fatal(0, "dmu_objset_open('%s') = %d", + name, error); + (void) rw_unlock(&ztest_shared->zs_name_lock); + if (test_future) + ztest_dmu_check_future_leak(&za[t]); + zr.zr_os = za[d].za_os; + zil_replay(zr.zr_os, &zr, &zr.zr_assign, + ztest_replay_vector, NULL); + za[d].za_zilog = zil_open(za[d].za_os, NULL); + } + + VERIFY(thr_create(0, 0, ztest_thread, &za[t], THR_BOUND, + &za[t].za_thread) == 0); + } + + while (--t >= 0) { + VERIFY(thr_join(za[t].za_thread, NULL, NULL) == 0); + if (t < zopt_datasets) { + zil_close(za[t].za_zilog); + dmu_objset_close(za[t].za_os); + } + } + + if (zopt_verbose >= 3) + show_pool_stats(spa); + + txg_wait_synced(spa_get_dsl(spa), 0); + + zs->zs_alloc = spa_get_alloc(spa); + zs->zs_space = spa_get_space(spa); + + /* + * If we had out-of-space errors, destroy a random objset. 
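 *
 * Aside: the dmu_object_info() loop above probes a 5-wide window around
 * every power of two, a generic way to hit boundary object IDs (including
 * zero and values that wrap near the 64-bit maximum).  The same idea in
 * isolation -- probe_one() is a placeholder, not a function of this file:
 *
 *	#include <stdint.h>
 *
 *	extern void probe_one(uint64_t id);	// stand-in for the real probe
 *
 *	static void
 *	probe_power_of_two_windows(void)
 *	{
 *		int t, d;
 *
 *		for (t = 0; t < 64; t++)
 *			for (d = -5; d <= 5; d++)
 *				probe_one((1ULL << t) + (uint64_t)d);
 *	}
 *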
+ */ + if (zs->zs_enospc_count != 0) { + (void) rw_rdlock(&ztest_shared->zs_name_lock); + d = (int)ztest_random(zopt_datasets); + (void) snprintf(name, 100, "%s/%s_%d", pool, pool, d); + if (zopt_verbose >= 3) + (void) printf("Destroying %s to free up space\n", name); + (void) dmu_objset_find(name, ztest_destroy_cb, &za[d], + DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); + (void) rw_unlock(&ztest_shared->zs_name_lock); + } + + txg_wait_synced(spa_get_dsl(spa), 0); + + umem_free(za, zopt_threads * sizeof (ztest_args_t)); + + /* Kill the resume thread */ + ztest_exiting = B_TRUE; + VERIFY(thr_join(resume_tid, NULL, NULL) == 0); + + /* + * Right before closing the pool, kick off a bunch of async I/O; + * spa_close() should wait for it to complete. + */ + for (t = 1; t < 50; t++) + dmu_prefetch(spa->spa_meta_objset, t, 0, 1 << 15); + + spa_close(spa, FTAG); + + kernel_fini(); +} + +void +print_time(hrtime_t t, char *timebuf) +{ + hrtime_t s = t / NANOSEC; + hrtime_t m = s / 60; + hrtime_t h = m / 60; + hrtime_t d = h / 24; + + s -= m * 60; + m -= h * 60; + h -= d * 24; + + timebuf[0] = '\0'; + + if (d) + (void) sprintf(timebuf, + "%llud%02lluh%02llum%02llus", d, h, m, s); + else if (h) + (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s); + else if (m) + (void) sprintf(timebuf, "%llum%02llus", m, s); + else + (void) sprintf(timebuf, "%llus", s); +} + +/* + * Create a storage pool with the given name and initial vdev size. + * Then create the specified number of datasets in the pool. + */ +static void +ztest_init(char *pool) +{ + spa_t *spa; + int error; + nvlist_t *nvroot; + + kernel_init(FREAD | FWRITE); + + /* + * Create the storage pool. + */ + (void) spa_destroy(pool); + ztest_shared->zs_vdev_primaries = 0; + nvroot = make_vdev_root(NULL, NULL, zopt_vdev_size, 0, + 0, zopt_raidz, zopt_mirrors, 1); + error = spa_create(pool, nvroot, NULL, NULL, NULL); + nvlist_free(nvroot); + + if (error) + fatal(0, "spa_create() = %d", error); + error = spa_open(pool, &spa, FTAG); + if (error) + fatal(0, "spa_open() = %d", error); + + if (zopt_verbose >= 3) + show_pool_stats(spa); + + spa_close(spa, FTAG); + + kernel_fini(); +} + +int +main(int argc, char **argv) +{ + int kills = 0; + int iters = 0; + int i, f; + ztest_shared_t *zs; + ztest_info_t *zi; + char timebuf[100]; + char numbuf[6]; + + (void) setvbuf(stdout, NULL, _IOLBF, 0); + + /* Override location of zpool.cache */ + spa_config_path = "/tmp/zpool.cache"; + + ztest_random_fd = open("/dev/urandom", O_RDONLY); + + process_options(argc, argv); + + argc -= optind; + argv += optind; + + dprintf_setup(&argc, argv); + + /* + * Blow away any existing copy of zpool.cache + */ + if (zopt_init != 0) + (void) remove("/tmp/zpool.cache"); + + zs = ztest_shared = (void *)mmap(0, + P2ROUNDUP(sizeof (ztest_shared_t), getpagesize()), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0); + + if (zopt_verbose >= 1) { + (void) printf("%llu vdevs, %d datasets, %d threads," + " %llu seconds...\n", + (u_longlong_t)zopt_vdevs, zopt_datasets, zopt_threads, + (u_longlong_t)zopt_time); + } + + /* + * Create and initialize our storage pool. + */ + for (i = 1; i <= zopt_init; i++) { + bzero(zs, sizeof (ztest_shared_t)); + if (zopt_verbose >= 3 && zopt_init != 1) + (void) printf("ztest_init(), pass %d\n", i); + ztest_init(zopt_pool); + } + + /* + * Initialize the call targets for each function. 
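 *
 * Aside: ztest_shared above lives in an anonymous MAP_SHARED mapping so that
 * statistics written by a forked child remain visible to the parent even
 * when the child is deliberately SIGKILLed.  A reduced sketch of the idiom
 * (struct and variable names illustrative; MAP_ANON is spelled MAP_ANONYMOUS
 * on some systems):
 *
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	struct shared_stats {
 *		uint64_t passes;	// written by child, read by parent
 *	};
 *
 *	int
 *	main(void)
 *	{
 *		struct shared_stats *ss = mmap(NULL, sizeof (*ss),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
 *
 *		if (ss == MAP_FAILED)
 *			return (1);
 *		if (fork() == 0) {	// child: write into the shared page
 *			ss->passes++;
 *			_exit(0);
 *		}
 *		(void) wait(NULL);	// parent: the update is visible here
 *		return (ss->passes == 1 ? 0 : 1);
 *	}
 *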
+ */ + for (f = 0; f < ZTEST_FUNCS; f++) { + zi = &zs->zs_info[f]; + + *zi = ztest_info[f]; + + if (*zi->zi_interval == 0) + zi->zi_call_target = UINT64_MAX; + else + zi->zi_call_target = zopt_time / *zi->zi_interval; + } + + zs->zs_start_time = gethrtime(); + zs->zs_stop_time = zs->zs_start_time + zopt_time * NANOSEC; + + /* + * Run the tests in a loop. These tests include fault injection + * to verify that self-healing data works, and forced crashes + * to verify that we never lose on-disk consistency. + */ + while (gethrtime() < zs->zs_stop_time) { + int status; + pid_t pid; + char *tmp; + + /* + * Initialize the workload counters for each function. + */ + for (f = 0; f < ZTEST_FUNCS; f++) { + zi = &zs->zs_info[f]; + zi->zi_calls = 0; + zi->zi_call_time = 0; + } + + pid = fork(); + + if (pid == -1) + fatal(1, "fork failed"); + + if (pid == 0) { /* child */ + struct rlimit rl = { 1024, 1024 }; + (void) setrlimit(RLIMIT_NOFILE, &rl); + (void) enable_extended_FILE_stdio(-1, -1); + ztest_run(zopt_pool); + exit(0); + } + + while (waitpid(pid, &status, 0) != pid) + continue; + + if (WIFEXITED(status)) { + if (WEXITSTATUS(status) != 0) { + (void) fprintf(stderr, + "child exited with code %d\n", + WEXITSTATUS(status)); + exit(2); + } + } else if (WIFSIGNALED(status)) { + if (WTERMSIG(status) != SIGKILL) { + (void) fprintf(stderr, + "child died with signal %d\n", + WTERMSIG(status)); + exit(3); + } + kills++; + } else { + (void) fprintf(stderr, "something strange happened " + "to child\n"); + exit(4); + } + + iters++; + + if (zopt_verbose >= 1) { + hrtime_t now = gethrtime(); + + now = MIN(now, zs->zs_stop_time); + print_time(zs->zs_stop_time - now, timebuf); + nicenum(zs->zs_space, numbuf); + + (void) printf("Pass %3d, %8s, %3llu ENOSPC, " + "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n", + iters, + WIFEXITED(status) ? "Complete" : "SIGKILL", + (u_longlong_t)zs->zs_enospc_count, + 100.0 * zs->zs_alloc / zs->zs_space, + numbuf, + 100.0 * (now - zs->zs_start_time) / + (zopt_time * NANOSEC), timebuf); + } + + if (zopt_verbose >= 2) { + (void) printf("\nWorkload summary:\n\n"); + (void) printf("%7s %9s %s\n", + "Calls", "Time", "Function"); + (void) printf("%7s %9s %s\n", + "-----", "----", "--------"); + for (f = 0; f < ZTEST_FUNCS; f++) { + Dl_info dli; + + zi = &zs->zs_info[f]; + print_time(zi->zi_call_time, timebuf); + (void) dladdr((void *)zi->zi_func, &dli); + (void) printf("%7llu %9s %s\n", + (u_longlong_t)zi->zi_calls, timebuf, + dli.dli_sname); + } + (void) printf("\n"); + } + + /* + * It's possible that we killed a child during a rename test, in + * which case we'll have a 'ztest_tmp' pool lying around instead + * of 'ztest'. Do a blind rename in case this happened. 
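 *
 * Aside: each pass above runs in a forked child so a deliberate SIGKILL ends
 * only that pass; the parent then classifies the waitpid() status, counting
 * SIGKILL as an expected "kill" and anything else abnormal as a failure.
 * The classification on its own -- run_pass() and the helper name are
 * placeholders, not part of this change:
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	extern void run_pass(void);	// one workload pass, run in the child
 *
 *	static int
 *	run_one_pass(int *was_killed)
 *	{
 *		int status;
 *		pid_t pid = fork();
 *
 *		if (pid == -1)
 *			return (-1);
 *		if (pid == 0) {
 *			run_pass();
 *			_exit(0);
 *		}
 *		while (waitpid(pid, &status, 0) != pid)
 *			continue;
 *		*was_killed = 0;
 *		if (WIFEXITED(status))
 *			return (WEXITSTATUS(status) == 0 ? 0 : -1);
 *		if (WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL) {
 *			*was_killed = 1;	// expected forced-crash case
 *			return (0);
 *		}
 *		return (-1);
 *	}
 *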
+ */ + tmp = umem_alloc(strlen(zopt_pool) + 5, UMEM_NOFAIL); + (void) strcpy(tmp, zopt_pool); + (void) strcat(tmp, "_tmp"); + kernel_init(FREAD | FWRITE); + (void) spa_rename(tmp, zopt_pool); + kernel_fini(); + umem_free(tmp, strlen(tmp) + 1); + } + + ztest_verify_blocks(zopt_pool); + + if (zopt_verbose >= 1) { + (void) printf("%d killed, %d completed, %.0f%% kill rate\n", + kills, iters - kills, (100.0 * kills) / MAX(1, iters)); + } + + return (0); +} diff --git a/external/cddl/osnet/dist/common/acl/acl_common.c b/external/cddl/osnet/dist/common/acl/acl_common.c new file mode 100644 index 000000000000..94a9fc9c11a1 --- /dev/null +++ b/external/cddl/osnet/dist/common/acl/acl_common.c @@ -0,0 +1,1724 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#if defined(_KERNEL) +#include +#include +#include +#else +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define ASSERT assert +#endif + +#define ACE_POSIX_SUPPORTED_BITS (ACE_READ_DATA | \ + ACE_WRITE_DATA | ACE_APPEND_DATA | ACE_EXECUTE | \ + ACE_READ_ATTRIBUTES | ACE_READ_ACL | ACE_WRITE_ACL) + + +#define ACL_SYNCHRONIZE_SET_DENY 0x0000001 +#define ACL_SYNCHRONIZE_SET_ALLOW 0x0000002 +#define ACL_SYNCHRONIZE_ERR_DENY 0x0000004 +#define ACL_SYNCHRONIZE_ERR_ALLOW 0x0000008 + +#define ACL_WRITE_OWNER_SET_DENY 0x0000010 +#define ACL_WRITE_OWNER_SET_ALLOW 0x0000020 +#define ACL_WRITE_OWNER_ERR_DENY 0x0000040 +#define ACL_WRITE_OWNER_ERR_ALLOW 0x0000080 + +#define ACL_DELETE_SET_DENY 0x0000100 +#define ACL_DELETE_SET_ALLOW 0x0000200 +#define ACL_DELETE_ERR_DENY 0x0000400 +#define ACL_DELETE_ERR_ALLOW 0x0000800 + +#define ACL_WRITE_ATTRS_OWNER_SET_DENY 0x0001000 +#define ACL_WRITE_ATTRS_OWNER_SET_ALLOW 0x0002000 +#define ACL_WRITE_ATTRS_OWNER_ERR_DENY 0x0004000 +#define ACL_WRITE_ATTRS_OWNER_ERR_ALLOW 0x0008000 + +#define ACL_WRITE_ATTRS_WRITER_SET_DENY 0x0010000 +#define ACL_WRITE_ATTRS_WRITER_SET_ALLOW 0x0020000 +#define ACL_WRITE_ATTRS_WRITER_ERR_DENY 0x0040000 +#define ACL_WRITE_ATTRS_WRITER_ERR_ALLOW 0x0080000 + +#define ACL_WRITE_NAMED_WRITER_SET_DENY 0x0100000 +#define ACL_WRITE_NAMED_WRITER_SET_ALLOW 0x0200000 +#define ACL_WRITE_NAMED_WRITER_ERR_DENY 0x0400000 +#define ACL_WRITE_NAMED_WRITER_ERR_ALLOW 0x0800000 + +#define ACL_READ_NAMED_READER_SET_DENY 0x1000000 +#define ACL_READ_NAMED_READER_SET_ALLOW 0x2000000 +#define ACL_READ_NAMED_READER_ERR_DENY 0x4000000 +#define ACL_READ_NAMED_READER_ERR_ALLOW 0x8000000 + + +#define ACE_VALID_MASK_BITS (\ + ACE_READ_DATA | \ + ACE_LIST_DIRECTORY | \ + ACE_WRITE_DATA | \ + ACE_ADD_FILE | \ + 
ACE_APPEND_DATA | \ + ACE_ADD_SUBDIRECTORY | \ + ACE_READ_NAMED_ATTRS | \ + ACE_WRITE_NAMED_ATTRS | \ + ACE_EXECUTE | \ + ACE_DELETE_CHILD | \ + ACE_READ_ATTRIBUTES | \ + ACE_WRITE_ATTRIBUTES | \ + ACE_DELETE | \ + ACE_READ_ACL | \ + ACE_WRITE_ACL | \ + ACE_WRITE_OWNER | \ + ACE_SYNCHRONIZE) + +#define ACE_MASK_UNDEFINED 0x80000000 + +#define ACE_VALID_FLAG_BITS (ACE_FILE_INHERIT_ACE | \ + ACE_DIRECTORY_INHERIT_ACE | \ + ACE_NO_PROPAGATE_INHERIT_ACE | ACE_INHERIT_ONLY_ACE | \ + ACE_SUCCESSFUL_ACCESS_ACE_FLAG | ACE_FAILED_ACCESS_ACE_FLAG | \ + ACE_IDENTIFIER_GROUP | ACE_OWNER | ACE_GROUP | ACE_EVERYONE) + +/* + * ACL conversion helpers + */ + +typedef enum { + ace_unused, + ace_user_obj, + ace_user, + ace_group, /* includes GROUP and GROUP_OBJ */ + ace_other_obj +} ace_to_aent_state_t; + +typedef struct acevals { + uid_t key; + avl_node_t avl; + uint32_t mask; + uint32_t allowed; + uint32_t denied; + int aent_type; +} acevals_t; + +typedef struct ace_list { + acevals_t user_obj; + avl_tree_t user; + int numusers; + acevals_t group_obj; + avl_tree_t group; + int numgroups; + acevals_t other_obj; + uint32_t acl_mask; + int hasmask; + int dfacl_flag; + ace_to_aent_state_t state; + int seen; /* bitmask of all aclent_t a_type values seen */ +} ace_list_t; + +ace_t trivial_acl[] = { + {(uid_t)-1, 0, ACE_OWNER, ACE_ACCESS_DENIED_ACE_TYPE}, + {(uid_t)-1, ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| + ACE_WRITE_NAMED_ATTRS, ACE_OWNER, ACE_ACCESS_ALLOWED_ACE_TYPE}, + {(uid_t)-1, 0, ACE_GROUP|ACE_IDENTIFIER_GROUP, + ACE_ACCESS_DENIED_ACE_TYPE}, + {(uid_t)-1, 0, ACE_GROUP|ACE_IDENTIFIER_GROUP, + ACE_ACCESS_ALLOWED_ACE_TYPE}, + {(uid_t)-1, ACE_WRITE_ACL|ACE_WRITE_OWNER| ACE_WRITE_ATTRIBUTES| + ACE_WRITE_NAMED_ATTRS, ACE_EVERYONE, ACE_ACCESS_DENIED_ACE_TYPE}, + {(uid_t)-1, ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_NAMED_ATTRS| + ACE_SYNCHRONIZE, ACE_EVERYONE, ACE_ACCESS_ALLOWED_ACE_TYPE} +}; + + +void +adjust_ace_pair_common(void *pair, size_t access_off, + size_t pairsize, mode_t mode) +{ + char *datap = (char *)pair; + uint32_t *amask0 = (uint32_t *)(uintptr_t)(datap + access_off); + uint32_t *amask1 = (uint32_t *)(uintptr_t)(datap + pairsize + + access_off); + if (mode & S_IROTH) + *amask1 |= ACE_READ_DATA; + else + *amask0 |= ACE_READ_DATA; + if (mode & S_IWOTH) + *amask1 |= ACE_WRITE_DATA|ACE_APPEND_DATA; + else + *amask0 |= ACE_WRITE_DATA|ACE_APPEND_DATA; + if (mode & S_IXOTH) + *amask1 |= ACE_EXECUTE; + else + *amask0 |= ACE_EXECUTE; +} + +void +adjust_ace_pair(ace_t *pair, mode_t mode) +{ + adjust_ace_pair_common(pair, offsetof(ace_t, a_access_mask), + sizeof (ace_t), mode); +} + +static void +ace_allow_deny_helper(uint16_t type, boolean_t *allow, boolean_t *deny) +{ + if (type == ACE_ACCESS_ALLOWED_ACE_TYPE) + *allow = B_TRUE; + else if (type == ACE_ACCESS_DENIED_ACE_TYPE) + *deny = B_TRUE; +} + +/* + * ace_trivial: + * determine whether an ace_t acl is trivial + * + * Trivialness implies that the acl is composed of only + * owner, group, everyone entries. ACL can't + * have read_acl denied, and write_owner/write_acl/write_attributes + * can only be owner@ entry. 
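 *
 * As a worked example: for a plain mode such as 0644 a trivial ACL can be
 * assembled from the trivial_acl[] template above by copying it and letting
 * adjust_ace_pair() route each rwx bit of the owner, group and other classes
 * into the allow or deny ace of the corresponding pair.  The helper below is
 * an illustrative sketch (not a function of this file) and assumes this
 * file's declarations are in scope:
 *
 *	// Fill acl[6] with a trivial ACL equivalent to mode's permission bits.
 *	static void
 *	build_trivial_acl(ace_t acl[6], mode_t mode)
 *	{
 *		(void) memcpy(acl, trivial_acl, sizeof (trivial_acl));
 *		adjust_ace_pair(&acl[0], (mode >> 6) & 07);	// owner@ pair
 *		adjust_ace_pair(&acl[2], (mode >> 3) & 07);	// group@ pair
 *		adjust_ace_pair(&acl[4], mode & 07);		// everyone@ pair
 *		// ace_trivial(acl, 6) should then report 0 (trivial).
 *	}
 *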
+ */ +int +ace_trivial_common(void *acep, int aclcnt, + uint64_t (*walk)(void *, uint64_t, int aclcnt, + uint16_t *, uint16_t *, uint32_t *)) +{ + boolean_t owner_allow = B_FALSE; + boolean_t group_allow = B_FALSE; + boolean_t everyone_allow = B_FALSE; + boolean_t owner_deny = B_FALSE; + boolean_t group_deny = B_FALSE; + boolean_t everyone_deny = B_FALSE; + uint16_t flags; + uint32_t mask; + uint16_t type; + uint64_t cookie = 0; + + while (cookie = walk(acep, cookie, aclcnt, &flags, &type, &mask)) { + switch (flags & ACE_TYPE_FLAGS) { + case ACE_OWNER: + if (group_allow || group_deny || everyone_allow || + everyone_deny) + return (1); + ace_allow_deny_helper(type, &owner_allow, &owner_deny); + break; + case ACE_GROUP|ACE_IDENTIFIER_GROUP: + if (everyone_allow || everyone_deny && + (!owner_allow && !owner_deny)) + return (1); + ace_allow_deny_helper(type, &group_allow, &group_deny); + break; + + case ACE_EVERYONE: + if (!owner_allow && !owner_deny && + !group_allow && !group_deny) + return (1); + ace_allow_deny_helper(type, + &everyone_allow, &everyone_deny); + break; + default: + return (1); + + } + + if (flags & (ACE_FILE_INHERIT_ACE| + ACE_DIRECTORY_INHERIT_ACE|ACE_NO_PROPAGATE_INHERIT_ACE| + ACE_INHERIT_ONLY_ACE)) + return (1); + + /* + * Special check for some special bits + * + * Don't allow anybody to deny reading basic + * attributes or a files ACL. + */ + if ((mask & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) && + (type == ACE_ACCESS_DENIED_ACE_TYPE)) + return (1); + + /* + * Allow on owner@ to allow + * write_acl/write_owner/write_attributes + */ + if (type == ACE_ACCESS_ALLOWED_ACE_TYPE && + (!(flags & ACE_OWNER) && (mask & + (ACE_WRITE_OWNER|ACE_WRITE_ACL|ACE_WRITE_ATTRIBUTES)))) + return (1); + + } + + if (!owner_allow || !owner_deny || !group_allow || !group_deny || + !everyone_allow || !everyone_deny) + return (1); + + return (0); +} + +uint64_t +ace_walk(void *datap, uint64_t cookie, int aclcnt, uint16_t *flags, + uint16_t *type, uint32_t *mask) +{ + ace_t *acep = datap; + + if (cookie >= aclcnt) + return (0); + + *flags = acep[cookie].a_flags; + *type = acep[cookie].a_type; + *mask = acep[cookie++].a_access_mask; + + return (cookie); +} + +int +ace_trivial(ace_t *acep, int aclcnt) +{ + return (ace_trivial_common(acep, aclcnt, ace_walk)); +} + +/* + * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. + * v = Ptr to array/vector of objs + * n = # objs in the array + * s = size of each obj (must be multiples of a word size) + * f = ptr to function to compare two objs + * returns (-1 = less than, 0 = equal, 1 = greater than + */ +void +ksort(caddr_t v, int n, int s, int (*f)()) +{ + int g, i, j, ii; + unsigned int *p1, *p2; + unsigned int tmp; + + /* No work to do */ + if (v == NULL || n <= 1) + return; + + /* Sanity check on arguments */ + ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); + ASSERT(s > 0); + for (g = n / 2; g > 0; g /= 2) { + for (i = g; i < n; i++) { + for (j = i - g; j >= 0 && + (*f)(v + j * s, v + (j + g) * s) == 1; + j -= g) { + p1 = (void *)(v + j * s); + p2 = (void *)(v + (j + g) * s); + for (ii = 0; ii < s / 4; ii++) { + tmp = *p1; + *p1++ = *p2; + *p2++ = tmp; + } + } + } + } +} + +/* + * Compare two acls, all fields. 
Returns: + * -1 (less than) + * 0 (equal) + * +1 (greater than) + */ +int +cmp2acls(void *a, void *b) +{ + aclent_t *x = (aclent_t *)a; + aclent_t *y = (aclent_t *)b; + + /* Compare types */ + if (x->a_type < y->a_type) + return (-1); + if (x->a_type > y->a_type) + return (1); + /* Equal types; compare id's */ + if (x->a_id < y->a_id) + return (-1); + if (x->a_id > y->a_id) + return (1); + /* Equal ids; compare perms */ + if (x->a_perm < y->a_perm) + return (-1); + if (x->a_perm > y->a_perm) + return (1); + /* Totally equal */ + return (0); +} + +/*ARGSUSED*/ +static void * +cacl_realloc(void *ptr, size_t size, size_t new_size) +{ +#if defined(_KERNEL) + void *tmp; + + tmp = kmem_alloc(new_size, KM_SLEEP); + (void) memcpy(tmp, ptr, (size < new_size) ? size : new_size); + kmem_free(ptr, size); + return (tmp); +#else + return (realloc(ptr, new_size)); +#endif +} + +static int +cacl_malloc(void **ptr, size_t size) +{ +#if defined(_KERNEL) + *ptr = kmem_zalloc(size, KM_SLEEP); + return (0); +#else + *ptr = calloc(1, size); + if (*ptr == NULL) + return (errno); + + return (0); +#endif +} + +/*ARGSUSED*/ +static void +cacl_free(void *ptr, size_t size) +{ +#if defined(_KERNEL) + kmem_free(ptr, size); +#else + free(ptr); +#endif +} + +acl_t * +acl_alloc(enum acl_type type) +{ + acl_t *aclp; + + if (cacl_malloc((void **)&aclp, sizeof (acl_t)) != 0) + return (NULL); + + aclp->acl_aclp = NULL; + aclp->acl_cnt = 0; + + switch (type) { + case ACE_T: + aclp->acl_type = ACE_T; + aclp->acl_entry_size = sizeof (ace_t); + break; + case ACLENT_T: + aclp->acl_type = ACLENT_T; + aclp->acl_entry_size = sizeof (aclent_t); + break; + default: + acl_free(aclp); + aclp = NULL; + } + return (aclp); +} + +/* + * Free acl_t structure + */ +void +acl_free(acl_t *aclp) +{ + int acl_size; + + if (aclp == NULL) + return; + + if (aclp->acl_aclp) { + acl_size = aclp->acl_cnt * aclp->acl_entry_size; + cacl_free(aclp->acl_aclp, acl_size); + } + + cacl_free(aclp, sizeof (acl_t)); +} + +static uint32_t +access_mask_set(int haswriteperm, int hasreadperm, int isowner, int isallow) +{ + uint32_t access_mask = 0; + int acl_produce; + int synchronize_set = 0, write_owner_set = 0; + int delete_set = 0, write_attrs_set = 0; + int read_named_set = 0, write_named_set = 0; + + acl_produce = (ACL_SYNCHRONIZE_SET_ALLOW | + ACL_WRITE_ATTRS_OWNER_SET_ALLOW | + ACL_WRITE_ATTRS_WRITER_SET_DENY); + + if (isallow) { + synchronize_set = ACL_SYNCHRONIZE_SET_ALLOW; + write_owner_set = ACL_WRITE_OWNER_SET_ALLOW; + delete_set = ACL_DELETE_SET_ALLOW; + if (hasreadperm) + read_named_set = ACL_READ_NAMED_READER_SET_ALLOW; + if (haswriteperm) + write_named_set = ACL_WRITE_NAMED_WRITER_SET_ALLOW; + if (isowner) + write_attrs_set = ACL_WRITE_ATTRS_OWNER_SET_ALLOW; + else if (haswriteperm) + write_attrs_set = ACL_WRITE_ATTRS_WRITER_SET_ALLOW; + } else { + + synchronize_set = ACL_SYNCHRONIZE_SET_DENY; + write_owner_set = ACL_WRITE_OWNER_SET_DENY; + delete_set = ACL_DELETE_SET_DENY; + if (hasreadperm) + read_named_set = ACL_READ_NAMED_READER_SET_DENY; + if (haswriteperm) + write_named_set = ACL_WRITE_NAMED_WRITER_SET_DENY; + if (isowner) + write_attrs_set = ACL_WRITE_ATTRS_OWNER_SET_DENY; + else if (haswriteperm) + write_attrs_set = ACL_WRITE_ATTRS_WRITER_SET_DENY; + else + /* + * If the entity is not the owner and does not + * have write permissions ACE_WRITE_ATTRIBUTES will + * always go in the DENY ACE. 
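 *
 * Aside, on the allocation helpers defined a little earlier: acl_alloc()
 * returns an acl_t with no entry array, so a typical caller attaches one of
 * acl_cnt * acl_entry_size bytes and lets acl_free() release both.  An
 * illustrative (hypothetical) helper, assuming this file's declarations:
 *
 *	// Build an acl_t holding cnt ace_t entries copied from src.
 *	static acl_t *
 *	make_ace_acl(const ace_t *src, int cnt)
 *	{
 *		acl_t *aclp = acl_alloc(ACE_T);
 *
 *		if (aclp == NULL)
 *			return (NULL);
 *		if (cacl_malloc((void **)&aclp->acl_aclp,
 *		    cnt * sizeof (ace_t)) != 0) {
 *			acl_free(aclp);
 *			return (NULL);
 *		}
 *		(void) memcpy(aclp->acl_aclp, src, cnt * sizeof (ace_t));
 *		aclp->acl_cnt = cnt;
 *		return (aclp);		// release with acl_free()
 *	}
 *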
+ */ + access_mask |= ACE_WRITE_ATTRIBUTES; + } + + if (acl_produce & synchronize_set) + access_mask |= ACE_SYNCHRONIZE; + if (acl_produce & write_owner_set) + access_mask |= ACE_WRITE_OWNER; + if (acl_produce & delete_set) + access_mask |= ACE_DELETE; + if (acl_produce & write_attrs_set) + access_mask |= ACE_WRITE_ATTRIBUTES; + if (acl_produce & read_named_set) + access_mask |= ACE_READ_NAMED_ATTRS; + if (acl_produce & write_named_set) + access_mask |= ACE_WRITE_NAMED_ATTRS; + + return (access_mask); +} + +/* + * Given an mode_t, convert it into an access_mask as used + * by nfsace, assuming aclent_t -> nfsace semantics. + */ +static uint32_t +mode_to_ace_access(mode_t mode, int isdir, int isowner, int isallow) +{ + uint32_t access = 0; + int haswriteperm = 0; + int hasreadperm = 0; + + if (isallow) { + haswriteperm = (mode & S_IWOTH); + hasreadperm = (mode & S_IROTH); + } else { + haswriteperm = !(mode & S_IWOTH); + hasreadperm = !(mode & S_IROTH); + } + + /* + * The following call takes care of correctly setting the following + * mask bits in the access_mask: + * ACE_SYNCHRONIZE, ACE_WRITE_OWNER, ACE_DELETE, + * ACE_WRITE_ATTRIBUTES, ACE_WRITE_NAMED_ATTRS, ACE_READ_NAMED_ATTRS + */ + access = access_mask_set(haswriteperm, hasreadperm, isowner, isallow); + + if (isallow) { + access |= ACE_READ_ACL | ACE_READ_ATTRIBUTES; + if (isowner) + access |= ACE_WRITE_ACL; + } else { + if (! isowner) + access |= ACE_WRITE_ACL; + } + + /* read */ + if (mode & S_IROTH) { + access |= ACE_READ_DATA; + } + /* write */ + if (mode & S_IWOTH) { + access |= ACE_WRITE_DATA | + ACE_APPEND_DATA; + if (isdir) + access |= ACE_DELETE_CHILD; + } + /* exec */ + if (mode & 01) { + access |= ACE_EXECUTE; + } + + return (access); +} + +/* + * Given an nfsace (presumably an ALLOW entry), make a + * corresponding DENY entry at the address given. + */ +static void +ace_make_deny(ace_t *allow, ace_t *deny, int isdir, int isowner) +{ + (void) memcpy(deny, allow, sizeof (ace_t)); + + deny->a_who = allow->a_who; + + deny->a_type = ACE_ACCESS_DENIED_ACE_TYPE; + deny->a_access_mask ^= ACE_POSIX_SUPPORTED_BITS; + if (isdir) + deny->a_access_mask ^= ACE_DELETE_CHILD; + + deny->a_access_mask &= ~(ACE_SYNCHRONIZE | ACE_WRITE_OWNER | + ACE_DELETE | ACE_WRITE_ATTRIBUTES | ACE_READ_NAMED_ATTRS | + ACE_WRITE_NAMED_ATTRS); + deny->a_access_mask |= access_mask_set((allow->a_access_mask & + ACE_WRITE_DATA), (allow->a_access_mask & ACE_READ_DATA), isowner, + B_FALSE); +} +/* + * Make an initial pass over an array of aclent_t's. Gather + * information such as an ACL_MASK (if any), number of users, + * number of groups, and whether the array needs to be sorted. + */ +static int +ln_aent_preprocess(aclent_t *aclent, int n, + int *hasmask, mode_t *mask, + int *numuser, int *numgroup, int *needsort) +{ + int error = 0; + int i; + int curtype = 0; + + *hasmask = 0; + *mask = 07; + *needsort = 0; + *numuser = 0; + *numgroup = 0; + + for (i = 0; i < n; i++) { + if (aclent[i].a_type < curtype) + *needsort = 1; + else if (aclent[i].a_type > curtype) + curtype = aclent[i].a_type; + if (aclent[i].a_type & USER) + (*numuser)++; + if (aclent[i].a_type & (GROUP | GROUP_OBJ)) + (*numgroup)++; + if (aclent[i].a_type & CLASS_OBJ) { + if (*hasmask) { + error = EINVAL; + goto out; + } else { + *hasmask = 1; + *mask = aclent[i].a_perm; + } + } + } + + if ((! 
*hasmask) && (*numuser + *numgroup > 1)) { + error = EINVAL; + goto out; + } + +out: + return (error); +} + +/* + * Convert an array of aclent_t into an array of nfsace entries, + * following POSIX draft -> nfsv4 conversion semantics as outlined in + * the IETF draft. + */ +static int +ln_aent_to_ace(aclent_t *aclent, int n, ace_t **acepp, int *rescount, int isdir) +{ + int error = 0; + mode_t mask; + int numuser, numgroup, needsort; + int resultsize = 0; + int i, groupi = 0, skip; + ace_t *acep, *result = NULL; + int hasmask; + + error = ln_aent_preprocess(aclent, n, &hasmask, &mask, + &numuser, &numgroup, &needsort); + if (error != 0) + goto out; + + /* allow + deny for each aclent */ + resultsize = n * 2; + if (hasmask) { + /* + * stick extra deny on the group_obj and on each + * user|group for the mask (the group_obj was added + * into the count for numgroup) + */ + resultsize += numuser + numgroup; + /* ... and don't count the mask itself */ + resultsize -= 2; + } + + /* sort the source if necessary */ + if (needsort) + ksort((caddr_t)aclent, n, sizeof (aclent_t), cmp2acls); + + if (cacl_malloc((void **)&result, resultsize * sizeof (ace_t)) != 0) + goto out; + + acep = result; + + for (i = 0; i < n; i++) { + /* + * don't process CLASS_OBJ (mask); mask was grabbed in + * ln_aent_preprocess() + */ + if (aclent[i].a_type & CLASS_OBJ) + continue; + + /* If we need an ACL_MASK emulator, prepend it now */ + if ((hasmask) && + (aclent[i].a_type & (USER | GROUP | GROUP_OBJ))) { + acep->a_type = ACE_ACCESS_DENIED_ACE_TYPE; + acep->a_flags = 0; + if (aclent[i].a_type & GROUP_OBJ) { + acep->a_who = (uid_t)-1; + acep->a_flags |= + (ACE_IDENTIFIER_GROUP|ACE_GROUP); + } else if (aclent[i].a_type & USER) { + acep->a_who = aclent[i].a_id; + } else { + acep->a_who = aclent[i].a_id; + acep->a_flags |= ACE_IDENTIFIER_GROUP; + } + if (aclent[i].a_type & ACL_DEFAULT) { + acep->a_flags |= ACE_INHERIT_ONLY_ACE | + ACE_FILE_INHERIT_ACE | + ACE_DIRECTORY_INHERIT_ACE; + } + /* + * Set the access mask for the prepended deny + * ace. To do this, we invert the mask (found + * in ln_aent_preprocess()) then convert it to an + * DENY ace access_mask. + */ + acep->a_access_mask = mode_to_ace_access((mask ^ 07), + isdir, 0, 0); + acep += 1; + } + + /* handle a_perm -> access_mask */ + acep->a_access_mask = mode_to_ace_access(aclent[i].a_perm, + isdir, aclent[i].a_type & USER_OBJ, 1); + + /* emulate a default aclent */ + if (aclent[i].a_type & ACL_DEFAULT) { + acep->a_flags |= ACE_INHERIT_ONLY_ACE | + ACE_FILE_INHERIT_ACE | + ACE_DIRECTORY_INHERIT_ACE; + } + + /* + * handle a_perm and a_id + * + * this must be done last, since it involves the + * corresponding deny aces, which are handled + * differently for each different a_type. + */ + if (aclent[i].a_type & USER_OBJ) { + acep->a_who = (uid_t)-1; + acep->a_flags |= ACE_OWNER; + ace_make_deny(acep, acep + 1, isdir, B_TRUE); + acep += 2; + } else if (aclent[i].a_type & USER) { + acep->a_who = aclent[i].a_id; + ace_make_deny(acep, acep + 1, isdir, B_FALSE); + acep += 2; + } else if (aclent[i].a_type & (GROUP_OBJ | GROUP)) { + if (aclent[i].a_type & GROUP_OBJ) { + acep->a_who = (uid_t)-1; + acep->a_flags |= ACE_GROUP; + } else { + acep->a_who = aclent[i].a_id; + } + acep->a_flags |= ACE_IDENTIFIER_GROUP; + /* + * Set the corresponding deny for the group ace. + * + * The deny aces go after all of the groups, unlike + * everything else, where they immediately follow + * the allow ace. 
+ * + * We calculate "skip", the number of slots to + * skip ahead for the deny ace, here. + * + * The pattern is: + * MD1 A1 MD2 A2 MD3 A3 D1 D2 D3 + * thus, skip is + * (2 * numgroup) - 1 - groupi + * (2 * numgroup) to account for MD + A + * - 1 to account for the fact that we're on the + * access (A), not the mask (MD) + * - groupi to account for the fact that we have + * passed up groupi number of MD's. + */ + skip = (2 * numgroup) - 1 - groupi; + ace_make_deny(acep, acep + skip, isdir, B_FALSE); + /* + * If we just did the last group, skip acep past + * all of the denies; else, just move ahead one. + */ + if (++groupi >= numgroup) + acep += numgroup + 1; + else + acep += 1; + } else if (aclent[i].a_type & OTHER_OBJ) { + acep->a_who = (uid_t)-1; + acep->a_flags |= ACE_EVERYONE; + ace_make_deny(acep, acep + 1, isdir, B_FALSE); + acep += 2; + } else { + error = EINVAL; + goto out; + } + } + + *acepp = result; + *rescount = resultsize; + +out: + if (error != 0) { + if ((result != NULL) && (resultsize > 0)) { + cacl_free(result, resultsize * sizeof (ace_t)); + } + } + + return (error); +} + +static int +convert_aent_to_ace(aclent_t *aclentp, int aclcnt, int isdir, + ace_t **retacep, int *retacecnt) +{ + ace_t *acep; + ace_t *dfacep; + int acecnt = 0; + int dfacecnt = 0; + int dfaclstart = 0; + int dfaclcnt = 0; + aclent_t *aclp; + int i; + int error; + int acesz, dfacesz; + + ksort((caddr_t)aclentp, aclcnt, sizeof (aclent_t), cmp2acls); + + for (i = 0, aclp = aclentp; i < aclcnt; aclp++, i++) { + if (aclp->a_type & ACL_DEFAULT) + break; + } + + if (i < aclcnt) { + dfaclstart = i; + dfaclcnt = aclcnt - i; + } + + if (dfaclcnt && isdir == 0) { + return (EINVAL); + } + + error = ln_aent_to_ace(aclentp, i, &acep, &acecnt, isdir); + if (error) + return (error); + + if (dfaclcnt) { + error = ln_aent_to_ace(&aclentp[dfaclstart], dfaclcnt, + &dfacep, &dfacecnt, isdir); + if (error) { + if (acep) { + cacl_free(acep, acecnt * sizeof (ace_t)); + } + return (error); + } + } + + if (dfacecnt != 0) { + acesz = sizeof (ace_t) * acecnt; + dfacesz = sizeof (ace_t) * dfacecnt; + acep = cacl_realloc(acep, acesz, acesz + dfacesz); + if (acep == NULL) + return (ENOMEM); + if (dfaclcnt) { + (void) memcpy(acep + acecnt, dfacep, dfacesz); + } + } + if (dfaclcnt) + cacl_free(dfacep, dfacecnt * sizeof (ace_t)); + + *retacecnt = acecnt + dfacecnt; + *retacep = acep; + return (0); +} + +static int +ace_mask_to_mode(uint32_t mask, o_mode_t *modep, int isdir) +{ + int error = 0; + o_mode_t mode = 0; + uint32_t bits, wantbits; + + /* read */ + if (mask & ACE_READ_DATA) + mode |= S_IROTH; + + /* write */ + wantbits = (ACE_WRITE_DATA | ACE_APPEND_DATA); + if (isdir) + wantbits |= ACE_DELETE_CHILD; + bits = mask & wantbits; + if (bits != 0) { + if (bits != wantbits) { + error = ENOTSUP; + goto out; + } + mode |= S_IWOTH; + } + + /* exec */ + if (mask & ACE_EXECUTE) { + mode |= S_IXOTH; + } + + *modep = mode; + +out: + return (error); +} + +static void +acevals_init(acevals_t *vals, uid_t key) +{ + bzero(vals, sizeof (*vals)); + vals->allowed = ACE_MASK_UNDEFINED; + vals->denied = ACE_MASK_UNDEFINED; + vals->mask = ACE_MASK_UNDEFINED; + vals->key = key; +} + +static void +ace_list_init(ace_list_t *al, int dfacl_flag) +{ + acevals_init(&al->user_obj, NULL); + acevals_init(&al->group_obj, NULL); + acevals_init(&al->other_obj, NULL); + al->numusers = 0; + al->numgroups = 0; + al->acl_mask = 0; + al->hasmask = 0; + al->state = ace_unused; + al->seen = 0; + al->dfacl_flag = dfacl_flag; +} + +/* + * Find or create an acevals 
holder for a given id and avl tree. + * + * Note that only one thread will ever touch these avl trees, so + * there is no need for locking. + */ +static acevals_t * +acevals_find(ace_t *ace, avl_tree_t *avl, int *num) +{ + acevals_t key, *rc; + avl_index_t where; + + key.key = ace->a_who; + rc = avl_find(avl, &key, &where); + if (rc != NULL) + return (rc); + + /* this memory is freed by ln_ace_to_aent()->ace_list_free() */ + if (cacl_malloc((void **)&rc, sizeof (acevals_t)) != 0) + return (NULL); + + acevals_init(rc, ace->a_who); + avl_insert(avl, rc, where); + (*num)++; + + return (rc); +} + +static int +access_mask_check(ace_t *acep, int mask_bit, int isowner) +{ + int set_deny, err_deny; + int set_allow, err_allow; + int acl_consume; + int haswriteperm, hasreadperm; + + if (acep->a_type == ACE_ACCESS_DENIED_ACE_TYPE) { + haswriteperm = (acep->a_access_mask & ACE_WRITE_DATA) ? 0 : 1; + hasreadperm = (acep->a_access_mask & ACE_READ_DATA) ? 0 : 1; + } else { + haswriteperm = (acep->a_access_mask & ACE_WRITE_DATA) ? 1 : 0; + hasreadperm = (acep->a_access_mask & ACE_READ_DATA) ? 1 : 0; + } + + acl_consume = (ACL_SYNCHRONIZE_ERR_DENY | + ACL_DELETE_ERR_DENY | + ACL_WRITE_OWNER_ERR_DENY | + ACL_WRITE_OWNER_ERR_ALLOW | + ACL_WRITE_ATTRS_OWNER_SET_ALLOW | + ACL_WRITE_ATTRS_OWNER_ERR_DENY | + ACL_WRITE_ATTRS_WRITER_SET_DENY | + ACL_WRITE_ATTRS_WRITER_ERR_ALLOW | + ACL_WRITE_NAMED_WRITER_ERR_DENY | + ACL_READ_NAMED_READER_ERR_DENY); + + if (mask_bit == ACE_SYNCHRONIZE) { + set_deny = ACL_SYNCHRONIZE_SET_DENY; + err_deny = ACL_SYNCHRONIZE_ERR_DENY; + set_allow = ACL_SYNCHRONIZE_SET_ALLOW; + err_allow = ACL_SYNCHRONIZE_ERR_ALLOW; + } else if (mask_bit == ACE_WRITE_OWNER) { + set_deny = ACL_WRITE_OWNER_SET_DENY; + err_deny = ACL_WRITE_OWNER_ERR_DENY; + set_allow = ACL_WRITE_OWNER_SET_ALLOW; + err_allow = ACL_WRITE_OWNER_ERR_ALLOW; + } else if (mask_bit == ACE_DELETE) { + set_deny = ACL_DELETE_SET_DENY; + err_deny = ACL_DELETE_ERR_DENY; + set_allow = ACL_DELETE_SET_ALLOW; + err_allow = ACL_DELETE_ERR_ALLOW; + } else if (mask_bit == ACE_WRITE_ATTRIBUTES) { + if (isowner) { + set_deny = ACL_WRITE_ATTRS_OWNER_SET_DENY; + err_deny = ACL_WRITE_ATTRS_OWNER_ERR_DENY; + set_allow = ACL_WRITE_ATTRS_OWNER_SET_ALLOW; + err_allow = ACL_WRITE_ATTRS_OWNER_ERR_ALLOW; + } else if (haswriteperm) { + set_deny = ACL_WRITE_ATTRS_WRITER_SET_DENY; + err_deny = ACL_WRITE_ATTRS_WRITER_ERR_DENY; + set_allow = ACL_WRITE_ATTRS_WRITER_SET_ALLOW; + err_allow = ACL_WRITE_ATTRS_WRITER_ERR_ALLOW; + } else { + if ((acep->a_access_mask & mask_bit) && + (acep->a_type & ACE_ACCESS_ALLOWED_ACE_TYPE)) { + return (ENOTSUP); + } + return (0); + } + } else if (mask_bit == ACE_READ_NAMED_ATTRS) { + if (!hasreadperm) + return (0); + + set_deny = ACL_READ_NAMED_READER_SET_DENY; + err_deny = ACL_READ_NAMED_READER_ERR_DENY; + set_allow = ACL_READ_NAMED_READER_SET_ALLOW; + err_allow = ACL_READ_NAMED_READER_ERR_ALLOW; + } else if (mask_bit == ACE_WRITE_NAMED_ATTRS) { + if (!haswriteperm) + return (0); + + set_deny = ACL_WRITE_NAMED_WRITER_SET_DENY; + err_deny = ACL_WRITE_NAMED_WRITER_ERR_DENY; + set_allow = ACL_WRITE_NAMED_WRITER_SET_ALLOW; + err_allow = ACL_WRITE_NAMED_WRITER_ERR_ALLOW; + } else { + return (EINVAL); + } + + if (acep->a_type == ACE_ACCESS_DENIED_ACE_TYPE) { + if (acl_consume & set_deny) { + if (!(acep->a_access_mask & mask_bit)) { + return (ENOTSUP); + } + } else if (acl_consume & err_deny) { + if (acep->a_access_mask & mask_bit) { + return (ENOTSUP); + } + } + } else { + /* ACE_ACCESS_ALLOWED_ACE_TYPE */ + if (acl_consume & 
set_allow) { + if (!(acep->a_access_mask & mask_bit)) { + return (ENOTSUP); + } + } else if (acl_consume & err_allow) { + if (acep->a_access_mask & mask_bit) { + return (ENOTSUP); + } + } + } + return (0); +} + +static int +ace_to_aent_legal(ace_t *acep) +{ + int error = 0; + int isowner; + + /* only ALLOW or DENY */ + if ((acep->a_type != ACE_ACCESS_ALLOWED_ACE_TYPE) && + (acep->a_type != ACE_ACCESS_DENIED_ACE_TYPE)) { + error = ENOTSUP; + goto out; + } + + /* check for invalid flags */ + if (acep->a_flags & ~(ACE_VALID_FLAG_BITS)) { + error = EINVAL; + goto out; + } + + /* some flags are illegal */ + if (acep->a_flags & (ACE_SUCCESSFUL_ACCESS_ACE_FLAG | + ACE_FAILED_ACCESS_ACE_FLAG | + ACE_NO_PROPAGATE_INHERIT_ACE)) { + error = ENOTSUP; + goto out; + } + + /* check for invalid masks */ + if (acep->a_access_mask & ~(ACE_VALID_MASK_BITS)) { + error = EINVAL; + goto out; + } + + if ((acep->a_flags & ACE_OWNER)) { + isowner = 1; + } else { + isowner = 0; + } + + error = access_mask_check(acep, ACE_SYNCHRONIZE, isowner); + if (error) + goto out; + + error = access_mask_check(acep, ACE_WRITE_OWNER, isowner); + if (error) + goto out; + + error = access_mask_check(acep, ACE_DELETE, isowner); + if (error) + goto out; + + error = access_mask_check(acep, ACE_WRITE_ATTRIBUTES, isowner); + if (error) + goto out; + + error = access_mask_check(acep, ACE_READ_NAMED_ATTRS, isowner); + if (error) + goto out; + + error = access_mask_check(acep, ACE_WRITE_NAMED_ATTRS, isowner); + if (error) + goto out; + + /* more detailed checking of masks */ + if (acep->a_type == ACE_ACCESS_ALLOWED_ACE_TYPE) { + if (! (acep->a_access_mask & ACE_READ_ATTRIBUTES)) { + error = ENOTSUP; + goto out; + } + if ((acep->a_access_mask & ACE_WRITE_DATA) && + (! (acep->a_access_mask & ACE_APPEND_DATA))) { + error = ENOTSUP; + goto out; + } + if ((! (acep->a_access_mask & ACE_WRITE_DATA)) && + (acep->a_access_mask & ACE_APPEND_DATA)) { + error = ENOTSUP; + goto out; + } + } + + /* ACL enforcement */ + if ((acep->a_access_mask & ACE_READ_ACL) && + (acep->a_type != ACE_ACCESS_ALLOWED_ACE_TYPE)) { + error = ENOTSUP; + goto out; + } + if (acep->a_access_mask & ACE_WRITE_ACL) { + if ((acep->a_type == ACE_ACCESS_DENIED_ACE_TYPE) && + (isowner)) { + error = ENOTSUP; + goto out; + } + if ((acep->a_type == ACE_ACCESS_ALLOWED_ACE_TYPE) && + (! 
isowner)) { + error = ENOTSUP; + goto out; + } + } + +out: + return (error); +} + +static int +ace_allow_to_mode(uint32_t mask, o_mode_t *modep, int isdir) +{ + /* ACE_READ_ACL and ACE_READ_ATTRIBUTES must both be set */ + if ((mask & (ACE_READ_ACL | ACE_READ_ATTRIBUTES)) != + (ACE_READ_ACL | ACE_READ_ATTRIBUTES)) { + return (ENOTSUP); + } + + return (ace_mask_to_mode(mask, modep, isdir)); +} + +static int +acevals_to_aent(acevals_t *vals, aclent_t *dest, ace_list_t *list, + uid_t owner, gid_t group, int isdir) +{ + int error; + uint32_t flips = ACE_POSIX_SUPPORTED_BITS; + + if (isdir) + flips |= ACE_DELETE_CHILD; + if (vals->allowed != (vals->denied ^ flips)) { + error = ENOTSUP; + goto out; + } + if ((list->hasmask) && (list->acl_mask != vals->mask) && + (vals->aent_type & (USER | GROUP | GROUP_OBJ))) { + error = ENOTSUP; + goto out; + } + error = ace_allow_to_mode(vals->allowed, &dest->a_perm, isdir); + if (error != 0) + goto out; + dest->a_type = vals->aent_type; + if (dest->a_type & (USER | GROUP)) { + dest->a_id = vals->key; + } else if (dest->a_type & USER_OBJ) { + dest->a_id = owner; + } else if (dest->a_type & GROUP_OBJ) { + dest->a_id = group; + } else if (dest->a_type & OTHER_OBJ) { + dest->a_id = 0; + } else { + error = EINVAL; + goto out; + } + +out: + return (error); +} + + +static int +ace_list_to_aent(ace_list_t *list, aclent_t **aclentp, int *aclcnt, + uid_t owner, gid_t group, int isdir) +{ + int error = 0; + aclent_t *aent, *result = NULL; + acevals_t *vals; + int resultcount; + + if ((list->seen & (USER_OBJ | GROUP_OBJ | OTHER_OBJ)) != + (USER_OBJ | GROUP_OBJ | OTHER_OBJ)) { + error = ENOTSUP; + goto out; + } + if ((! list->hasmask) && (list->numusers + list->numgroups > 0)) { + error = ENOTSUP; + goto out; + } + + resultcount = 3 + list->numusers + list->numgroups; + /* + * This must be the same condition as below, when we add the CLASS_OBJ + * (aka ACL mask) + */ + if ((list->hasmask) || (! list->dfacl_flag)) + resultcount += 1; + + if (cacl_malloc((void **)&result, + resultcount * sizeof (aclent_t)) != 0) { + error = ENOMEM; + goto out; + } + aent = result; + + /* USER_OBJ */ + if (!(list->user_obj.aent_type & USER_OBJ)) { + error = EINVAL; + goto out; + } + + error = acevals_to_aent(&list->user_obj, aent, list, owner, group, + isdir); + + if (error != 0) + goto out; + ++aent; + /* USER */ + vals = NULL; + for (vals = avl_first(&list->user); vals != NULL; + vals = AVL_NEXT(&list->user, vals)) { + if (!(vals->aent_type & USER)) { + error = EINVAL; + goto out; + } + error = acevals_to_aent(vals, aent, list, owner, group, + isdir); + if (error != 0) + goto out; + ++aent; + } + /* GROUP_OBJ */ + if (!(list->group_obj.aent_type & GROUP_OBJ)) { + error = EINVAL; + goto out; + } + error = acevals_to_aent(&list->group_obj, aent, list, owner, group, + isdir); + if (error != 0) + goto out; + ++aent; + /* GROUP */ + vals = NULL; + for (vals = avl_first(&list->group); vals != NULL; + vals = AVL_NEXT(&list->group, vals)) { + if (!(vals->aent_type & GROUP)) { + error = EINVAL; + goto out; + } + error = acevals_to_aent(vals, aent, list, owner, group, + isdir); + if (error != 0) + goto out; + ++aent; + } + /* + * CLASS_OBJ (aka ACL_MASK) + * + * An ACL_MASK is not fabricated if the ACL is a default ACL. + * This is to follow UFS's behavior. + */ + if ((list->hasmask) || (! 
list->dfacl_flag)) { + if (list->hasmask) { + uint32_t flips = ACE_POSIX_SUPPORTED_BITS; + if (isdir) + flips |= ACE_DELETE_CHILD; + error = ace_mask_to_mode(list->acl_mask ^ flips, + &aent->a_perm, isdir); + if (error != 0) + goto out; + } else { + /* fabricate the ACL_MASK from the group permissions */ + error = ace_mask_to_mode(list->group_obj.allowed, + &aent->a_perm, isdir); + if (error != 0) + goto out; + } + aent->a_id = 0; + aent->a_type = CLASS_OBJ | list->dfacl_flag; + ++aent; + } + /* OTHER_OBJ */ + if (!(list->other_obj.aent_type & OTHER_OBJ)) { + error = EINVAL; + goto out; + } + error = acevals_to_aent(&list->other_obj, aent, list, owner, group, + isdir); + if (error != 0) + goto out; + ++aent; + + *aclentp = result; + *aclcnt = resultcount; + +out: + if (error != 0) { + if (result != NULL) + cacl_free(result, resultcount * sizeof (aclent_t)); + } + + return (error); +} + + +/* + * free all data associated with an ace_list + */ +static void +ace_list_free(ace_list_t *al) +{ + acevals_t *node; + void *cookie; + + if (al == NULL) + return; + + cookie = NULL; + while ((node = avl_destroy_nodes(&al->user, &cookie)) != NULL) + cacl_free(node, sizeof (acevals_t)); + cookie = NULL; + while ((node = avl_destroy_nodes(&al->group, &cookie)) != NULL) + cacl_free(node, sizeof (acevals_t)); + + avl_destroy(&al->user); + avl_destroy(&al->group); + + /* free the container itself */ + cacl_free(al, sizeof (ace_list_t)); +} + +static int +acevals_compare(const void *va, const void *vb) +{ + const acevals_t *a = va, *b = vb; + + if (a->key == b->key) + return (0); + + if (a->key > b->key) + return (1); + + else + return (-1); +} + +/* + * Convert a list of ace_t entries to equivalent regular and default + * aclent_t lists. Return error (ENOTSUP) when conversion is not possible. + */ +static int +ln_ace_to_aent(ace_t *ace, int n, uid_t owner, gid_t group, + aclent_t **aclentp, int *aclcnt, aclent_t **dfaclentp, int *dfaclcnt, + int isdir) +{ + int error = 0; + ace_t *acep; + uint32_t bits; + int i; + ace_list_t *normacl = NULL, *dfacl = NULL, *acl; + acevals_t *vals; + + *aclentp = NULL; + *aclcnt = 0; + *dfaclentp = NULL; + *dfaclcnt = 0; + + /* we need at least user_obj, group_obj, and other_obj */ + if (n < 6) { + error = ENOTSUP; + goto out; + } + if (ace == NULL) { + error = EINVAL; + goto out; + } + + error = cacl_malloc((void **)&normacl, sizeof (ace_list_t)); + if (error != 0) + goto out; + + avl_create(&normacl->user, acevals_compare, sizeof (acevals_t), + offsetof(acevals_t, avl)); + avl_create(&normacl->group, acevals_compare, sizeof (acevals_t), + offsetof(acevals_t, avl)); + + ace_list_init(normacl, 0); + + error = cacl_malloc((void **)&dfacl, sizeof (ace_list_t)); + if (error != 0) + goto out; + + avl_create(&dfacl->user, acevals_compare, sizeof (acevals_t), + offsetof(acevals_t, avl)); + avl_create(&dfacl->group, acevals_compare, sizeof (acevals_t), + offsetof(acevals_t, avl)); + ace_list_init(dfacl, ACL_DEFAULT); + + /* process every ace_t... */ + for (i = 0; i < n; i++) { + acep = &ace[i]; + + /* rule out certain cases quickly */ + error = ace_to_aent_legal(acep); + if (error != 0) + goto out; + + /* + * Turn off these bits in order to not have to worry about + * them when doing the checks for compliments. 
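 *
 * Aside: the bits stripped just below are exactly the ones excluded from the
 * complement test later applied by acevals_to_aent(); over the remaining
 * POSIX-representable bits, an allow/deny pair is convertible only when the
 * deny mask is the exact complement of the allow mask.  Restated as a small
 * predicate (illustrative name, not part of this change):
 *
 *	static int
 *	pair_is_complementary(uint32_t allowed, uint32_t denied, int isdir)
 *	{
 *		uint32_t flips = ACE_POSIX_SUPPORTED_BITS;
 *
 *		if (isdir)
 *			flips |= ACE_DELETE_CHILD;	// directories cover this too
 *		return (allowed == (denied ^ flips));
 *	}
 *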
+ */ + acep->a_access_mask &= ~(ACE_WRITE_OWNER | ACE_DELETE | + ACE_SYNCHRONIZE | ACE_WRITE_ATTRIBUTES | + ACE_READ_NAMED_ATTRS | ACE_WRITE_NAMED_ATTRS); + + /* see if this should be a regular or default acl */ + bits = acep->a_flags & + (ACE_INHERIT_ONLY_ACE | + ACE_FILE_INHERIT_ACE | + ACE_DIRECTORY_INHERIT_ACE); + if (bits != 0) { + /* all or nothing on these inherit bits */ + if (bits != (ACE_INHERIT_ONLY_ACE | + ACE_FILE_INHERIT_ACE | + ACE_DIRECTORY_INHERIT_ACE)) { + error = ENOTSUP; + goto out; + } + acl = dfacl; + } else { + acl = normacl; + } + + if ((acep->a_flags & ACE_OWNER)) { + if (acl->state > ace_user_obj) { + error = ENOTSUP; + goto out; + } + acl->state = ace_user_obj; + acl->seen |= USER_OBJ; + vals = &acl->user_obj; + vals->aent_type = USER_OBJ | acl->dfacl_flag; + } else if ((acep->a_flags & ACE_EVERYONE)) { + acl->state = ace_other_obj; + acl->seen |= OTHER_OBJ; + vals = &acl->other_obj; + vals->aent_type = OTHER_OBJ | acl->dfacl_flag; + } else if (acep->a_flags & ACE_IDENTIFIER_GROUP) { + if (acl->state > ace_group) { + error = ENOTSUP; + goto out; + } + if ((acep->a_flags & ACE_GROUP)) { + acl->seen |= GROUP_OBJ; + vals = &acl->group_obj; + vals->aent_type = GROUP_OBJ | acl->dfacl_flag; + } else { + acl->seen |= GROUP; + vals = acevals_find(acep, &acl->group, + &acl->numgroups); + if (vals == NULL) { + error = ENOMEM; + goto out; + } + vals->aent_type = GROUP | acl->dfacl_flag; + } + acl->state = ace_group; + } else { + if (acl->state > ace_user) { + error = ENOTSUP; + goto out; + } + acl->state = ace_user; + acl->seen |= USER; + vals = acevals_find(acep, &acl->user, + &acl->numusers); + if (vals == NULL) { + error = ENOMEM; + goto out; + } + vals->aent_type = USER | acl->dfacl_flag; + } + + if (!(acl->state > ace_unused)) { + error = EINVAL; + goto out; + } + + if (acep->a_type == ACE_ACCESS_ALLOWED_ACE_TYPE) { + /* no more than one allowed per aclent_t */ + if (vals->allowed != ACE_MASK_UNDEFINED) { + error = ENOTSUP; + goto out; + } + vals->allowed = acep->a_access_mask; + } else { + /* + * it's a DENY; if there was a previous DENY, it + * must have been an ACL_MASK. + */ + if (vals->denied != ACE_MASK_UNDEFINED) { + /* ACL_MASK is for USER and GROUP only */ + if ((acl->state != ace_user) && + (acl->state != ace_group)) { + error = ENOTSUP; + goto out; + } + + if (! 
acl->hasmask) { + acl->hasmask = 1; + acl->acl_mask = vals->denied; + /* check for mismatched ACL_MASK emulations */ + } else if (acl->acl_mask != vals->denied) { + error = ENOTSUP; + goto out; + } + vals->mask = vals->denied; + } + vals->denied = acep->a_access_mask; + } + } + + /* done collating; produce the aclent_t lists */ + if (normacl->state != ace_unused) { + error = ace_list_to_aent(normacl, aclentp, aclcnt, + owner, group, isdir); + if (error != 0) { + goto out; + } + } + if (dfacl->state != ace_unused) { + error = ace_list_to_aent(dfacl, dfaclentp, dfaclcnt, + owner, group, isdir); + if (error != 0) { + goto out; + } + } + +out: + if (normacl != NULL) + ace_list_free(normacl); + if (dfacl != NULL) + ace_list_free(dfacl); + + return (error); +} + +static int +convert_ace_to_aent(ace_t *acebufp, int acecnt, int isdir, + uid_t owner, gid_t group, aclent_t **retaclentp, int *retaclcnt) +{ + int error = 0; + aclent_t *aclentp, *dfaclentp; + int aclcnt, dfaclcnt; + int aclsz, dfaclsz; + + error = ln_ace_to_aent(acebufp, acecnt, owner, group, + &aclentp, &aclcnt, &dfaclentp, &dfaclcnt, isdir); + + if (error) + return (error); + + + if (dfaclcnt != 0) { + /* + * Slap aclentp and dfaclentp into a single array. + */ + aclsz = sizeof (aclent_t) * aclcnt; + dfaclsz = sizeof (aclent_t) * dfaclcnt; + aclentp = cacl_realloc(aclentp, aclsz, aclsz + dfaclsz); + if (aclentp != NULL) { + (void) memcpy(aclentp + aclcnt, dfaclentp, dfaclsz); + } else { + error = ENOMEM; + } + } + + if (aclentp) { + *retaclentp = aclentp; + *retaclcnt = aclcnt + dfaclcnt; + } + + if (dfaclentp) + cacl_free(dfaclentp, dfaclsz); + + return (error); +} + + +int +acl_translate(acl_t *aclp, int target_flavor, int isdir, uid_t owner, + gid_t group) +{ + int aclcnt; + void *acldata; + int error; + + /* + * See if we need to translate + */ + if ((target_flavor == _ACL_ACE_ENABLED && aclp->acl_type == ACE_T) || + (target_flavor == _ACL_ACLENT_ENABLED && + aclp->acl_type == ACLENT_T)) + return (0); + + if (target_flavor == -1) { + error = EINVAL; + goto out; + } + + if (target_flavor == _ACL_ACE_ENABLED && + aclp->acl_type == ACLENT_T) { + error = convert_aent_to_ace(aclp->acl_aclp, + aclp->acl_cnt, isdir, (ace_t **)&acldata, &aclcnt); + if (error) + goto out; + + } else if (target_flavor == _ACL_ACLENT_ENABLED && + aclp->acl_type == ACE_T) { + error = convert_ace_to_aent(aclp->acl_aclp, aclp->acl_cnt, + isdir, owner, group, (aclent_t **)&acldata, &aclcnt); + if (error) + goto out; + } else { + error = ENOTSUP; + goto out; + } + + /* + * replace old acl with newly translated acl + */ + cacl_free(aclp->acl_aclp, aclp->acl_cnt * aclp->acl_entry_size); + aclp->acl_aclp = acldata; + aclp->acl_cnt = aclcnt; + if (target_flavor == _ACL_ACE_ENABLED) { + aclp->acl_type = ACE_T; + aclp->acl_entry_size = sizeof (ace_t); + } else { + aclp->acl_type = ACLENT_T; + aclp->acl_entry_size = sizeof (aclent_t); + } + return (0); + +out: + +#if !defined(_KERNEL) + errno = error; + return (-1); +#else + return (error); +#endif +} diff --git a/external/cddl/osnet/dist/common/acl/acl_common.h b/external/cddl/osnet/dist/common/acl/acl_common.h new file mode 100644 index 000000000000..84bd04f52fd6 --- /dev/null +++ b/external/cddl/osnet/dist/common/acl/acl_common.h @@ -0,0 +1,61 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _ACL_COMMON_H +#define _ACL_COMMON_H + +#pragma ident "%Z%%M% %I% %E% SMI" + + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern ace_t trivial_acl[6]; + +extern int acltrivial(const char *); +extern void adjust_ace_pair(ace_t *pair, mode_t mode); +extern void adjust_ace_pair_common(void *, size_t, size_t, mode_t); +extern int ace_trivial(ace_t *acep, int aclcnt); +extern int ace_trivial_common(void *, int, + uint64_t (*walk)(void *, uint64_t, int aclcnt, uint16_t *, uint16_t *, + uint32_t *mask)); +extern acl_t *acl_alloc(acl_type_t); +extern void acl_free(acl_t *aclp); +extern int acl_translate(acl_t *aclp, int target_flavor, + int isdir, uid_t owner, gid_t group); +void ksort(caddr_t v, int n, int s, int (*f)()); +int cmp2acls(void *a, void *b); + + +#ifdef __cplusplus +} +#endif + +#endif /* _ACL_COMMON_H */ diff --git a/external/cddl/osnet/dist/common/avl/avl.c b/external/cddl/osnet/dist/common/avl/avl.c new file mode 100644 index 000000000000..c9727c643b96 --- /dev/null +++ b/external/cddl/osnet/dist/common/avl/avl.c @@ -0,0 +1,1033 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + + +/* + * AVL - generic AVL tree implementation for kernel use + * + * A complete description of AVL trees can be found in many CS textbooks. + * + * Here is a very brief overview. An AVL tree is a binary search tree that is + * almost perfectly balanced. By "almost" perfectly balanced, we mean that at + * any given node, the left and right subtrees are allowed to differ in height + * by at most 1 level. + * + * This relaxation from a perfectly balanced binary tree allows doing + * insertion and deletion relatively efficiently. Searching the tree is + * still a fast operation, roughly O(log(N)). 
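 *
 * For orientation, a typical consumer embeds an avl_node_t in its own record
 * and describes the layout to avl_create(); the sketch below is illustrative
 * only (struct, field and helper names are not part of this file).  Note the
 * compare callback must return exactly -1, 0 or +1:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <sys/avl.h>	// or the userland avl.h, per environment
 *
 *	struct foo {
 *		uint64_t	foo_key;
 *		avl_node_t	foo_node;	// embedded linkage
 *	};
 *
 *	static int
 *	foo_compare(const void *va, const void *vb)
 *	{
 *		const struct foo *a = va, *b = vb;
 *
 *		if (a->foo_key > b->foo_key)
 *			return (1);
 *		return (a->foo_key < b->foo_key ? -1 : 0);
 *	}
 *
 *	static void
 *	foo_tree_init(avl_tree_t *tree)
 *	{
 *		avl_create(tree, foo_compare, sizeof (struct foo),
 *		    offsetof(struct foo, foo_node));
 *	}
 *
 *	static void
 *	foo_tree_add(avl_tree_t *tree, struct foo *f)	// caller handles locking
 *	{
 *		avl_index_t where;
 *
 *		if (avl_find(tree, f, &where) == NULL)
 *			avl_insert(tree, f, where);
 *	}
 *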
+ * + * The key to insertion and deletion is a set of tree maniuplations called + * rotations, which bring unbalanced subtrees back into the semi-balanced state. + * + * This implementation of AVL trees has the following peculiarities: + * + * - The AVL specific data structures are physically embedded as fields + * in the "using" data structures. To maintain generality the code + * must constantly translate between "avl_node_t *" and containing + * data structure "void *"s by adding/subracting the avl_offset. + * + * - Since the AVL data is always embedded in other structures, there is + * no locking or memory allocation in the AVL routines. This must be + * provided for by the enclosing data structure's semantics. Typically, + * avl_insert()/_add()/_remove()/avl_insert_here() require some kind of + * exclusive write lock. Other operations require a read lock. + * + * - The implementation uses iteration instead of explicit recursion, + * since it is intended to run on limited size kernel stacks. Since + * there is no recursion stack present to move "up" in the tree, + * there is an explicit "parent" link in the avl_node_t. + * + * - The left/right children pointers of a node are in an array. + * In the code, variables (instead of constants) are used to represent + * left and right indices. The implementation is written as if it only + * dealt with left handed manipulations. By changing the value assigned + * to "left", the code also works for right handed trees. The + * following variables/terms are frequently used: + * + * int left; // 0 when dealing with left children, + * // 1 for dealing with right children + * + * int left_heavy; // -1 when left subtree is taller at some node, + * // +1 when right subtree is taller + * + * int right; // will be the opposite of left (0 or 1) + * int right_heavy;// will be the opposite of left_heavy (-1 or 1) + * + * int direction; // 0 for "<" (ie. left child); 1 for ">" (right) + * + * Though it is a little more confusing to read the code, the approach + * allows using half as much code (and hence cache footprint) for tree + * manipulations and eliminates many conditional branches. + * + * - The avl_index_t is an opaque "cookie" used to find nodes at or + * adjacent to where a new value would be inserted in the tree. The value + * is a modified "avl_node_t *". The bottom bit (normally 0 for a + * pointer) is set to indicate if that the new node has a value greater + * than the value of the indicated "avl_node_t *". + */ + +#include +#include +#include +#include +#include + +/* + * Small arrays to translate between balance (or diff) values and child indeces. + * + * Code that deals with binary tree data structures will randomly use + * left and right children when examining a tree. C "if()" statements + * which evaluate randomly suffer from very poor hardware branch prediction. + * In this code we avoid some of the branch mispredictions by using the + * following translation arrays. They replace random branches with an + * additional memory reference. Since the translation arrays are both very + * small the data should remain efficiently in cache. + */ +static const int avl_child2balance[2] = {-1, 1}; +static const int avl_balance2child[] = {0, 0, 1}; + + +/* + * Walk from one node to the previous valued node (ie. an infix walk + * towards the left). At any given node we do one of 2 things: + * + * - If there is a left child, go to it, then to it's rightmost descendant. 
+ * + * - otherwise we return thru parent nodes until we've come from a right child. + * + * Return Value: + * NULL - if at the end of the nodes + * otherwise next node + */ +void * +avl_walk(avl_tree_t *tree, void *oldnode, int left) +{ + size_t off = tree->avl_offset; + avl_node_t *node = AVL_DATA2NODE(oldnode, off); + int right = 1 - left; + int was_child; + + + /* + * nowhere to walk to if tree is empty + */ + if (node == NULL) + return (NULL); + + /* + * Visit the previous valued node. There are two possibilities: + * + * If this node has a left child, go down one left, then all + * the way right. + */ + if (node->avl_child[left] != NULL) { + for (node = node->avl_child[left]; + node->avl_child[right] != NULL; + node = node->avl_child[right]) + ; + /* + * Otherwise, return thru left children as far as we can. + */ + } else { + for (;;) { + was_child = AVL_XCHILD(node); + node = AVL_XPARENT(node); + if (node == NULL) + return (NULL); + if (was_child == right) + break; + } + } + + return (AVL_NODE2DATA(node, off)); +} + +/* + * Return the lowest valued node in a tree or NULL. + * (leftmost child from root of tree) + */ +void * +avl_first(avl_tree_t *tree) +{ + avl_node_t *node; + avl_node_t *prev = NULL; + size_t off = tree->avl_offset; + + for (node = tree->avl_root; node != NULL; node = node->avl_child[0]) + prev = node; + + if (prev != NULL) + return (AVL_NODE2DATA(prev, off)); + return (NULL); +} + +/* + * Return the highest valued node in a tree or NULL. + * (rightmost child from root of tree) + */ +void * +avl_last(avl_tree_t *tree) +{ + avl_node_t *node; + avl_node_t *prev = NULL; + size_t off = tree->avl_offset; + + for (node = tree->avl_root; node != NULL; node = node->avl_child[1]) + prev = node; + + if (prev != NULL) + return (AVL_NODE2DATA(prev, off)); + return (NULL); +} + +/* + * Access the node immediately before or after an insertion point. + * + * "avl_index_t" is a (avl_node_t *) with the bottom bit indicating a child + * + * Return value: + * NULL: no node in the given direction + * "void *" of the found tree node + */ +void * +avl_nearest(avl_tree_t *tree, avl_index_t where, int direction) +{ + int child = AVL_INDEX2CHILD(where); + avl_node_t *node = AVL_INDEX2NODE(where); + void *data; + size_t off = tree->avl_offset; + + if (node == NULL) { + ASSERT(tree->avl_root == NULL); + return (NULL); + } + data = AVL_NODE2DATA(node, off); + if (child != direction) + return (data); + + return (avl_walk(tree, data, direction)); +} + + +/* + * Search for the node which contains "value". The algorithm is a + * simple binary tree search. + * + * return value: + * NULL: the value is not in the AVL tree + * *where (if not NULL) is set to indicate the insertion point + * "void *" of the found tree node + */ +void * +avl_find(avl_tree_t *tree, void *value, avl_index_t *where) +{ + avl_node_t *node; + avl_node_t *prev = NULL; + int child = 0; + int diff; + size_t off = tree->avl_offset; + + for (node = tree->avl_root; node != NULL; + node = node->avl_child[child]) { + + prev = node; + + diff = tree->avl_compar(value, AVL_NODE2DATA(node, off)); + ASSERT(-1 <= diff && diff <= 1); + if (diff == 0) { +#ifdef DEBUG + if (where != NULL) + *where = 0; +#endif + return (AVL_NODE2DATA(node, off)); + } + child = avl_balance2child[1 + diff]; + + } + + if (where != NULL) + *where = AVL_MKINDEX(prev, child); + + return (NULL); +} + + +/* + * Perform a rotation to restore balance at the subtree given by depth. + * + * This routine is used by both insertion and deletion. 
The return value + * indicates: + * 0 : subtree did not change height + * !0 : subtree was reduced in height + * + * The code is written as if handling left rotations, right rotations are + * symmetric and handled by swapping values of variables right/left[_heavy] + * + * On input balance is the "new" balance at "node". This value is either + * -2 or +2. + */ +static int +avl_rotation(avl_tree_t *tree, avl_node_t *node, int balance) +{ + int left = !(balance < 0); /* when balance = -2, left will be 0 */ + int right = 1 - left; + int left_heavy = balance >> 1; + int right_heavy = -left_heavy; + avl_node_t *parent = AVL_XPARENT(node); + avl_node_t *child = node->avl_child[left]; + avl_node_t *cright; + avl_node_t *gchild; + avl_node_t *gright; + avl_node_t *gleft; + int which_child = AVL_XCHILD(node); + int child_bal = AVL_XBALANCE(child); + + /* BEGIN CSTYLED */ + /* + * case 1 : node is overly left heavy, the left child is balanced or + * also left heavy. This requires the following rotation. + * + * (node bal:-2) + * / \ + * / \ + * (child bal:0 or -1) + * / \ + * / \ + * cright + * + * becomes: + * + * (child bal:1 or 0) + * / \ + * / \ + * (node bal:-1 or 0) + * / \ + * / \ + * cright + * + * we detect this situation by noting that child's balance is not + * right_heavy. + */ + /* END CSTYLED */ + if (child_bal != right_heavy) { + + /* + * compute new balance of nodes + * + * If child used to be left heavy (now balanced) we reduced + * the height of this sub-tree -- used in "return...;" below + */ + child_bal += right_heavy; /* adjust towards right */ + + /* + * move "cright" to be node's left child + */ + cright = child->avl_child[right]; + node->avl_child[left] = cright; + if (cright != NULL) { + AVL_SETPARENT(cright, node); + AVL_SETCHILD(cright, left); + } + + /* + * move node to be child's right child + */ + child->avl_child[right] = node; + AVL_SETBALANCE(node, -child_bal); + AVL_SETCHILD(node, right); + AVL_SETPARENT(node, child); + + /* + * update the pointer into this subtree + */ + AVL_SETBALANCE(child, child_bal); + AVL_SETCHILD(child, which_child); + AVL_SETPARENT(child, parent); + if (parent != NULL) + parent->avl_child[which_child] = child; + else + tree->avl_root = child; + + return (child_bal == 0); + } + + /* BEGIN CSTYLED */ + /* + * case 2 : When node is left heavy, but child is right heavy we use + * a different rotation. + * + * (node b:-2) + * / \ + * / \ + * / \ + * (child b:+1) + * / \ + * / \ + * (gchild b: != 0) + * / \ + * / \ + * gleft gright + * + * becomes: + * + * (gchild b:0) + * / \ + * / \ + * / \ + * (child b:?) (node b:?) + * / \ / \ + * / \ / \ + * gleft gright + * + * computing the new balances is more complicated. As an example: + * if gchild was right_heavy, then child is now left heavy + * else it is balanced + */ + /* END CSTYLED */ + gchild = child->avl_child[right]; + gleft = gchild->avl_child[left]; + gright = gchild->avl_child[right]; + + /* + * move gright to left child of node and + * + * move gleft to right child of node + */ + node->avl_child[left] = gright; + if (gright != NULL) { + AVL_SETPARENT(gright, node); + AVL_SETCHILD(gright, left); + } + + child->avl_child[right] = gleft; + if (gleft != NULL) { + AVL_SETPARENT(gleft, child); + AVL_SETCHILD(gleft, right); + } + + /* + * move child to left child of gchild and + * + * move node to right child of gchild and + * + * fixup parent of all this to point to gchild + */ + balance = AVL_XBALANCE(gchild); + gchild->avl_child[left] = child; + AVL_SETBALANCE(child, (balance == right_heavy ? 
left_heavy : 0)); + AVL_SETPARENT(child, gchild); + AVL_SETCHILD(child, left); + + gchild->avl_child[right] = node; + AVL_SETBALANCE(node, (balance == left_heavy ? right_heavy : 0)); + AVL_SETPARENT(node, gchild); + AVL_SETCHILD(node, right); + + AVL_SETBALANCE(gchild, 0); + AVL_SETPARENT(gchild, parent); + AVL_SETCHILD(gchild, which_child); + if (parent != NULL) + parent->avl_child[which_child] = gchild; + else + tree->avl_root = gchild; + + return (1); /* the new tree is always shorter */ +} + + +/* + * Insert a new node into an AVL tree at the specified (from avl_find()) place. + * + * Newly inserted nodes are always leaf nodes in the tree, since avl_find() + * searches out to the leaf positions. The avl_index_t indicates the node + * which will be the parent of the new node. + * + * After the node is inserted, a single rotation further up the tree may + * be necessary to maintain an acceptable AVL balance. + */ +void +avl_insert(avl_tree_t *tree, void *new_data, avl_index_t where) +{ + avl_node_t *node; + avl_node_t *parent = AVL_INDEX2NODE(where); + int old_balance; + int new_balance; + int which_child = AVL_INDEX2CHILD(where); + size_t off = tree->avl_offset; + + ASSERT(tree); +#ifdef _LP64 + ASSERT(((uintptr_t)new_data & 0x7) == 0); +#endif + + node = AVL_DATA2NODE(new_data, off); + + /* + * First, add the node to the tree at the indicated position. + */ + ++tree->avl_numnodes; + + node->avl_child[0] = NULL; + node->avl_child[1] = NULL; + + AVL_SETCHILD(node, which_child); + AVL_SETBALANCE(node, 0); + AVL_SETPARENT(node, parent); + if (parent != NULL) { + ASSERT(parent->avl_child[which_child] == NULL); + parent->avl_child[which_child] = node; + } else { + ASSERT(tree->avl_root == NULL); + tree->avl_root = node; + } + /* + * Now, back up the tree modifying the balance of all nodes above the + * insertion point. If we get to a highly unbalanced ancestor, we + * need to do a rotation. If we back out of the tree we are done. + * If we brought any subtree into perfect balance (0), we are also done. + */ + for (;;) { + node = parent; + if (node == NULL) + return; + + /* + * Compute the new balance + */ + old_balance = AVL_XBALANCE(node); + new_balance = old_balance + avl_child2balance[which_child]; + + /* + * If we introduced equal balance, then we are done immediately + */ + if (new_balance == 0) { + AVL_SETBALANCE(node, 0); + return; + } + + /* + * If both old and new are not zero we went + * from -1 to -2 balance, do a rotation. + */ + if (old_balance != 0) + break; + + AVL_SETBALANCE(node, new_balance); + parent = AVL_XPARENT(node); + which_child = AVL_XCHILD(node); + } + + /* + * perform a rotation to fix the tree and return + */ + (void) avl_rotation(tree, node, new_balance); +} + +/* + * Insert "new_data" in "tree" in the given "direction" either after or + * before (AVL_AFTER, AVL_BEFORE) the data "here". + * + * Insertions can only be done at empty leaf points in the tree, therefore + * if the given child of the node is already present we move to either + * the AVL_PREV or AVL_NEXT and reverse the insertion direction. Since + * every other node in the tree is a leaf, this always works. + * + * To help developers using this interface, we assert that the new node + * is correctly ordered at every step of the way in DEBUG kernels. 
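+ *
+ * For example (tree, key, here, new_elem and foo_t are illustrative
+ * names; NULL checks omitted), a caller that has already located a
+ * neighboring element and knows the new element sorts immediately after
+ * it can skip a second avl_find():
+ *
+ *      foo_t *here = avl_find(&tree, &key, NULL);
+ *      ...
+ *      avl_insert_here(&tree, new_elem, here, AVL_AFTER);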
+ */ +void +avl_insert_here( + avl_tree_t *tree, + void *new_data, + void *here, + int direction) +{ + avl_node_t *node; + int child = direction; /* rely on AVL_BEFORE == 0, AVL_AFTER == 1 */ +#ifdef DEBUG + int diff; +#endif + + ASSERT(tree != NULL); + ASSERT(new_data != NULL); + ASSERT(here != NULL); + ASSERT(direction == AVL_BEFORE || direction == AVL_AFTER); + + /* + * If corresponding child of node is not NULL, go to the neighboring + * node and reverse the insertion direction. + */ + node = AVL_DATA2NODE(here, tree->avl_offset); + +#ifdef DEBUG + diff = tree->avl_compar(new_data, here); + ASSERT(-1 <= diff && diff <= 1); + ASSERT(diff != 0); + ASSERT(diff > 0 ? child == 1 : child == 0); +#endif + + if (node->avl_child[child] != NULL) { + node = node->avl_child[child]; + child = 1 - child; + while (node->avl_child[child] != NULL) { +#ifdef DEBUG + diff = tree->avl_compar(new_data, + AVL_NODE2DATA(node, tree->avl_offset)); + ASSERT(-1 <= diff && diff <= 1); + ASSERT(diff != 0); + ASSERT(diff > 0 ? child == 1 : child == 0); +#endif + node = node->avl_child[child]; + } +#ifdef DEBUG + diff = tree->avl_compar(new_data, + AVL_NODE2DATA(node, tree->avl_offset)); + ASSERT(-1 <= diff && diff <= 1); + ASSERT(diff != 0); + ASSERT(diff > 0 ? child == 1 : child == 0); +#endif + } + ASSERT(node->avl_child[child] == NULL); + + avl_insert(tree, new_data, AVL_MKINDEX(node, child)); +} + +/* + * Add a new node to an AVL tree. + */ +void +avl_add(avl_tree_t *tree, void *new_node) +{ + avl_index_t where; + + /* + * This is unfortunate. We want to call panic() here, even for + * non-DEBUG kernels. In userland, however, we can't depend on anything + * in libc or else the rtld build process gets confused. So, all we can + * do in userland is resort to a normal ASSERT(). + */ + if (avl_find(tree, new_node, &where) != NULL) +#ifdef _KERNEL + panic("avl_find() succeeded inside avl_add()"); +#else + ASSERT(0); +#endif + avl_insert(tree, new_node, where); +} + +/* + * Delete a node from the AVL tree. Deletion is similar to insertion, but + * with 2 complications. + * + * First, we may be deleting an interior node. Consider the following subtree: + * + * d c c + * / \ / \ / \ + * b e b e b e + * / \ / \ / + * a c a a + * + * When we are deleting node (d), we find and bring up an adjacent valued leaf + * node, say (c), to take the interior node's place. In the code this is + * handled by temporarily swapping (d) and (c) in the tree and then using + * common code to delete (d) from the leaf position. + * + * Secondly, an interior deletion from a deep tree may require more than one + * rotation to fix the balance. This is handled by moving up the tree through + * parents and applying rotations as needed. The return value from + * avl_rotation() is used to detect when a subtree did not change overall + * height due to a rotation. + */ +void +avl_remove(avl_tree_t *tree, void *data) +{ + avl_node_t *delete; + avl_node_t *parent; + avl_node_t *node; + avl_node_t tmp; + int old_balance; + int new_balance; + int left; + int right; + int which_child; + size_t off = tree->avl_offset; + + ASSERT(tree); + + delete = AVL_DATA2NODE(data, off); + + /* + * Deletion is easiest with a node that has at most 1 child. + * We swap a node with 2 children with a sequentially valued + * neighbor node. That node will have at most 1 child. Note this + * has no effect on the ordering of the remaining nodes. + * + * As an optimization, we choose the greater neighbor if the tree + * is right heavy, otherwise the left neighbor. 
This reduces the + * number of rotations needed. + */ + if (delete->avl_child[0] != NULL && delete->avl_child[1] != NULL) { + + /* + * choose node to swap from whichever side is taller + */ + old_balance = AVL_XBALANCE(delete); + left = avl_balance2child[old_balance + 1]; + right = 1 - left; + + /* + * get to the previous value'd node + * (down 1 left, as far as possible right) + */ + for (node = delete->avl_child[left]; + node->avl_child[right] != NULL; + node = node->avl_child[right]) + ; + + /* + * create a temp placeholder for 'node' + * move 'node' to delete's spot in the tree + */ + tmp = *node; + + *node = *delete; + if (node->avl_child[left] == node) + node->avl_child[left] = &tmp; + + parent = AVL_XPARENT(node); + if (parent != NULL) + parent->avl_child[AVL_XCHILD(node)] = node; + else + tree->avl_root = node; + AVL_SETPARENT(node->avl_child[left], node); + AVL_SETPARENT(node->avl_child[right], node); + + /* + * Put tmp where node used to be (just temporary). + * It always has a parent and at most 1 child. + */ + delete = &tmp; + parent = AVL_XPARENT(delete); + parent->avl_child[AVL_XCHILD(delete)] = delete; + which_child = (delete->avl_child[1] != 0); + if (delete->avl_child[which_child] != NULL) + AVL_SETPARENT(delete->avl_child[which_child], delete); + } + + + /* + * Here we know "delete" is at least partially a leaf node. It can + * be easily removed from the tree. + */ + ASSERT(tree->avl_numnodes > 0); + --tree->avl_numnodes; + parent = AVL_XPARENT(delete); + which_child = AVL_XCHILD(delete); + if (delete->avl_child[0] != NULL) + node = delete->avl_child[0]; + else + node = delete->avl_child[1]; + + /* + * Connect parent directly to node (leaving out delete). + */ + if (node != NULL) { + AVL_SETPARENT(node, parent); + AVL_SETCHILD(node, which_child); + } + if (parent == NULL) { + tree->avl_root = node; + return; + } + parent->avl_child[which_child] = node; + + + /* + * Since the subtree is now shorter, begin adjusting parent balances + * and performing any needed rotations. + */ + do { + + /* + * Move up the tree and adjust the balance + * + * Capture the parent and which_child values for the next + * iteration before any rotations occur. + */ + node = parent; + old_balance = AVL_XBALANCE(node); + new_balance = old_balance - avl_child2balance[which_child]; + parent = AVL_XPARENT(node); + which_child = AVL_XCHILD(node); + + /* + * If a node was in perfect balance but isn't anymore then + * we can stop, since the height didn't change above this point + * due to a deletion. + */ + if (old_balance == 0) { + AVL_SETBALANCE(node, new_balance); + break; + } + + /* + * If the new balance is zero, we don't need to rotate + * else + * need a rotation to fix the balance. + * If the rotation doesn't change the height + * of the sub-tree we have finished adjusting. 
+ */ + if (new_balance == 0) + AVL_SETBALANCE(node, new_balance); + else if (!avl_rotation(tree, node, new_balance)) + break; + } while (parent != NULL); +} + +#define AVL_REINSERT(tree, obj) \ + avl_remove((tree), (obj)); \ + avl_add((tree), (obj)) + +boolean_t +avl_update_lt(avl_tree_t *t, void *obj) +{ + void *neighbor; + + ASSERT(((neighbor = AVL_NEXT(t, obj)) == NULL) || + (t->avl_compar(obj, neighbor) <= 0)); + + neighbor = AVL_PREV(t, obj); + if ((neighbor != NULL) && (t->avl_compar(obj, neighbor) < 0)) { + AVL_REINSERT(t, obj); + return (B_TRUE); + } + + return (B_FALSE); +} + +boolean_t +avl_update_gt(avl_tree_t *t, void *obj) +{ + void *neighbor; + + ASSERT(((neighbor = AVL_PREV(t, obj)) == NULL) || + (t->avl_compar(obj, neighbor) >= 0)); + + neighbor = AVL_NEXT(t, obj); + if ((neighbor != NULL) && (t->avl_compar(obj, neighbor) > 0)) { + AVL_REINSERT(t, obj); + return (B_TRUE); + } + + return (B_FALSE); +} + +boolean_t +avl_update(avl_tree_t *t, void *obj) +{ + void *neighbor; + + neighbor = AVL_PREV(t, obj); + if ((neighbor != NULL) && (t->avl_compar(obj, neighbor) < 0)) { + AVL_REINSERT(t, obj); + return (B_TRUE); + } + + neighbor = AVL_NEXT(t, obj); + if ((neighbor != NULL) && (t->avl_compar(obj, neighbor) > 0)) { + AVL_REINSERT(t, obj); + return (B_TRUE); + } + + return (B_FALSE); +} + +/* + * initialize a new AVL tree + */ +void +avl_create(avl_tree_t *tree, int (*compar) (const void *, const void *), + size_t size, size_t offset) +{ + ASSERT(tree); + ASSERT(compar); + ASSERT(size > 0); + ASSERT(size >= offset + sizeof (avl_node_t)); +#ifdef _LP64 + ASSERT((offset & 0x7) == 0); +#endif + + tree->avl_compar = compar; + tree->avl_root = NULL; + tree->avl_numnodes = 0; + tree->avl_size = size; + tree->avl_offset = offset; +} + +/* + * Delete a tree. + */ +/* ARGSUSED */ +void +avl_destroy(avl_tree_t *tree) +{ + ASSERT(tree); + ASSERT(tree->avl_numnodes == 0); + ASSERT(tree->avl_root == NULL); +} + + +/* + * Return the number of nodes in an AVL tree. + */ +ulong_t +avl_numnodes(avl_tree_t *tree) +{ + ASSERT(tree); + return (tree->avl_numnodes); +} + +boolean_t +avl_is_empty(avl_tree_t *tree) +{ + ASSERT(tree); + return (tree->avl_numnodes == 0); +} + +#define CHILDBIT (1L) + +/* + * Post-order tree walk used to visit all tree nodes and destroy the tree + * in post order. This is used for destroying a tree w/o paying any cost + * for rebalancing it. + * + * example: + * + * void *cookie = NULL; + * my_data_t *node; + * + * while ((node = avl_destroy_nodes(tree, &cookie)) != NULL) + * free(node); + * avl_destroy(tree); + * + * The cookie is really an avl_node_t to the current node's parent and + * an indication of which child you looked at last. + * + * On input, a cookie value of CHILDBIT indicates the tree is done. + */ +void * +avl_destroy_nodes(avl_tree_t *tree, void **cookie) +{ + avl_node_t *node; + avl_node_t *parent; + int child; + void *first; + size_t off = tree->avl_offset; + + /* + * Initial calls go to the first node or it's right descendant. + */ + if (*cookie == NULL) { + first = avl_first(tree); + + /* + * deal with an empty tree + */ + if (first == NULL) { + *cookie = (void *)CHILDBIT; + return (NULL); + } + + node = AVL_DATA2NODE(first, off); + parent = AVL_XPARENT(node); + goto check_right_side; + } + + /* + * If there is no parent to return to we are done. 
+ */ + parent = (avl_node_t *)((uintptr_t)(*cookie) & ~CHILDBIT); + if (parent == NULL) { + if (tree->avl_root != NULL) { + ASSERT(tree->avl_numnodes == 1); + tree->avl_root = NULL; + tree->avl_numnodes = 0; + } + return (NULL); + } + + /* + * Remove the child pointer we just visited from the parent and tree. + */ + child = (uintptr_t)(*cookie) & CHILDBIT; + parent->avl_child[child] = NULL; + ASSERT(tree->avl_numnodes > 1); + --tree->avl_numnodes; + + /* + * If we just did a right child or there isn't one, go up to parent. + */ + if (child == 1 || parent->avl_child[1] == NULL) { + node = parent; + parent = AVL_XPARENT(parent); + goto done; + } + + /* + * Do parent's right child, then leftmost descendent. + */ + node = parent->avl_child[1]; + while (node->avl_child[0] != NULL) { + parent = node; + node = node->avl_child[0]; + } + + /* + * If here, we moved to a left child. It may have one + * child on the right (when balance == +1). + */ +check_right_side: + if (node->avl_child[1] != NULL) { + ASSERT(AVL_XBALANCE(node) == 1); + parent = node; + node = node->avl_child[1]; + ASSERT(node->avl_child[0] == NULL && + node->avl_child[1] == NULL); + } else { + ASSERT(AVL_XBALANCE(node) <= 0); + } + +done: + if (parent == NULL) { + *cookie = (void *)CHILDBIT; + ASSERT(node == tree->avl_root); + } else { + *cookie = (void *)((uintptr_t)parent | AVL_XCHILD(node)); + } + + return (AVL_NODE2DATA(node, off)); +} diff --git a/external/cddl/osnet/dist/common/nvpair/nvpair.c b/external/cddl/osnet/dist/common/nvpair/nvpair.c new file mode 100644 index 000000000000..77891bf77644 --- /dev/null +++ b/external/cddl/osnet/dist/common/nvpair/nvpair.c @@ -0,0 +1,3246 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(_KERNEL) && !defined(_BOOT) +#include +#include +#include +#else +#include +#include +#include +#include +#endif + +#ifndef offsetof +#define offsetof(s, m) ((size_t)(&(((s *)0)->m))) +#endif +#define skip_whitespace(p) while ((*(p) == ' ') || (*(p) == '\t')) p++ + +/* + * nvpair.c - Provides kernel & userland interfaces for manipulating + * name-value pairs. 
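+ *
+ * A minimal usage sketch (error handling abbreviated; "my_prop" and the
+ * variable names are illustrative only; the kmflag argument, 0 here, is
+ * ignored in userland builds, while kernel callers pass KM_SLEEP or
+ * KM_NOSLEEP):
+ *
+ *      nvlist_t *nvl;
+ *      uint64_t val;
+ *
+ *      if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) == 0) {
+ *              (void) nvlist_add_uint64(nvl, "my_prop", 123ULL);
+ *              if (nvlist_lookup_uint64(nvl, "my_prop", &val) == 0)
+ *                      ... use val ...
+ *              nvlist_free(nvl);
+ *      }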
+ * + * Overview Diagram + * + * +--------------+ + * | nvlist_t | + * |--------------| + * | nvl_version | + * | nvl_nvflag | + * | nvl_priv -+-+ + * | nvl_flag | | + * | nvl_pad | | + * +--------------+ | + * V + * +--------------+ last i_nvp in list + * | nvpriv_t | +---------------------> + * |--------------| | + * +--+- nvp_list | | +------------+ + * | | nvp_last -+--+ + nv_alloc_t | + * | | nvp_curr | |------------| + * | | nvp_nva -+----> | nva_ops | + * | | nvp_stat | | nva_arg | + * | +--------------+ +------------+ + * | + * +-------+ + * V + * +---------------------+ +-------------------+ + * | i_nvp_t | +-->| i_nvp_t | +--> + * |---------------------| | |-------------------| | + * | nvi_next -+--+ | nvi_next -+--+ + * | nvi_prev (NULL) | <----+ nvi_prev | + * | . . . . . . . . . . | | . . . . . . . . . | + * | nvp (nvpair_t) | | nvp (nvpair_t) | + * | - nvp_size | | - nvp_size | + * | - nvp_name_sz | | - nvp_name_sz | + * | - nvp_value_elem | | - nvp_value_elem | + * | - nvp_type | | - nvp_type | + * | - data ... | | - data ... | + * +---------------------+ +-------------------+ + * + * + * + * +---------------------+ +---------------------+ + * | i_nvp_t | +--> +-->| i_nvp_t (last) | + * |---------------------| | | |---------------------| + * | nvi_next -+--+ ... --+ | nvi_next (NULL) | + * <-+- nvi_prev |<-- ... <----+ nvi_prev | + * | . . . . . . . . . | | . . . . . . . . . | + * | nvp (nvpair_t) | | nvp (nvpair_t) | + * | - nvp_size | | - nvp_size | + * | - nvp_name_sz | | - nvp_name_sz | + * | - nvp_value_elem | | - nvp_value_elem | + * | - DATA_TYPE_NVLIST | | - nvp_type | + * | - data (embedded) | | - data ... | + * | nvlist name | +---------------------+ + * | +--------------+ | + * | | nvlist_t | | + * | |--------------| | + * | | nvl_version | | + * | | nvl_nvflag | | + * | | nvl_priv --+---+----> + * | | nvl_flag | | + * | | nvl_pad | | + * | +--------------+ | + * +---------------------+ + * + * + * N.B. nvpair_t may be aligned on 4 byte boundary, so +4 will + * allow value to be aligned on 8 byte boundary + * + * name_len is the length of the name string including the null terminator + * so it must be >= 1 + */ +#define NVP_SIZE_CALC(name_len, data_len) \ + (NV_ALIGN((sizeof (nvpair_t)) + name_len) + NV_ALIGN(data_len)) + +static int i_get_value_size(data_type_t type, const void *data, uint_t nelem); +static int nvlist_add_common(nvlist_t *nvl, const char *name, data_type_t type, + uint_t nelem, const void *data); + +#define NV_STAT_EMBEDDED 0x1 +#define EMBEDDED_NVL(nvp) ((nvlist_t *)(void *)NVP_VALUE(nvp)) +#define EMBEDDED_NVL_ARRAY(nvp) ((nvlist_t **)(void *)NVP_VALUE(nvp)) + +#define NVP_VALOFF(nvp) (NV_ALIGN(sizeof (nvpair_t) + (nvp)->nvp_name_sz)) +#define NVPAIR2I_NVP(nvp) \ + ((i_nvp_t *)((size_t)(nvp) - offsetof(i_nvp_t, nvi_nvp))) + + +int +nv_alloc_init(nv_alloc_t *nva, const nv_alloc_ops_t *nvo, /* args */ ...) 
+{ + va_list valist; + int err = 0; + + nva->nva_ops = nvo; + nva->nva_arg = NULL; + + va_start(valist, nvo); + if (nva->nva_ops->nv_ao_init != NULL) + err = nva->nva_ops->nv_ao_init(nva, valist); + va_end(valist); + + return (err); +} + +void +nv_alloc_reset(nv_alloc_t *nva) +{ + if (nva->nva_ops->nv_ao_reset != NULL) + nva->nva_ops->nv_ao_reset(nva); +} + +void +nv_alloc_fini(nv_alloc_t *nva) +{ + if (nva->nva_ops->nv_ao_fini != NULL) + nva->nva_ops->nv_ao_fini(nva); +} + +nv_alloc_t * +nvlist_lookup_nv_alloc(nvlist_t *nvl) +{ + nvpriv_t *priv; + + if (nvl == NULL || + (priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv) == NULL) + return (NULL); + + return (priv->nvp_nva); +} + +static void * +nv_mem_zalloc(nvpriv_t *nvp, size_t size) +{ + nv_alloc_t *nva = nvp->nvp_nva; + void *buf; + + if ((buf = nva->nva_ops->nv_ao_alloc(nva, size)) != NULL) + bzero(buf, size); + + return (buf); +} + +static void +nv_mem_free(nvpriv_t *nvp, void *buf, size_t size) +{ + nv_alloc_t *nva = nvp->nvp_nva; + + nva->nva_ops->nv_ao_free(nva, buf, size); +} + +static void +nv_priv_init(nvpriv_t *priv, nv_alloc_t *nva, uint32_t stat) +{ + bzero(priv, sizeof (nvpriv_t)); + + priv->nvp_nva = nva; + priv->nvp_stat = stat; +} + +static nvpriv_t * +nv_priv_alloc(nv_alloc_t *nva) +{ + nvpriv_t *priv; + + /* + * nv_mem_alloc() cannot called here because it needs the priv + * argument. + */ + if ((priv = nva->nva_ops->nv_ao_alloc(nva, sizeof (nvpriv_t))) == NULL) + return (NULL); + + nv_priv_init(priv, nva, 0); + + return (priv); +} + +/* + * Embedded lists need their own nvpriv_t's. We create a new + * nvpriv_t using the parameters and allocator from the parent + * list's nvpriv_t. + */ +static nvpriv_t * +nv_priv_alloc_embedded(nvpriv_t *priv) +{ + nvpriv_t *emb_priv; + + if ((emb_priv = nv_mem_zalloc(priv, sizeof (nvpriv_t))) == NULL) + return (NULL); + + nv_priv_init(emb_priv, priv->nvp_nva, NV_STAT_EMBEDDED); + + return (emb_priv); +} + +static void +nvlist_init(nvlist_t *nvl, uint32_t nvflag, nvpriv_t *priv) +{ + nvl->nvl_version = NV_VERSION; + nvl->nvl_nvflag = nvflag & (NV_UNIQUE_NAME|NV_UNIQUE_NAME_TYPE); + nvl->nvl_priv = (uint64_t)(uintptr_t)priv; + nvl->nvl_flag = 0; + nvl->nvl_pad = 0; +} + +/* + * nvlist_alloc - Allocate nvlist. + */ +/*ARGSUSED1*/ +int +nvlist_alloc(nvlist_t **nvlp, uint_t nvflag, int kmflag) +{ +#if defined(_KERNEL) && !defined(_BOOT) + return (nvlist_xalloc(nvlp, nvflag, + (kmflag == KM_SLEEP ? nv_alloc_sleep : nv_alloc_nosleep))); +#else + return (nvlist_xalloc(nvlp, nvflag, nv_alloc_nosleep)); +#endif +} + +int +nvlist_xalloc(nvlist_t **nvlp, uint_t nvflag, nv_alloc_t *nva) +{ + nvpriv_t *priv; + + if (nvlp == NULL || nva == NULL) + return (EINVAL); + + if ((priv = nv_priv_alloc(nva)) == NULL) + return (ENOMEM); + + if ((*nvlp = nv_mem_zalloc(priv, + NV_ALIGN(sizeof (nvlist_t)))) == NULL) { + nv_mem_free(priv, priv, sizeof (nvpriv_t)); + return (ENOMEM); + } + + nvlist_init(*nvlp, nvflag, priv); + + return (0); +} + +/* + * nvp_buf_alloc - Allocate i_nvp_t for storing a new nv pair. + */ +static nvpair_t * +nvp_buf_alloc(nvlist_t *nvl, size_t len) +{ + nvpriv_t *priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv; + i_nvp_t *buf; + nvpair_t *nvp; + size_t nvsize; + + /* + * Allocate the buffer + */ + nvsize = len + offsetof(i_nvp_t, nvi_nvp); + + if ((buf = nv_mem_zalloc(priv, nvsize)) == NULL) + return (NULL); + + nvp = &buf->nvi_nvp; + nvp->nvp_size = len; + + return (nvp); +} + +/* + * nvp_buf_free - de-Allocate an i_nvp_t. 
+ */ +static void +nvp_buf_free(nvlist_t *nvl, nvpair_t *nvp) +{ + nvpriv_t *priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv; + size_t nvsize = nvp->nvp_size + offsetof(i_nvp_t, nvi_nvp); + + nv_mem_free(priv, NVPAIR2I_NVP(nvp), nvsize); +} + +/* + * nvp_buf_link - link a new nv pair into the nvlist. + */ +static void +nvp_buf_link(nvlist_t *nvl, nvpair_t *nvp) +{ + nvpriv_t *priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv; + i_nvp_t *curr = NVPAIR2I_NVP(nvp); + + /* Put element at end of nvlist */ + if (priv->nvp_list == NULL) { + priv->nvp_list = priv->nvp_last = curr; + } else { + curr->nvi_prev = priv->nvp_last; + priv->nvp_last->nvi_next = curr; + priv->nvp_last = curr; + } +} + +/* + * nvp_buf_unlink - unlink an removed nvpair out of the nvlist. + */ +static void +nvp_buf_unlink(nvlist_t *nvl, nvpair_t *nvp) +{ + nvpriv_t *priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv; + i_nvp_t *curr = NVPAIR2I_NVP(nvp); + + /* + * protect nvlist_next_nvpair() against walking on freed memory. + */ + if (priv->nvp_curr == curr) + priv->nvp_curr = curr->nvi_next; + + if (curr == priv->nvp_list) + priv->nvp_list = curr->nvi_next; + else + curr->nvi_prev->nvi_next = curr->nvi_next; + + if (curr == priv->nvp_last) + priv->nvp_last = curr->nvi_prev; + else + curr->nvi_next->nvi_prev = curr->nvi_prev; +} + +/* + * take a nvpair type and number of elements and make sure the are valid + */ +static int +i_validate_type_nelem(data_type_t type, uint_t nelem) +{ + switch (type) { + case DATA_TYPE_BOOLEAN: + if (nelem != 0) + return (EINVAL); + break; + case DATA_TYPE_BOOLEAN_VALUE: + case DATA_TYPE_BYTE: + case DATA_TYPE_INT8: + case DATA_TYPE_UINT8: + case DATA_TYPE_INT16: + case DATA_TYPE_UINT16: + case DATA_TYPE_INT32: + case DATA_TYPE_UINT32: + case DATA_TYPE_INT64: + case DATA_TYPE_UINT64: + case DATA_TYPE_STRING: + case DATA_TYPE_HRTIME: + case DATA_TYPE_NVLIST: +#if !defined(_KERNEL) + case DATA_TYPE_DOUBLE: +#endif + if (nelem != 1) + return (EINVAL); + break; + case DATA_TYPE_BOOLEAN_ARRAY: + case DATA_TYPE_BYTE_ARRAY: + case DATA_TYPE_INT8_ARRAY: + case DATA_TYPE_UINT8_ARRAY: + case DATA_TYPE_INT16_ARRAY: + case DATA_TYPE_UINT16_ARRAY: + case DATA_TYPE_INT32_ARRAY: + case DATA_TYPE_UINT32_ARRAY: + case DATA_TYPE_INT64_ARRAY: + case DATA_TYPE_UINT64_ARRAY: + case DATA_TYPE_STRING_ARRAY: + case DATA_TYPE_NVLIST_ARRAY: + /* we allow arrays with 0 elements */ + break; + default: + return (EINVAL); + } + return (0); +} + +/* + * Verify nvp_name_sz and check the name string length. + */ +static int +i_validate_nvpair_name(nvpair_t *nvp) +{ + if ((nvp->nvp_name_sz <= 0) || + (nvp->nvp_size < NVP_SIZE_CALC(nvp->nvp_name_sz, 0))) + return (EFAULT); + + /* verify the name string, make sure its terminated */ + if (NVP_NAME(nvp)[nvp->nvp_name_sz - 1] != '\0') + return (EFAULT); + + return (strlen(NVP_NAME(nvp)) == nvp->nvp_name_sz - 1 ? 0 : EFAULT); +} + +static int +i_validate_nvpair_value(data_type_t type, uint_t nelem, const void *data) +{ + switch (type) { + case DATA_TYPE_BOOLEAN_VALUE: + if (*(boolean_t *)data != B_TRUE && + *(boolean_t *)data != B_FALSE) + return (EINVAL); + break; + case DATA_TYPE_BOOLEAN_ARRAY: { + int i; + + for (i = 0; i < nelem; i++) + if (((boolean_t *)data)[i] != B_TRUE && + ((boolean_t *)data)[i] != B_FALSE) + return (EINVAL); + break; + } + default: + break; + } + + return (0); +} + +/* + * This function takes a pointer to what should be a nvpair and it's size + * and then verifies that all the nvpair fields make sense and can be + * trusted. 
This function is used when decoding packed nvpairs. + */ +static int +i_validate_nvpair(nvpair_t *nvp) +{ + data_type_t type = NVP_TYPE(nvp); + int size1, size2; + + /* verify nvp_name_sz, check the name string length */ + if (i_validate_nvpair_name(nvp) != 0) + return (EFAULT); + + if (i_validate_nvpair_value(type, NVP_NELEM(nvp), NVP_VALUE(nvp)) != 0) + return (EFAULT); + + /* + * verify nvp_type, nvp_value_elem, and also possibly + * verify string values and get the value size. + */ + size2 = i_get_value_size(type, NVP_VALUE(nvp), NVP_NELEM(nvp)); + size1 = nvp->nvp_size - NVP_VALOFF(nvp); + if (size2 < 0 || size1 != NV_ALIGN(size2)) + return (EFAULT); + + return (0); +} + +static int +nvlist_copy_pairs(nvlist_t *snvl, nvlist_t *dnvl) +{ + nvpriv_t *priv; + i_nvp_t *curr; + + if ((priv = (nvpriv_t *)(uintptr_t)snvl->nvl_priv) == NULL) + return (EINVAL); + + for (curr = priv->nvp_list; curr != NULL; curr = curr->nvi_next) { + nvpair_t *nvp = &curr->nvi_nvp; + int err; + + if ((err = nvlist_add_common(dnvl, NVP_NAME(nvp), NVP_TYPE(nvp), + NVP_NELEM(nvp), NVP_VALUE(nvp))) != 0) + return (err); + } + + return (0); +} + +/* + * Frees all memory allocated for an nvpair (like embedded lists) with + * the exception of the nvpair buffer itself. + */ +static void +nvpair_free(nvpair_t *nvp) +{ + switch (NVP_TYPE(nvp)) { + case DATA_TYPE_NVLIST: + nvlist_free(EMBEDDED_NVL(nvp)); + break; + case DATA_TYPE_NVLIST_ARRAY: { + nvlist_t **nvlp = EMBEDDED_NVL_ARRAY(nvp); + int i; + + for (i = 0; i < NVP_NELEM(nvp); i++) + if (nvlp[i] != NULL) + nvlist_free(nvlp[i]); + break; + } + default: + break; + } +} + +/* + * nvlist_free - free an unpacked nvlist + */ +void +nvlist_free(nvlist_t *nvl) +{ + nvpriv_t *priv; + i_nvp_t *curr; + + if (nvl == NULL || + (priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv) == NULL) + return; + + /* + * Unpacked nvlist are linked through i_nvp_t + */ + curr = priv->nvp_list; + while (curr != NULL) { + nvpair_t *nvp = &curr->nvi_nvp; + curr = curr->nvi_next; + + nvpair_free(nvp); + nvp_buf_free(nvl, nvp); + } + + if (!(priv->nvp_stat & NV_STAT_EMBEDDED)) + nv_mem_free(priv, nvl, NV_ALIGN(sizeof (nvlist_t))); + else + nvl->nvl_priv = 0; + + nv_mem_free(priv, priv, sizeof (nvpriv_t)); +} + +static int +nvlist_contains_nvp(nvlist_t *nvl, nvpair_t *nvp) +{ + nvpriv_t *priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv; + i_nvp_t *curr; + + if (nvp == NULL) + return (0); + + for (curr = priv->nvp_list; curr != NULL; curr = curr->nvi_next) + if (&curr->nvi_nvp == nvp) + return (1); + + return (0); +} + +/* + * Make a copy of nvlist + */ +/*ARGSUSED1*/ +int +nvlist_dup(nvlist_t *nvl, nvlist_t **nvlp, int kmflag) +{ +#if defined(_KERNEL) && !defined(_BOOT) + return (nvlist_xdup(nvl, nvlp, + (kmflag == KM_SLEEP ? 
nv_alloc_sleep : nv_alloc_nosleep))); +#else + return (nvlist_xdup(nvl, nvlp, nv_alloc_nosleep)); +#endif +} + +int +nvlist_xdup(nvlist_t *nvl, nvlist_t **nvlp, nv_alloc_t *nva) +{ + int err; + nvlist_t *ret; + + if (nvl == NULL || nvlp == NULL) + return (EINVAL); + + if ((err = nvlist_xalloc(&ret, nvl->nvl_nvflag, nva)) != 0) + return (err); + + if ((err = nvlist_copy_pairs(nvl, ret)) != 0) + nvlist_free(ret); + else + *nvlp = ret; + + return (err); +} + +/* + * Remove all with matching name + */ +int +nvlist_remove_all(nvlist_t *nvl, const char *name) +{ + nvpriv_t *priv; + i_nvp_t *curr; + int error = ENOENT; + + if (nvl == NULL || name == NULL || + (priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv) == NULL) + return (EINVAL); + + curr = priv->nvp_list; + while (curr != NULL) { + nvpair_t *nvp = &curr->nvi_nvp; + + curr = curr->nvi_next; + if (strcmp(name, NVP_NAME(nvp)) != 0) + continue; + + nvp_buf_unlink(nvl, nvp); + nvpair_free(nvp); + nvp_buf_free(nvl, nvp); + + error = 0; + } + + return (error); +} + +/* + * Remove first one with matching name and type + */ +int +nvlist_remove(nvlist_t *nvl, const char *name, data_type_t type) +{ + nvpriv_t *priv; + i_nvp_t *curr; + + if (nvl == NULL || name == NULL || + (priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv) == NULL) + return (EINVAL); + + curr = priv->nvp_list; + while (curr != NULL) { + nvpair_t *nvp = &curr->nvi_nvp; + + if (strcmp(name, NVP_NAME(nvp)) == 0 && NVP_TYPE(nvp) == type) { + nvp_buf_unlink(nvl, nvp); + nvpair_free(nvp); + nvp_buf_free(nvl, nvp); + + return (0); + } + curr = curr->nvi_next; + } + + return (ENOENT); +} + +/* + * This function calculates the size of an nvpair value. + * + * The data argument controls the behavior in case of the data types + * DATA_TYPE_STRING and + * DATA_TYPE_STRING_ARRAY + * Is data == NULL then the size of the string(s) is excluded. 
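+ *
+ * For example, for DATA_TYPE_STRING with data pointing at "abc" the
+ * computed size is 4 (strlen + 1 for the terminating NUL); with
+ * data == NULL it is 0.  For DATA_TYPE_STRING_ARRAY the nelem pointer
+ * slots (nelem * sizeof (uint64_t)) are always counted and the string
+ * bytes themselves are added only when data != NULL.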
+ */ +static int +i_get_value_size(data_type_t type, const void *data, uint_t nelem) +{ + uint64_t value_sz; + + if (i_validate_type_nelem(type, nelem) != 0) + return (-1); + + /* Calculate required size for holding value */ + switch (type) { + case DATA_TYPE_BOOLEAN: + value_sz = 0; + break; + case DATA_TYPE_BOOLEAN_VALUE: + value_sz = sizeof (boolean_t); + break; + case DATA_TYPE_BYTE: + value_sz = sizeof (uchar_t); + break; + case DATA_TYPE_INT8: + value_sz = sizeof (int8_t); + break; + case DATA_TYPE_UINT8: + value_sz = sizeof (uint8_t); + break; + case DATA_TYPE_INT16: + value_sz = sizeof (int16_t); + break; + case DATA_TYPE_UINT16: + value_sz = sizeof (uint16_t); + break; + case DATA_TYPE_INT32: + value_sz = sizeof (int32_t); + break; + case DATA_TYPE_UINT32: + value_sz = sizeof (uint32_t); + break; + case DATA_TYPE_INT64: + value_sz = sizeof (int64_t); + break; + case DATA_TYPE_UINT64: + value_sz = sizeof (uint64_t); + break; +#if !defined(_KERNEL) + case DATA_TYPE_DOUBLE: + value_sz = sizeof (double); + break; +#endif + case DATA_TYPE_STRING: + if (data == NULL) + value_sz = 0; + else + value_sz = strlen(data) + 1; + break; + case DATA_TYPE_BOOLEAN_ARRAY: + value_sz = (uint64_t)nelem * sizeof (boolean_t); + break; + case DATA_TYPE_BYTE_ARRAY: + value_sz = (uint64_t)nelem * sizeof (uchar_t); + break; + case DATA_TYPE_INT8_ARRAY: + value_sz = (uint64_t)nelem * sizeof (int8_t); + break; + case DATA_TYPE_UINT8_ARRAY: + value_sz = (uint64_t)nelem * sizeof (uint8_t); + break; + case DATA_TYPE_INT16_ARRAY: + value_sz = (uint64_t)nelem * sizeof (int16_t); + break; + case DATA_TYPE_UINT16_ARRAY: + value_sz = (uint64_t)nelem * sizeof (uint16_t); + break; + case DATA_TYPE_INT32_ARRAY: + value_sz = (uint64_t)nelem * sizeof (int32_t); + break; + case DATA_TYPE_UINT32_ARRAY: + value_sz = (uint64_t)nelem * sizeof (uint32_t); + break; + case DATA_TYPE_INT64_ARRAY: + value_sz = (uint64_t)nelem * sizeof (int64_t); + break; + case DATA_TYPE_UINT64_ARRAY: + value_sz = (uint64_t)nelem * sizeof (uint64_t); + break; + case DATA_TYPE_STRING_ARRAY: + value_sz = (uint64_t)nelem * sizeof (uint64_t); + + if (data != NULL) { + char *const *strs = data; + uint_t i; + + /* no alignment requirement for strings */ + for (i = 0; i < nelem; i++) { + if (strs[i] == NULL) + return (-1); + value_sz += strlen(strs[i]) + 1; + } + } + break; + case DATA_TYPE_HRTIME: + value_sz = sizeof (hrtime_t); + break; + case DATA_TYPE_NVLIST: + value_sz = NV_ALIGN(sizeof (nvlist_t)); + break; + case DATA_TYPE_NVLIST_ARRAY: + value_sz = (uint64_t)nelem * sizeof (uint64_t) + + (uint64_t)nelem * NV_ALIGN(sizeof (nvlist_t)); + break; + default: + return (-1); + } + + return (value_sz > INT32_MAX ? 
-1 : (int)value_sz); +} + +static int +nvlist_copy_embedded(nvlist_t *nvl, nvlist_t *onvl, nvlist_t *emb_nvl) +{ + nvpriv_t *priv; + int err; + + if ((priv = nv_priv_alloc_embedded((nvpriv_t *)(uintptr_t) + nvl->nvl_priv)) == NULL) + return (ENOMEM); + + nvlist_init(emb_nvl, onvl->nvl_nvflag, priv); + + if ((err = nvlist_copy_pairs(onvl, emb_nvl)) != 0) { + nvlist_free(emb_nvl); + emb_nvl->nvl_priv = 0; + } + + return (err); +} + +/* + * nvlist_add_common - Add new pair to nvlist + */ +static int +nvlist_add_common(nvlist_t *nvl, const char *name, + data_type_t type, uint_t nelem, const void *data) +{ + nvpair_t *nvp; + uint_t i; + + int nvp_sz, name_sz, value_sz; + int err = 0; + + if (name == NULL || nvl == NULL || nvl->nvl_priv == 0) + return (EINVAL); + + if (nelem != 0 && data == NULL) + return (EINVAL); + + /* + * Verify type and nelem and get the value size. + * In case of data types DATA_TYPE_STRING and DATA_TYPE_STRING_ARRAY + * is the size of the string(s) included. + */ + if ((value_sz = i_get_value_size(type, data, nelem)) < 0) + return (EINVAL); + + if (i_validate_nvpair_value(type, nelem, data) != 0) + return (EINVAL); + + /* + * If we're adding an nvlist or nvlist array, ensure that we are not + * adding the input nvlist to itself, which would cause recursion, + * and ensure that no NULL nvlist pointers are present. + */ + switch (type) { + case DATA_TYPE_NVLIST: + if (data == nvl || data == NULL) + return (EINVAL); + break; + case DATA_TYPE_NVLIST_ARRAY: { + nvlist_t **onvlp = (nvlist_t **)data; + for (i = 0; i < nelem; i++) { + if (onvlp[i] == nvl || onvlp[i] == NULL) + return (EINVAL); + } + break; + } + default: + break; + } + + /* calculate sizes of the nvpair elements and the nvpair itself */ + name_sz = strlen(name) + 1; + + nvp_sz = NVP_SIZE_CALC(name_sz, value_sz); + + if ((nvp = nvp_buf_alloc(nvl, nvp_sz)) == NULL) + return (ENOMEM); + + ASSERT(nvp->nvp_size == nvp_sz); + nvp->nvp_name_sz = name_sz; + nvp->nvp_value_elem = nelem; + nvp->nvp_type = type; + bcopy(name, NVP_NAME(nvp), name_sz); + + switch (type) { + case DATA_TYPE_BOOLEAN: + break; + case DATA_TYPE_STRING_ARRAY: { + char *const *strs = data; + char *buf = NVP_VALUE(nvp); + char **cstrs = (void *)buf; + + /* skip pre-allocated space for pointer array */ + buf += nelem * sizeof (uint64_t); + for (i = 0; i < nelem; i++) { + int slen = strlen(strs[i]) + 1; + bcopy(strs[i], buf, slen); + cstrs[i] = buf; + buf += slen; + } + break; + } + case DATA_TYPE_NVLIST: { + nvlist_t *nnvl = EMBEDDED_NVL(nvp); + nvlist_t *onvl = (nvlist_t *)data; + + if ((err = nvlist_copy_embedded(nvl, onvl, nnvl)) != 0) { + nvp_buf_free(nvl, nvp); + return (err); + } + break; + } + case DATA_TYPE_NVLIST_ARRAY: { + nvlist_t **onvlp = (nvlist_t **)data; + nvlist_t **nvlp = EMBEDDED_NVL_ARRAY(nvp); + nvlist_t *embedded = (nvlist_t *) + ((uintptr_t)nvlp + nelem * sizeof (uint64_t)); + + for (i = 0; i < nelem; i++) { + if ((err = nvlist_copy_embedded(nvl, + onvlp[i], embedded)) != 0) { + /* + * Free any successfully created lists + */ + nvpair_free(nvp); + nvp_buf_free(nvl, nvp); + return (err); + } + + nvlp[i] = embedded++; + } + break; + } + default: + bcopy(data, NVP_VALUE(nvp), value_sz); + } + + /* if unique name, remove before add */ + if (nvl->nvl_nvflag & NV_UNIQUE_NAME) + (void) nvlist_remove_all(nvl, name); + else if (nvl->nvl_nvflag & NV_UNIQUE_NAME_TYPE) + (void) nvlist_remove(nvl, name, type); + + nvp_buf_link(nvl, nvp); + + return (0); +} + +int +nvlist_add_boolean(nvlist_t *nvl, const char *name) +{ + return 
(nvlist_add_common(nvl, name, DATA_TYPE_BOOLEAN, 0, NULL)); +} + +int +nvlist_add_boolean_value(nvlist_t *nvl, const char *name, boolean_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_BOOLEAN_VALUE, 1, &val)); +} + +int +nvlist_add_byte(nvlist_t *nvl, const char *name, uchar_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_BYTE, 1, &val)); +} + +int +nvlist_add_int8(nvlist_t *nvl, const char *name, int8_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_INT8, 1, &val)); +} + +int +nvlist_add_uint8(nvlist_t *nvl, const char *name, uint8_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_UINT8, 1, &val)); +} + +int +nvlist_add_int16(nvlist_t *nvl, const char *name, int16_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_INT16, 1, &val)); +} + +int +nvlist_add_uint16(nvlist_t *nvl, const char *name, uint16_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_UINT16, 1, &val)); +} + +int +nvlist_add_int32(nvlist_t *nvl, const char *name, int32_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_INT32, 1, &val)); +} + +int +nvlist_add_uint32(nvlist_t *nvl, const char *name, uint32_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_UINT32, 1, &val)); +} + +int +nvlist_add_int64(nvlist_t *nvl, const char *name, int64_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_INT64, 1, &val)); +} + +int +nvlist_add_uint64(nvlist_t *nvl, const char *name, uint64_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_UINT64, 1, &val)); +} + +#if !defined(_KERNEL) +int +nvlist_add_double(nvlist_t *nvl, const char *name, double val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_DOUBLE, 1, &val)); +} +#endif + +int +nvlist_add_string(nvlist_t *nvl, const char *name, const char *val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_STRING, 1, (void *)val)); +} + +int +nvlist_add_boolean_array(nvlist_t *nvl, const char *name, + boolean_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_BOOLEAN_ARRAY, n, a)); +} + +int +nvlist_add_byte_array(nvlist_t *nvl, const char *name, uchar_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_BYTE_ARRAY, n, a)); +} + +int +nvlist_add_int8_array(nvlist_t *nvl, const char *name, int8_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_INT8_ARRAY, n, a)); +} + +int +nvlist_add_uint8_array(nvlist_t *nvl, const char *name, uint8_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_UINT8_ARRAY, n, a)); +} + +int +nvlist_add_int16_array(nvlist_t *nvl, const char *name, int16_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_INT16_ARRAY, n, a)); +} + +int +nvlist_add_uint16_array(nvlist_t *nvl, const char *name, uint16_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_UINT16_ARRAY, n, a)); +} + +int +nvlist_add_int32_array(nvlist_t *nvl, const char *name, int32_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_INT32_ARRAY, n, a)); +} + +int +nvlist_add_uint32_array(nvlist_t *nvl, const char *name, uint32_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_UINT32_ARRAY, n, a)); +} + +int +nvlist_add_int64_array(nvlist_t *nvl, const char *name, int64_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_INT64_ARRAY, n, a)); +} + +int +nvlist_add_uint64_array(nvlist_t *nvl, const char *name, uint64_t *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_UINT64_ARRAY, n, a)); +} + +int +nvlist_add_string_array(nvlist_t *nvl, const char *name, 
+ char *const *a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_STRING_ARRAY, n, a)); +} + +int +nvlist_add_hrtime(nvlist_t *nvl, const char *name, hrtime_t val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_HRTIME, 1, &val)); +} + +int +nvlist_add_nvlist(nvlist_t *nvl, const char *name, nvlist_t *val) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_NVLIST, 1, val)); +} + +int +nvlist_add_nvlist_array(nvlist_t *nvl, const char *name, nvlist_t **a, uint_t n) +{ + return (nvlist_add_common(nvl, name, DATA_TYPE_NVLIST_ARRAY, n, a)); +} + +/* reading name-value pairs */ +nvpair_t * +nvlist_next_nvpair(nvlist_t *nvl, nvpair_t *nvp) +{ + nvpriv_t *priv; + i_nvp_t *curr; + + if (nvl == NULL || + (priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv) == NULL) + return (NULL); + + curr = NVPAIR2I_NVP(nvp); + + /* + * Ensure that nvp is a valid nvpair on this nvlist. + * NB: nvp_curr is used only as a hint so that we don't always + * have to walk the list to determine if nvp is still on the list. + */ + if (nvp == NULL) + curr = priv->nvp_list; + else if (priv->nvp_curr == curr || nvlist_contains_nvp(nvl, nvp)) + curr = curr->nvi_next; + else + curr = NULL; + + priv->nvp_curr = curr; + + return (curr != NULL ? &curr->nvi_nvp : NULL); +} + +char * +nvpair_name(nvpair_t *nvp) +{ + return (NVP_NAME(nvp)); +} + +data_type_t +nvpair_type(nvpair_t *nvp) +{ + return (NVP_TYPE(nvp)); +} + +int +nvpair_type_is_array(nvpair_t *nvp) +{ + data_type_t type = NVP_TYPE(nvp); + + if ((type == DATA_TYPE_BYTE_ARRAY) || + (type == DATA_TYPE_UINT8_ARRAY) || + (type == DATA_TYPE_INT16_ARRAY) || + (type == DATA_TYPE_UINT16_ARRAY) || + (type == DATA_TYPE_INT32_ARRAY) || + (type == DATA_TYPE_UINT32_ARRAY) || + (type == DATA_TYPE_INT64_ARRAY) || + (type == DATA_TYPE_UINT64_ARRAY) || + (type == DATA_TYPE_BOOLEAN_ARRAY) || + (type == DATA_TYPE_STRING_ARRAY) || + (type == DATA_TYPE_NVLIST_ARRAY)) + return (1); + return (0); + +} + +static int +nvpair_value_common(nvpair_t *nvp, data_type_t type, uint_t *nelem, void *data) +{ + if (nvp == NULL || nvpair_type(nvp) != type) + return (EINVAL); + + /* + * For non-array types, we copy the data. + * For array types (including string), we set a pointer. 
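+ *
+ * For example, nvpair_value_uint64(nvp, &val) copies the value into
+ * "val", while nvpair_value_string(nvp, &str) leaves "str" pointing
+ * into the nvpair's own buffer, valid only for the nvpair's lifetime.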
+ */ + switch (type) { + case DATA_TYPE_BOOLEAN: + if (nelem != NULL) + *nelem = 0; + break; + + case DATA_TYPE_BOOLEAN_VALUE: + case DATA_TYPE_BYTE: + case DATA_TYPE_INT8: + case DATA_TYPE_UINT8: + case DATA_TYPE_INT16: + case DATA_TYPE_UINT16: + case DATA_TYPE_INT32: + case DATA_TYPE_UINT32: + case DATA_TYPE_INT64: + case DATA_TYPE_UINT64: + case DATA_TYPE_HRTIME: +#if !defined(_KERNEL) + case DATA_TYPE_DOUBLE: +#endif + if (data == NULL) + return (EINVAL); + bcopy(NVP_VALUE(nvp), data, + (size_t)i_get_value_size(type, NULL, 1)); + if (nelem != NULL) + *nelem = 1; + break; + + case DATA_TYPE_NVLIST: + case DATA_TYPE_STRING: + if (data == NULL) + return (EINVAL); + *(void **)data = (void *)NVP_VALUE(nvp); + if (nelem != NULL) + *nelem = 1; + break; + + case DATA_TYPE_BOOLEAN_ARRAY: + case DATA_TYPE_BYTE_ARRAY: + case DATA_TYPE_INT8_ARRAY: + case DATA_TYPE_UINT8_ARRAY: + case DATA_TYPE_INT16_ARRAY: + case DATA_TYPE_UINT16_ARRAY: + case DATA_TYPE_INT32_ARRAY: + case DATA_TYPE_UINT32_ARRAY: + case DATA_TYPE_INT64_ARRAY: + case DATA_TYPE_UINT64_ARRAY: + case DATA_TYPE_STRING_ARRAY: + case DATA_TYPE_NVLIST_ARRAY: + if (nelem == NULL || data == NULL) + return (EINVAL); + if ((*nelem = NVP_NELEM(nvp)) != 0) + *(void **)data = (void *)NVP_VALUE(nvp); + else + *(void **)data = NULL; + break; + + default: + return (ENOTSUP); + } + + return (0); +} + +static int +nvlist_lookup_common(nvlist_t *nvl, const char *name, data_type_t type, + uint_t *nelem, void *data) +{ + nvpriv_t *priv; + nvpair_t *nvp; + i_nvp_t *curr; + + if (name == NULL || nvl == NULL || + (priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv) == NULL) + return (EINVAL); + + if (!(nvl->nvl_nvflag & (NV_UNIQUE_NAME | NV_UNIQUE_NAME_TYPE))) + return (ENOTSUP); + + for (curr = priv->nvp_list; curr != NULL; curr = curr->nvi_next) { + nvp = &curr->nvi_nvp; + + if (strcmp(name, NVP_NAME(nvp)) == 0 && NVP_TYPE(nvp) == type) + return (nvpair_value_common(nvp, type, nelem, data)); + } + + return (ENOENT); +} + +int +nvlist_lookup_boolean(nvlist_t *nvl, const char *name) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_BOOLEAN, NULL, NULL)); +} + +int +nvlist_lookup_boolean_value(nvlist_t *nvl, const char *name, boolean_t *val) +{ + return (nvlist_lookup_common(nvl, name, + DATA_TYPE_BOOLEAN_VALUE, NULL, val)); +} + +int +nvlist_lookup_byte(nvlist_t *nvl, const char *name, uchar_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_BYTE, NULL, val)); +} + +int +nvlist_lookup_int8(nvlist_t *nvl, const char *name, int8_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_INT8, NULL, val)); +} + +int +nvlist_lookup_uint8(nvlist_t *nvl, const char *name, uint8_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_UINT8, NULL, val)); +} + +int +nvlist_lookup_int16(nvlist_t *nvl, const char *name, int16_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_INT16, NULL, val)); +} + +int +nvlist_lookup_uint16(nvlist_t *nvl, const char *name, uint16_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_UINT16, NULL, val)); +} + +int +nvlist_lookup_int32(nvlist_t *nvl, const char *name, int32_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_INT32, NULL, val)); +} + +int +nvlist_lookup_uint32(nvlist_t *nvl, const char *name, uint32_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_UINT32, NULL, val)); +} + +int +nvlist_lookup_int64(nvlist_t *nvl, const char *name, int64_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_INT64, NULL, val)); +} + +int 
+nvlist_lookup_uint64(nvlist_t *nvl, const char *name, uint64_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_UINT64, NULL, val)); +} + +#if !defined(_KERNEL) +int +nvlist_lookup_double(nvlist_t *nvl, const char *name, double *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_DOUBLE, NULL, val)); +} +#endif + +int +nvlist_lookup_string(nvlist_t *nvl, const char *name, char **val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_STRING, NULL, val)); +} + +int +nvlist_lookup_nvlist(nvlist_t *nvl, const char *name, nvlist_t **val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_NVLIST, NULL, val)); +} + +int +nvlist_lookup_boolean_array(nvlist_t *nvl, const char *name, + boolean_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, + DATA_TYPE_BOOLEAN_ARRAY, n, a)); +} + +int +nvlist_lookup_byte_array(nvlist_t *nvl, const char *name, + uchar_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_BYTE_ARRAY, n, a)); +} + +int +nvlist_lookup_int8_array(nvlist_t *nvl, const char *name, int8_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_INT8_ARRAY, n, a)); +} + +int +nvlist_lookup_uint8_array(nvlist_t *nvl, const char *name, + uint8_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_UINT8_ARRAY, n, a)); +} + +int +nvlist_lookup_int16_array(nvlist_t *nvl, const char *name, + int16_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_INT16_ARRAY, n, a)); +} + +int +nvlist_lookup_uint16_array(nvlist_t *nvl, const char *name, + uint16_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_UINT16_ARRAY, n, a)); +} + +int +nvlist_lookup_int32_array(nvlist_t *nvl, const char *name, + int32_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_INT32_ARRAY, n, a)); +} + +int +nvlist_lookup_uint32_array(nvlist_t *nvl, const char *name, + uint32_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_UINT32_ARRAY, n, a)); +} + +int +nvlist_lookup_int64_array(nvlist_t *nvl, const char *name, + int64_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_INT64_ARRAY, n, a)); +} + +int +nvlist_lookup_uint64_array(nvlist_t *nvl, const char *name, + uint64_t **a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_UINT64_ARRAY, n, a)); +} + +int +nvlist_lookup_string_array(nvlist_t *nvl, const char *name, + char ***a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_STRING_ARRAY, n, a)); +} + +int +nvlist_lookup_nvlist_array(nvlist_t *nvl, const char *name, + nvlist_t ***a, uint_t *n) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_NVLIST_ARRAY, n, a)); +} + +int +nvlist_lookup_hrtime(nvlist_t *nvl, const char *name, hrtime_t *val) +{ + return (nvlist_lookup_common(nvl, name, DATA_TYPE_HRTIME, NULL, val)); +} + +int +nvlist_lookup_pairs(nvlist_t *nvl, int flag, ...) +{ + va_list ap; + char *name; + int noentok = (flag & NV_FLAG_NOENTOK ? 
1 : 0); + int ret = 0; + + va_start(ap, flag); + while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { + data_type_t type; + void *val; + uint_t *nelem; + + switch (type = va_arg(ap, data_type_t)) { + case DATA_TYPE_BOOLEAN: + ret = nvlist_lookup_common(nvl, name, type, NULL, NULL); + break; + + case DATA_TYPE_BOOLEAN_VALUE: + case DATA_TYPE_BYTE: + case DATA_TYPE_INT8: + case DATA_TYPE_UINT8: + case DATA_TYPE_INT16: + case DATA_TYPE_UINT16: + case DATA_TYPE_INT32: + case DATA_TYPE_UINT32: + case DATA_TYPE_INT64: + case DATA_TYPE_UINT64: + case DATA_TYPE_HRTIME: + case DATA_TYPE_STRING: + case DATA_TYPE_NVLIST: +#if !defined(_KERNEL) + case DATA_TYPE_DOUBLE: +#endif + val = va_arg(ap, void *); + ret = nvlist_lookup_common(nvl, name, type, NULL, val); + break; + + case DATA_TYPE_BYTE_ARRAY: + case DATA_TYPE_BOOLEAN_ARRAY: + case DATA_TYPE_INT8_ARRAY: + case DATA_TYPE_UINT8_ARRAY: + case DATA_TYPE_INT16_ARRAY: + case DATA_TYPE_UINT16_ARRAY: + case DATA_TYPE_INT32_ARRAY: + case DATA_TYPE_UINT32_ARRAY: + case DATA_TYPE_INT64_ARRAY: + case DATA_TYPE_UINT64_ARRAY: + case DATA_TYPE_STRING_ARRAY: + case DATA_TYPE_NVLIST_ARRAY: + val = va_arg(ap, void *); + nelem = va_arg(ap, uint_t *); + ret = nvlist_lookup_common(nvl, name, type, nelem, val); + break; + + default: + ret = EINVAL; + } + + if (ret == ENOENT && noentok) + ret = 0; + } + va_end(ap); + + return (ret); +} + +/* + * Find the 'name'ed nvpair in the nvlist 'nvl'. If 'name' found, the function + * returns zero and a pointer to the matching nvpair is returned in '*ret' + * (given 'ret' is non-NULL). If 'sep' is specified then 'name' will penitrate + * multiple levels of embedded nvlists, with 'sep' as the separator. As an + * example, if sep is '.', name might look like: "a" or "a.b" or "a.c[3]" or + * "a.d[3].e[1]". This matches the C syntax for array embed (for convience, + * code also supports "a.d[3]e[1]" syntax). + * + * If 'ip' is non-NULL and the last name component is an array, return the + * value of the "...[index]" array index in *ip. For an array reference that + * is not indexed, *ip will be returned as -1. If there is a syntax error in + * 'name', and 'ep' is non-NULL then *ep will be set to point to the location + * inside the 'name' string where the syntax error was detected. + */ +static int +nvlist_lookup_nvpair_ei_sep(nvlist_t *nvl, const char *name, const char sep, + nvpair_t **ret, int *ip, char **ep) +{ + nvpair_t *nvp; + const char *np; + char *sepp; + char *idxp, *idxep; + nvlist_t **nva; + long idx; + int n; + + if (ip) + *ip = -1; /* not indexed */ + if (ep) + *ep = NULL; + + if ((nvl == NULL) || (name == NULL)) + return (EINVAL); + + /* step through components of name */ + for (np = name; np && *np; np = sepp) { + /* ensure unique names */ + if (!(nvl->nvl_nvflag & NV_UNIQUE_NAME)) + return (ENOTSUP); + + /* skip white space */ + skip_whitespace(np); + if (*np == 0) + break; + + /* set 'sepp' to end of current component 'np' */ + if (sep) + sepp = strchr(np, sep); + else + sepp = NULL; + + /* find start of next "[ index ]..." */ + idxp = strchr(np, '['); + + /* if sepp comes first, set idxp to NULL */ + if (sepp && idxp && (sepp < idxp)) + idxp = NULL; + + /* + * At this point 'idxp' is set if there is an index + * expected for the current component. 
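+ * (For example, while parsing the component "d[3]" of a name such as
+ * "a.d[3]", 'np' points at the "d" and 'idxp' at the '['.)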
+ */ + if (idxp) { + /* set 'n' to length of current 'np' name component */ + n = idxp++ - np; + + /* keep sepp up to date for *ep use as we advance */ + skip_whitespace(idxp); + sepp = idxp; + + /* determine the index value */ +#if defined(_KERNEL) && !defined(_BOOT) + if (ddi_strtol(idxp, &idxep, 0, &idx)) + goto fail; +#else + idx = strtol(idxp, &idxep, 0); +#endif + if (idxep == idxp) + goto fail; + + /* keep sepp up to date for *ep use as we advance */ + sepp = idxep; + + /* skip white space index value and check for ']' */ + skip_whitespace(sepp); + if (*sepp++ != ']') + goto fail; + + /* for embedded arrays, support C syntax: "a[1].b" */ + skip_whitespace(sepp); + if (sep && (*sepp == sep)) + sepp++; + } else if (sepp) { + n = sepp++ - np; + } else { + n = strlen(np); + } + + /* trim trailing whitespace by reducing length of 'np' */ + if (n == 0) + goto fail; + for (n--; (np[n] == ' ') || (np[n] == '\t'); n--) + ; + n++; + + /* skip whitespace, and set sepp to NULL if complete */ + if (sepp) { + skip_whitespace(sepp); + if (*sepp == 0) + sepp = NULL; + } + + /* + * At this point: + * o 'n' is the length of current 'np' component. + * o 'idxp' is set if there was an index, and value 'idx'. + * o 'sepp' is set to the beginning of the next component, + * and set to NULL if we have no more components. + * + * Search for nvpair with matching component name. + */ + for (nvp = nvlist_next_nvpair(nvl, NULL); nvp != NULL; + nvp = nvlist_next_nvpair(nvl, nvp)) { + + /* continue if no match on name */ + if (strncmp(np, nvpair_name(nvp), n) || + (strlen(nvpair_name(nvp)) != n)) + continue; + + /* if indexed, verify type is array oriented */ + if (idxp && !nvpair_type_is_array(nvp)) + goto fail; + + /* + * Full match found, return nvp and idx if this + * was the last component. + */ + if (sepp == NULL) { + if (ret) + *ret = nvp; + if (ip && idxp) + *ip = (int)idx; /* return index */ + return (0); /* found */ + } + + /* + * More components: current match must be + * of DATA_TYPE_NVLIST or DATA_TYPE_NVLIST_ARRAY + * to support going deeper. + */ + if (nvpair_type(nvp) == DATA_TYPE_NVLIST) { + nvl = EMBEDDED_NVL(nvp); + break; + } else if (nvpair_type(nvp) == DATA_TYPE_NVLIST_ARRAY) { + (void) nvpair_value_nvlist_array(nvp, + &nva, (uint_t *)&n); + if ((n < 0) || (idx >= n)) + goto fail; + nvl = nva[idx]; + break; + } + + /* type does not support more levels */ + goto fail; + } + if (nvp == NULL) + goto fail; /* 'name' not found */ + + /* search for match of next component in embedded 'nvl' list */ + } + +fail: if (ep && sepp) + *ep = sepp; + return (EINVAL); +} + +/* + * Return pointer to nvpair with specified 'name'. + */ +int +nvlist_lookup_nvpair(nvlist_t *nvl, const char *name, nvpair_t **ret) +{ + return (nvlist_lookup_nvpair_ei_sep(nvl, name, 0, ret, NULL, NULL)); +} + +/* + * Determine if named nvpair exists in nvlist (use embedded separator of '.' + * and return array index). See nvlist_lookup_nvpair_ei_sep for more detailed + * description. 
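+ *
+ * A minimal usage sketch (the pair name "vdev_tree.children[2]" and the
+ * variables below are hypothetical):
+ *
+ *	nvpair_t *nvp;
+ *	int idx;
+ *	char *ep;
+ *
+ *	if (nvlist_lookup_nvpair_embedded_index(nvl, "vdev_tree.children[2]",
+ *	    &nvp, &idx, &ep) == 0)
+ *		... nvp is the embedded "children" array pair and idx == 2 ...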
+ */ +int nvlist_lookup_nvpair_embedded_index(nvlist_t *nvl, + const char *name, nvpair_t **ret, int *ip, char **ep) +{ + return (nvlist_lookup_nvpair_ei_sep(nvl, name, '.', ret, ip, ep)); +} + +boolean_t +nvlist_exists(nvlist_t *nvl, const char *name) +{ + nvpriv_t *priv; + nvpair_t *nvp; + i_nvp_t *curr; + + if (name == NULL || nvl == NULL || + (priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv) == NULL) + return (B_FALSE); + + for (curr = priv->nvp_list; curr != NULL; curr = curr->nvi_next) { + nvp = &curr->nvi_nvp; + + if (strcmp(name, NVP_NAME(nvp)) == 0) + return (B_TRUE); + } + + return (B_FALSE); +} + +int +nvpair_value_boolean_value(nvpair_t *nvp, boolean_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_BOOLEAN_VALUE, NULL, val)); +} + +int +nvpair_value_byte(nvpair_t *nvp, uchar_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_BYTE, NULL, val)); +} + +int +nvpair_value_int8(nvpair_t *nvp, int8_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_INT8, NULL, val)); +} + +int +nvpair_value_uint8(nvpair_t *nvp, uint8_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_UINT8, NULL, val)); +} + +int +nvpair_value_int16(nvpair_t *nvp, int16_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_INT16, NULL, val)); +} + +int +nvpair_value_uint16(nvpair_t *nvp, uint16_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_UINT16, NULL, val)); +} + +int +nvpair_value_int32(nvpair_t *nvp, int32_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_INT32, NULL, val)); +} + +int +nvpair_value_uint32(nvpair_t *nvp, uint32_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_UINT32, NULL, val)); +} + +int +nvpair_value_int64(nvpair_t *nvp, int64_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_INT64, NULL, val)); +} + +int +nvpair_value_uint64(nvpair_t *nvp, uint64_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_UINT64, NULL, val)); +} + +#if !defined(_KERNEL) +int +nvpair_value_double(nvpair_t *nvp, double *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_DOUBLE, NULL, val)); +} +#endif + +int +nvpair_value_string(nvpair_t *nvp, char **val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_STRING, NULL, val)); +} + +int +nvpair_value_nvlist(nvpair_t *nvp, nvlist_t **val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_NVLIST, NULL, val)); +} + +int +nvpair_value_boolean_array(nvpair_t *nvp, boolean_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_BOOLEAN_ARRAY, nelem, val)); +} + +int +nvpair_value_byte_array(nvpair_t *nvp, uchar_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_BYTE_ARRAY, nelem, val)); +} + +int +nvpair_value_int8_array(nvpair_t *nvp, int8_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_INT8_ARRAY, nelem, val)); +} + +int +nvpair_value_uint8_array(nvpair_t *nvp, uint8_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_UINT8_ARRAY, nelem, val)); +} + +int +nvpair_value_int16_array(nvpair_t *nvp, int16_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_INT16_ARRAY, nelem, val)); +} + +int +nvpair_value_uint16_array(nvpair_t *nvp, uint16_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_UINT16_ARRAY, nelem, val)); +} + +int +nvpair_value_int32_array(nvpair_t *nvp, int32_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_INT32_ARRAY, nelem, val)); +} + +int +nvpair_value_uint32_array(nvpair_t *nvp, uint32_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, 
DATA_TYPE_UINT32_ARRAY, nelem, val)); +} + +int +nvpair_value_int64_array(nvpair_t *nvp, int64_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_INT64_ARRAY, nelem, val)); +} + +int +nvpair_value_uint64_array(nvpair_t *nvp, uint64_t **val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_UINT64_ARRAY, nelem, val)); +} + +int +nvpair_value_string_array(nvpair_t *nvp, char ***val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_STRING_ARRAY, nelem, val)); +} + +int +nvpair_value_nvlist_array(nvpair_t *nvp, nvlist_t ***val, uint_t *nelem) +{ + return (nvpair_value_common(nvp, DATA_TYPE_NVLIST_ARRAY, nelem, val)); +} + +int +nvpair_value_hrtime(nvpair_t *nvp, hrtime_t *val) +{ + return (nvpair_value_common(nvp, DATA_TYPE_HRTIME, NULL, val)); +} + +/* + * Add specified pair to the list. + */ +int +nvlist_add_nvpair(nvlist_t *nvl, nvpair_t *nvp) +{ + if (nvl == NULL || nvp == NULL) + return (EINVAL); + + return (nvlist_add_common(nvl, NVP_NAME(nvp), NVP_TYPE(nvp), + NVP_NELEM(nvp), NVP_VALUE(nvp))); +} + +/* + * Merge the supplied nvlists and put the result in dst. + * The merged list will contain all names specified in both lists, + * the values are taken from nvl in the case of duplicates. + * Return 0 on success. + */ +/*ARGSUSED*/ +int +nvlist_merge(nvlist_t *dst, nvlist_t *nvl, int flag) +{ + if (nvl == NULL || dst == NULL) + return (EINVAL); + + if (dst != nvl) + return (nvlist_copy_pairs(nvl, dst)); + + return (0); +} + +/* + * Encoding related routines + */ +#define NVS_OP_ENCODE 0 +#define NVS_OP_DECODE 1 +#define NVS_OP_GETSIZE 2 + +typedef struct nvs_ops nvs_ops_t; + +typedef struct { + int nvs_op; + const nvs_ops_t *nvs_ops; + void *nvs_private; + nvpriv_t *nvs_priv; +} nvstream_t; + +/* + * nvs operations are: + * - nvs_nvlist + * encoding / decoding of a nvlist header (nvlist_t) + * calculates the size used for header and end detection + * + * - nvs_nvpair + * responsible for the first part of encoding / decoding of an nvpair + * calculates the decoded size of an nvpair + * + * - nvs_nvp_op + * second part of encoding / decoding of an nvpair + * + * - nvs_nvp_size + * calculates the encoding size of an nvpair + * + * - nvs_nvl_fini + * encodes the end detection mark (zeros). 
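+ *
+ * As an illustration, nvs_operation() below drives an encode pass roughly as
+ *
+ *	nvs_nvlist()	write the nvlist header
+ *	nvs_nvpair()	once per pair (which in turn calls nvs_nvp_op())
+ *	nvs_nvl_fini()	write the end-of-list marker
+ *
+ * while an NVS_OP_GETSIZE pass uses nvs_nvlist() and nvs_nvp_size() instead.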
+ */ +struct nvs_ops { + int (*nvs_nvlist)(nvstream_t *, nvlist_t *, size_t *); + int (*nvs_nvpair)(nvstream_t *, nvpair_t *, size_t *); + int (*nvs_nvp_op)(nvstream_t *, nvpair_t *); + int (*nvs_nvp_size)(nvstream_t *, nvpair_t *, size_t *); + int (*nvs_nvl_fini)(nvstream_t *); +}; + +typedef struct { + char nvh_encoding; /* nvs encoding method */ + char nvh_endian; /* nvs endian */ + char nvh_reserved1; /* reserved for future use */ + char nvh_reserved2; /* reserved for future use */ +} nvs_header_t; + +static int +nvs_encode_pairs(nvstream_t *nvs, nvlist_t *nvl) +{ + nvpriv_t *priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv; + i_nvp_t *curr; + + /* + * Walk nvpair in list and encode each nvpair + */ + for (curr = priv->nvp_list; curr != NULL; curr = curr->nvi_next) + if (nvs->nvs_ops->nvs_nvpair(nvs, &curr->nvi_nvp, NULL) != 0) + return (EFAULT); + + return (nvs->nvs_ops->nvs_nvl_fini(nvs)); +} + +static int +nvs_decode_pairs(nvstream_t *nvs, nvlist_t *nvl) +{ + nvpair_t *nvp; + size_t nvsize; + int err; + + /* + * Get decoded size of next pair in stream, alloc + * memory for nvpair_t, then decode the nvpair + */ + while ((err = nvs->nvs_ops->nvs_nvpair(nvs, NULL, &nvsize)) == 0) { + if (nvsize == 0) /* end of list */ + break; + + /* make sure len makes sense */ + if (nvsize < NVP_SIZE_CALC(1, 0)) + return (EFAULT); + + if ((nvp = nvp_buf_alloc(nvl, nvsize)) == NULL) + return (ENOMEM); + + if ((err = nvs->nvs_ops->nvs_nvp_op(nvs, nvp)) != 0) { + nvp_buf_free(nvl, nvp); + return (err); + } + + if (i_validate_nvpair(nvp) != 0) { + nvpair_free(nvp); + nvp_buf_free(nvl, nvp); + return (EFAULT); + } + + nvp_buf_link(nvl, nvp); + } + return (err); +} + +static int +nvs_getsize_pairs(nvstream_t *nvs, nvlist_t *nvl, size_t *buflen) +{ + nvpriv_t *priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv; + i_nvp_t *curr; + uint64_t nvsize = *buflen; + size_t size; + + /* + * Get encoded size of nvpairs in nvlist + */ + for (curr = priv->nvp_list; curr != NULL; curr = curr->nvi_next) { + if (nvs->nvs_ops->nvs_nvp_size(nvs, &curr->nvi_nvp, &size) != 0) + return (EINVAL); + + if ((nvsize += size) > INT32_MAX) + return (EINVAL); + } + + *buflen = nvsize; + return (0); +} + +static int +nvs_operation(nvstream_t *nvs, nvlist_t *nvl, size_t *buflen) +{ + int err; + + if (nvl->nvl_priv == 0) + return (EFAULT); + + /* + * Perform the operation, starting with header, then each nvpair + */ + if ((err = nvs->nvs_ops->nvs_nvlist(nvs, nvl, buflen)) != 0) + return (err); + + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + err = nvs_encode_pairs(nvs, nvl); + break; + + case NVS_OP_DECODE: + err = nvs_decode_pairs(nvs, nvl); + break; + + case NVS_OP_GETSIZE: + err = nvs_getsize_pairs(nvs, nvl, buflen); + break; + + default: + err = EINVAL; + } + + return (err); +} + +static int +nvs_embedded(nvstream_t *nvs, nvlist_t *embedded) +{ + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + return (nvs_operation(nvs, embedded, NULL)); + + case NVS_OP_DECODE: { + nvpriv_t *priv; + int err; + + if (embedded->nvl_version != NV_VERSION) + return (ENOTSUP); + + if ((priv = nv_priv_alloc_embedded(nvs->nvs_priv)) == NULL) + return (ENOMEM); + + nvlist_init(embedded, embedded->nvl_nvflag, priv); + + if ((err = nvs_operation(nvs, embedded, NULL)) != 0) + nvlist_free(embedded); + return (err); + } + default: + break; + } + + return (EINVAL); +} + +static int +nvs_embedded_nvl_array(nvstream_t *nvs, nvpair_t *nvp, size_t *size) +{ + size_t nelem = NVP_NELEM(nvp); + nvlist_t **nvlp = EMBEDDED_NVL_ARRAY(nvp); + int i; + + switch (nvs->nvs_op) { + case 
NVS_OP_ENCODE: + for (i = 0; i < nelem; i++) + if (nvs_embedded(nvs, nvlp[i]) != 0) + return (EFAULT); + break; + + case NVS_OP_DECODE: { + size_t len = nelem * sizeof (uint64_t); + nvlist_t *embedded = (nvlist_t *)((uintptr_t)nvlp + len); + + bzero(nvlp, len); /* don't trust packed data */ + for (i = 0; i < nelem; i++) { + if (nvs_embedded(nvs, embedded) != 0) { + nvpair_free(nvp); + return (EFAULT); + } + + nvlp[i] = embedded++; + } + break; + } + case NVS_OP_GETSIZE: { + uint64_t nvsize = 0; + + for (i = 0; i < nelem; i++) { + size_t nvp_sz = 0; + + if (nvs_operation(nvs, nvlp[i], &nvp_sz) != 0) + return (EINVAL); + + if ((nvsize += nvp_sz) > INT32_MAX) + return (EINVAL); + } + + *size = nvsize; + break; + } + default: + return (EINVAL); + } + + return (0); +} + +static int nvs_native(nvstream_t *, nvlist_t *, char *, size_t *); +static int nvs_xdr(nvstream_t *, nvlist_t *, char *, size_t *); + +/* + * Common routine for nvlist operations: + * encode, decode, getsize (encoded size). + */ +static int +nvlist_common(nvlist_t *nvl, char *buf, size_t *buflen, int encoding, + int nvs_op) +{ + int err = 0; + nvstream_t nvs; + int nvl_endian; +#ifdef _LITTLE_ENDIAN + int host_endian = 1; +#else + int host_endian = 0; +#endif /* _LITTLE_ENDIAN */ + nvs_header_t *nvh = (void *)buf; + + if (buflen == NULL || nvl == NULL || + (nvs.nvs_priv = (nvpriv_t *)(uintptr_t)nvl->nvl_priv) == NULL) + return (EINVAL); + + nvs.nvs_op = nvs_op; + + /* + * For NVS_OP_ENCODE and NVS_OP_DECODE make sure an nvlist and + * a buffer is allocated. The first 4 bytes in the buffer are + * used for encoding method and host endian. + */ + switch (nvs_op) { + case NVS_OP_ENCODE: + if (buf == NULL || *buflen < sizeof (nvs_header_t)) + return (EINVAL); + + nvh->nvh_encoding = encoding; + nvh->nvh_endian = nvl_endian = host_endian; + nvh->nvh_reserved1 = 0; + nvh->nvh_reserved2 = 0; + break; + + case NVS_OP_DECODE: + if (buf == NULL || *buflen < sizeof (nvs_header_t)) + return (EINVAL); + + /* get method of encoding from first byte */ + encoding = nvh->nvh_encoding; + nvl_endian = nvh->nvh_endian; + break; + + case NVS_OP_GETSIZE: + nvl_endian = host_endian; + + /* + * add the size for encoding + */ + *buflen = sizeof (nvs_header_t); + break; + + default: + return (ENOTSUP); + } + + /* + * Create an nvstream with proper encoding method + */ + switch (encoding) { + case NV_ENCODE_NATIVE: + /* + * check endianness, in case we are unpacking + * from a file + */ + if (nvl_endian != host_endian) + return (ENOTSUP); + err = nvs_native(&nvs, nvl, buf, buflen); + break; + case NV_ENCODE_XDR: + err = nvs_xdr(&nvs, nvl, buf, buflen); + break; + default: + err = ENOTSUP; + break; + } + + return (err); +} + +int +nvlist_size(nvlist_t *nvl, size_t *size, int encoding) +{ + return (nvlist_common(nvl, NULL, size, encoding, NVS_OP_GETSIZE)); +} + +/* + * Pack nvlist into contiguous memory + */ +/*ARGSUSED1*/ +int +nvlist_pack(nvlist_t *nvl, char **bufp, size_t *buflen, int encoding, + int kmflag) +{ +#if defined(_KERNEL) && !defined(_BOOT) + return (nvlist_xpack(nvl, bufp, buflen, encoding, + (kmflag == KM_SLEEP ? 
nv_alloc_sleep : nv_alloc_nosleep))); +#else + return (nvlist_xpack(nvl, bufp, buflen, encoding, nv_alloc_nosleep)); +#endif +} + +int +nvlist_xpack(nvlist_t *nvl, char **bufp, size_t *buflen, int encoding, + nv_alloc_t *nva) +{ + nvpriv_t nvpriv; + size_t alloc_size; + char *buf; + int err; + + if (nva == NULL || nvl == NULL || bufp == NULL || buflen == NULL) + return (EINVAL); + + if (*bufp != NULL) + return (nvlist_common(nvl, *bufp, buflen, encoding, + NVS_OP_ENCODE)); + + /* + * Here is a difficult situation: + * 1. The nvlist has fixed allocator properties. + * All other nvlist routines (like nvlist_add_*, ...) use + * these properties. + * 2. When using nvlist_pack() the user can specify his own + * allocator properties (e.g. by using KM_NOSLEEP). + * + * We use the user specified properties (2). A clearer solution + * will be to remove the kmflag from nvlist_pack(), but we will + * not change the interface. + */ + nv_priv_init(&nvpriv, nva, 0); + + if (err = nvlist_size(nvl, &alloc_size, encoding)) + return (err); + + if ((buf = nv_mem_zalloc(&nvpriv, alloc_size)) == NULL) + return (ENOMEM); + + if ((err = nvlist_common(nvl, buf, &alloc_size, encoding, + NVS_OP_ENCODE)) != 0) { + nv_mem_free(&nvpriv, buf, alloc_size); + } else { + *buflen = alloc_size; + *bufp = buf; + } + + return (err); +} + +/* + * Unpack buf into an nvlist_t + */ +/*ARGSUSED1*/ +int +nvlist_unpack(char *buf, size_t buflen, nvlist_t **nvlp, int kmflag) +{ +#if defined(_KERNEL) && !defined(_BOOT) + return (nvlist_xunpack(buf, buflen, nvlp, + (kmflag == KM_SLEEP ? nv_alloc_sleep : nv_alloc_nosleep))); +#else + return (nvlist_xunpack(buf, buflen, nvlp, nv_alloc_nosleep)); +#endif +} + +int +nvlist_xunpack(char *buf, size_t buflen, nvlist_t **nvlp, nv_alloc_t *nva) +{ + nvlist_t *nvl; + int err; + + if (nvlp == NULL) + return (EINVAL); + + if ((err = nvlist_xalloc(&nvl, 0, nva)) != 0) + return (err); + + if ((err = nvlist_common(nvl, buf, &buflen, 0, NVS_OP_DECODE)) != 0) + nvlist_free(nvl); + else + *nvlp = nvl; + + return (err); +} + +/* + * Native encoding functions + */ +typedef struct { + /* + * This structure is used when decoding a packed nvpair in + * the native format. n_base points to a buffer containing the + * packed nvpair. n_end is a pointer to the end of the buffer. + * (n_end actually points to the first byte past the end of the + * buffer.) n_curr is a pointer that lies between n_base and n_end. + * It points to the current data that we are decoding. + * The amount of data left in the buffer is equal to n_end - n_curr. + * n_flag is used to recognize a packed embedded list. 
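+ *
+ * Schematically (illustrative):
+ *
+ *	n_base               n_curr                         n_end
+ *	|--------------------|==============================|
+ *	  already decoded           data left to decode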
+ */ + caddr_t n_base; + caddr_t n_end; + caddr_t n_curr; + uint_t n_flag; +} nvs_native_t; + +static int +nvs_native_create(nvstream_t *nvs, nvs_native_t *native, char *buf, + size_t buflen) +{ + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + case NVS_OP_DECODE: + nvs->nvs_private = native; + native->n_curr = native->n_base = buf; + native->n_end = buf + buflen; + native->n_flag = 0; + return (0); + + case NVS_OP_GETSIZE: + nvs->nvs_private = native; + native->n_curr = native->n_base = native->n_end = NULL; + native->n_flag = 0; + return (0); + default: + return (EINVAL); + } +} + +/*ARGSUSED*/ +static void +nvs_native_destroy(nvstream_t *nvs) +{ +} + +static int +native_cp(nvstream_t *nvs, void *buf, size_t size) +{ + nvs_native_t *native = (nvs_native_t *)nvs->nvs_private; + + if (native->n_curr + size > native->n_end) + return (EFAULT); + + /* + * The bcopy() below eliminates alignment requirement + * on the buffer (stream) and is preferred over direct access. + */ + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + bcopy(buf, native->n_curr, size); + break; + case NVS_OP_DECODE: + bcopy(native->n_curr, buf, size); + break; + default: + return (EINVAL); + } + + native->n_curr += size; + return (0); +} + +/* + * operate on nvlist_t header + */ +static int +nvs_native_nvlist(nvstream_t *nvs, nvlist_t *nvl, size_t *size) +{ + nvs_native_t *native = nvs->nvs_private; + + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + case NVS_OP_DECODE: + if (native->n_flag) + return (0); /* packed embedded list */ + + native->n_flag = 1; + + /* copy version and nvflag of the nvlist_t */ + if (native_cp(nvs, &nvl->nvl_version, sizeof (int32_t)) != 0 || + native_cp(nvs, &nvl->nvl_nvflag, sizeof (int32_t)) != 0) + return (EFAULT); + + return (0); + + case NVS_OP_GETSIZE: + /* + * if calculate for packed embedded list + * 4 for end of the embedded list + * else + * 2 * sizeof (int32_t) for nvl_version and nvl_nvflag + * and 4 for end of the entire list + */ + if (native->n_flag) { + *size += 4; + } else { + native->n_flag = 1; + *size += 2 * sizeof (int32_t) + 4; + } + + return (0); + + default: + return (EINVAL); + } +} + +static int +nvs_native_nvl_fini(nvstream_t *nvs) +{ + if (nvs->nvs_op == NVS_OP_ENCODE) { + nvs_native_t *native = (nvs_native_t *)nvs->nvs_private; + /* + * Add 4 zero bytes at end of nvlist. They are used + * for end detection by the decode routine. + */ + if (native->n_curr + sizeof (int) > native->n_end) + return (EFAULT); + + bzero(native->n_curr, sizeof (int)); + native->n_curr += sizeof (int); + } + + return (0); +} + +static int +nvpair_native_embedded(nvstream_t *nvs, nvpair_t *nvp) +{ + if (nvs->nvs_op == NVS_OP_ENCODE) { + nvs_native_t *native = (nvs_native_t *)nvs->nvs_private; + nvlist_t *packed = (void *) + (native->n_curr - nvp->nvp_size + NVP_VALOFF(nvp)); + /* + * Null out the pointer that is meaningless in the packed + * structure. The address may not be aligned, so we have + * to use bzero. + */ + bzero(&packed->nvl_priv, sizeof (packed->nvl_priv)); + } + + return (nvs_embedded(nvs, EMBEDDED_NVL(nvp))); +} + +static int +nvpair_native_embedded_array(nvstream_t *nvs, nvpair_t *nvp) +{ + if (nvs->nvs_op == NVS_OP_ENCODE) { + nvs_native_t *native = (nvs_native_t *)nvs->nvs_private; + char *value = native->n_curr - nvp->nvp_size + NVP_VALOFF(nvp); + size_t len = NVP_NELEM(nvp) * sizeof (uint64_t); + nvlist_t *packed = (nvlist_t *)((uintptr_t)value + len); + int i; + /* + * Null out pointers that are meaningless in the packed + * structure. 
The addresses may not be aligned, so we have + * to use bzero. + */ + bzero(value, len); + + for (i = 0; i < NVP_NELEM(nvp); i++, packed++) + /* + * Null out the pointer that is meaningless in the + * packed structure. The address may not be aligned, + * so we have to use bzero. + */ + bzero(&packed->nvl_priv, sizeof (packed->nvl_priv)); + } + + return (nvs_embedded_nvl_array(nvs, nvp, NULL)); +} + +static void +nvpair_native_string_array(nvstream_t *nvs, nvpair_t *nvp) +{ + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: { + nvs_native_t *native = (nvs_native_t *)nvs->nvs_private; + uint64_t *strp = (void *) + (native->n_curr - nvp->nvp_size + NVP_VALOFF(nvp)); + /* + * Null out pointers that are meaningless in the packed + * structure. The addresses may not be aligned, so we have + * to use bzero. + */ + bzero(strp, NVP_NELEM(nvp) * sizeof (uint64_t)); + break; + } + case NVS_OP_DECODE: { + char **strp = (void *)NVP_VALUE(nvp); + char *buf = ((char *)strp + NVP_NELEM(nvp) * sizeof (uint64_t)); + int i; + + for (i = 0; i < NVP_NELEM(nvp); i++) { + strp[i] = buf; + buf += strlen(buf) + 1; + } + break; + } + } +} + +static int +nvs_native_nvp_op(nvstream_t *nvs, nvpair_t *nvp) +{ + data_type_t type; + int value_sz; + int ret = 0; + + /* + * We do the initial bcopy of the data before we look at + * the nvpair type, because when we're decoding, we won't + * have the correct values for the pair until we do the bcopy. + */ + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + case NVS_OP_DECODE: + if (native_cp(nvs, nvp, nvp->nvp_size) != 0) + return (EFAULT); + break; + default: + return (EINVAL); + } + + /* verify nvp_name_sz, check the name string length */ + if (i_validate_nvpair_name(nvp) != 0) + return (EFAULT); + + type = NVP_TYPE(nvp); + + /* + * Verify type and nelem and get the value size. + * In case of data types DATA_TYPE_STRING and DATA_TYPE_STRING_ARRAY + * is the size of the string(s) excluded. 
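+ * (That is, for DATA_TYPE_STRING and DATA_TYPE_STRING_ARRAY the size of
+ * the string data itself is not included in value_sz.)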
+ */ + if ((value_sz = i_get_value_size(type, NULL, NVP_NELEM(nvp))) < 0) + return (EFAULT); + + if (NVP_SIZE_CALC(nvp->nvp_name_sz, value_sz) > nvp->nvp_size) + return (EFAULT); + + switch (type) { + case DATA_TYPE_NVLIST: + ret = nvpair_native_embedded(nvs, nvp); + break; + case DATA_TYPE_NVLIST_ARRAY: + ret = nvpair_native_embedded_array(nvs, nvp); + break; + case DATA_TYPE_STRING_ARRAY: + nvpair_native_string_array(nvs, nvp); + break; + default: + break; + } + + return (ret); +} + +static int +nvs_native_nvp_size(nvstream_t *nvs, nvpair_t *nvp, size_t *size) +{ + uint64_t nvp_sz = nvp->nvp_size; + + switch (NVP_TYPE(nvp)) { + case DATA_TYPE_NVLIST: { + size_t nvsize = 0; + + if (nvs_operation(nvs, EMBEDDED_NVL(nvp), &nvsize) != 0) + return (EINVAL); + + nvp_sz += nvsize; + break; + } + case DATA_TYPE_NVLIST_ARRAY: { + size_t nvsize; + + if (nvs_embedded_nvl_array(nvs, nvp, &nvsize) != 0) + return (EINVAL); + + nvp_sz += nvsize; + break; + } + default: + break; + } + + if (nvp_sz > INT32_MAX) + return (EINVAL); + + *size = nvp_sz; + + return (0); +} + +static int +nvs_native_nvpair(nvstream_t *nvs, nvpair_t *nvp, size_t *size) +{ + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + return (nvs_native_nvp_op(nvs, nvp)); + + case NVS_OP_DECODE: { + nvs_native_t *native = (nvs_native_t *)nvs->nvs_private; + int32_t decode_len; + + /* try to read the size value from the stream */ + if (native->n_curr + sizeof (int32_t) > native->n_end) + return (EFAULT); + bcopy(native->n_curr, &decode_len, sizeof (int32_t)); + + /* sanity check the size value */ + if (decode_len < 0 || + decode_len > native->n_end - native->n_curr) + return (EFAULT); + + *size = decode_len; + + /* + * If at the end of the stream then move the cursor + * forward, otherwise nvpair_native_op() will read + * the entire nvpair at the same cursor position. 
+ */ + if (*size == 0) + native->n_curr += sizeof (int32_t); + break; + } + + default: + return (EINVAL); + } + + return (0); +} + +static const nvs_ops_t nvs_native_ops = { + nvs_native_nvlist, + nvs_native_nvpair, + nvs_native_nvp_op, + nvs_native_nvp_size, + nvs_native_nvl_fini +}; + +static int +nvs_native(nvstream_t *nvs, nvlist_t *nvl, char *buf, size_t *buflen) +{ + nvs_native_t native; + int err; + + nvs->nvs_ops = &nvs_native_ops; + + if ((err = nvs_native_create(nvs, &native, buf + sizeof (nvs_header_t), + *buflen - sizeof (nvs_header_t))) != 0) + return (err); + + err = nvs_operation(nvs, nvl, buflen); + + nvs_native_destroy(nvs); + + return (err); +} + +/* + * XDR encoding functions + * + * An xdr packed nvlist is encoded as: + * + * - encoding methode and host endian (4 bytes) + * - nvl_version (4 bytes) + * - nvl_nvflag (4 bytes) + * + * - encoded nvpairs, the format of one xdr encoded nvpair is: + * - encoded size of the nvpair (4 bytes) + * - decoded size of the nvpair (4 bytes) + * - name string, (4 + sizeof(NV_ALIGN4(string)) + * a string is coded as size (4 bytes) and data + * - data type (4 bytes) + * - number of elements in the nvpair (4 bytes) + * - data + * + * - 2 zero's for end of the entire list (8 bytes) + */ +static int +nvs_xdr_create(nvstream_t *nvs, XDR *xdr, char *buf, size_t buflen) +{ + /* xdr data must be 4 byte aligned */ + if ((ulong_t)buf % 4 != 0) + return (EFAULT); + + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + xdrmem_create(xdr, buf, (uint_t)buflen, XDR_ENCODE); + nvs->nvs_private = xdr; + return (0); + case NVS_OP_DECODE: + xdrmem_create(xdr, buf, (uint_t)buflen, XDR_DECODE); + nvs->nvs_private = xdr; + return (0); + case NVS_OP_GETSIZE: + nvs->nvs_private = NULL; + return (0); + default: + return (EINVAL); + } +} + +static void +nvs_xdr_destroy(nvstream_t *nvs) +{ + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + case NVS_OP_DECODE: + xdr_destroy((XDR *)nvs->nvs_private); + break; + default: + break; + } +} + +static int +nvs_xdr_nvlist(nvstream_t *nvs, nvlist_t *nvl, size_t *size) +{ + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: + case NVS_OP_DECODE: { + XDR *xdr = nvs->nvs_private; + + if (!xdr_int(xdr, &nvl->nvl_version) || + !xdr_u_int(xdr, &nvl->nvl_nvflag)) + return (EFAULT); + break; + } + case NVS_OP_GETSIZE: { + /* + * 2 * 4 for nvl_version + nvl_nvflag + * and 8 for end of the entire list + */ + *size += 2 * 4 + 8; + break; + } + default: + return (EINVAL); + } + return (0); +} + +static int +nvs_xdr_nvl_fini(nvstream_t *nvs) +{ + if (nvs->nvs_op == NVS_OP_ENCODE) { + XDR *xdr = nvs->nvs_private; + int zero = 0; + + if (!xdr_int(xdr, &zero) || !xdr_int(xdr, &zero)) + return (EFAULT); + } + + return (0); +} + +/* + * The format of xdr encoded nvpair is: + * encode_size, decode_size, name string, data type, nelem, data + */ +static int +nvs_xdr_nvp_op(nvstream_t *nvs, nvpair_t *nvp) +{ + data_type_t type; + char *buf; + char *buf_end = (char *)nvp + nvp->nvp_size; + int value_sz; + uint_t nelem, buflen; + bool_t ret = FALSE; + XDR *xdr = nvs->nvs_private; + + ASSERT(xdr != NULL && nvp != NULL); + + /* name string */ + if ((buf = NVP_NAME(nvp)) >= buf_end) + return (EFAULT); + buflen = buf_end - buf; + + if (!xdr_string(xdr, &buf, buflen - 1)) + return (EFAULT); + nvp->nvp_name_sz = strlen(buf) + 1; + + /* type and nelem */ + if (!xdr_int(xdr, (int *)&nvp->nvp_type) || + !xdr_int(xdr, &nvp->nvp_value_elem)) + return (EFAULT); + + type = NVP_TYPE(nvp); + nelem = nvp->nvp_value_elem; + + /* + * Verify type and nelem and get the value 
size. + * In case of data types DATA_TYPE_STRING and DATA_TYPE_STRING_ARRAY + * is the size of the string(s) excluded. + */ + if ((value_sz = i_get_value_size(type, NULL, nelem)) < 0) + return (EFAULT); + + /* if there is no data to extract then return */ + if (nelem == 0) + return (0); + + /* value */ + if ((buf = NVP_VALUE(nvp)) >= buf_end) + return (EFAULT); + buflen = buf_end - buf; + + if (buflen < value_sz) + return (EFAULT); + + switch (type) { + case DATA_TYPE_NVLIST: + if (nvs_embedded(nvs, (void *)buf) == 0) + return (0); + break; + + case DATA_TYPE_NVLIST_ARRAY: + if (nvs_embedded_nvl_array(nvs, nvp, NULL) == 0) + return (0); + break; + + case DATA_TYPE_BOOLEAN: + ret = TRUE; + break; + + case DATA_TYPE_BYTE: + case DATA_TYPE_INT8: + case DATA_TYPE_UINT8: + ret = xdr_char(xdr, buf); + break; + + case DATA_TYPE_INT16: + ret = xdr_short(xdr, (void *)buf); + break; + + case DATA_TYPE_UINT16: + ret = xdr_u_short(xdr, (void *)buf); + break; + + case DATA_TYPE_BOOLEAN_VALUE: + case DATA_TYPE_INT32: + ret = xdr_int(xdr, (void *)buf); + break; + + case DATA_TYPE_UINT32: + ret = xdr_u_int(xdr, (void *)buf); + break; + + case DATA_TYPE_INT64: + ret = xdr_longlong_t(xdr, (void *)buf); + break; + + case DATA_TYPE_UINT64: + ret = xdr_u_longlong_t(xdr, (void *)buf); + break; + + case DATA_TYPE_HRTIME: + /* + * NOTE: must expose the definition of hrtime_t here + */ + ret = xdr_longlong_t(xdr, (void *)buf); + break; +#if !defined(_KERNEL) + case DATA_TYPE_DOUBLE: + ret = xdr_double(xdr, (void *)buf); + break; +#endif + case DATA_TYPE_STRING: + ret = xdr_string(xdr, &buf, buflen - 1); + break; + + case DATA_TYPE_BYTE_ARRAY: + ret = xdr_opaque(xdr, buf, nelem); + break; + + case DATA_TYPE_INT8_ARRAY: + case DATA_TYPE_UINT8_ARRAY: + ret = xdr_array(xdr, &buf, &nelem, buflen, sizeof (int8_t), + (xdrproc_t)xdr_char); + break; + + case DATA_TYPE_INT16_ARRAY: + ret = xdr_array(xdr, &buf, &nelem, buflen / sizeof (int16_t), + sizeof (int16_t), (xdrproc_t)xdr_short); + break; + + case DATA_TYPE_UINT16_ARRAY: + ret = xdr_array(xdr, &buf, &nelem, buflen / sizeof (uint16_t), + sizeof (uint16_t), (xdrproc_t)xdr_u_short); + break; + + case DATA_TYPE_BOOLEAN_ARRAY: + case DATA_TYPE_INT32_ARRAY: + ret = xdr_array(xdr, &buf, &nelem, buflen / sizeof (int32_t), + sizeof (int32_t), (xdrproc_t)xdr_int); + break; + + case DATA_TYPE_UINT32_ARRAY: + ret = xdr_array(xdr, &buf, &nelem, buflen / sizeof (uint32_t), + sizeof (uint32_t), (xdrproc_t)xdr_u_int); + break; + + case DATA_TYPE_INT64_ARRAY: + ret = xdr_array(xdr, &buf, &nelem, buflen / sizeof (int64_t), + sizeof (int64_t), (xdrproc_t)xdr_longlong_t); + break; + + case DATA_TYPE_UINT64_ARRAY: + ret = xdr_array(xdr, &buf, &nelem, buflen / sizeof (uint64_t), + sizeof (uint64_t), (xdrproc_t)xdr_u_longlong_t); + break; + + case DATA_TYPE_STRING_ARRAY: { + size_t len = nelem * sizeof (uint64_t); + char **strp = (void *)buf; + int i; + + if (nvs->nvs_op == NVS_OP_DECODE) + bzero(buf, len); /* don't trust packed data */ + + for (i = 0; i < nelem; i++) { + if (buflen <= len) + return (EFAULT); + + buf += len; + buflen -= len; + + if (xdr_string(xdr, &buf, buflen - 1) != TRUE) + return (EFAULT); + + if (nvs->nvs_op == NVS_OP_DECODE) + strp[i] = buf; + len = strlen(buf) + 1; + } + ret = TRUE; + break; + } + default: + break; + } + + return (ret == TRUE ? 
0 : EFAULT); +} + +static int +nvs_xdr_nvp_size(nvstream_t *nvs, nvpair_t *nvp, size_t *size) +{ + data_type_t type = NVP_TYPE(nvp); + /* + * encode_size + decode_size + name string size + data type + nelem + * where name string size = 4 + NV_ALIGN4(strlen(NVP_NAME(nvp))) + */ + uint64_t nvp_sz = 4 + 4 + 4 + NV_ALIGN4(strlen(NVP_NAME(nvp))) + 4 + 4; + + switch (type) { + case DATA_TYPE_BOOLEAN: + break; + + case DATA_TYPE_BOOLEAN_VALUE: + case DATA_TYPE_BYTE: + case DATA_TYPE_INT8: + case DATA_TYPE_UINT8: + case DATA_TYPE_INT16: + case DATA_TYPE_UINT16: + case DATA_TYPE_INT32: + case DATA_TYPE_UINT32: + nvp_sz += 4; /* 4 is the minimum xdr unit */ + break; + + case DATA_TYPE_INT64: + case DATA_TYPE_UINT64: + case DATA_TYPE_HRTIME: +#if !defined(_KERNEL) + case DATA_TYPE_DOUBLE: +#endif + nvp_sz += 8; + break; + + case DATA_TYPE_STRING: + nvp_sz += 4 + NV_ALIGN4(strlen((char *)NVP_VALUE(nvp))); + break; + + case DATA_TYPE_BYTE_ARRAY: + nvp_sz += NV_ALIGN4(NVP_NELEM(nvp)); + break; + + case DATA_TYPE_BOOLEAN_ARRAY: + case DATA_TYPE_INT8_ARRAY: + case DATA_TYPE_UINT8_ARRAY: + case DATA_TYPE_INT16_ARRAY: + case DATA_TYPE_UINT16_ARRAY: + case DATA_TYPE_INT32_ARRAY: + case DATA_TYPE_UINT32_ARRAY: + nvp_sz += 4 + 4 * (uint64_t)NVP_NELEM(nvp); + break; + + case DATA_TYPE_INT64_ARRAY: + case DATA_TYPE_UINT64_ARRAY: + nvp_sz += 4 + 8 * (uint64_t)NVP_NELEM(nvp); + break; + + case DATA_TYPE_STRING_ARRAY: { + int i; + char **strs = (void *)NVP_VALUE(nvp); + + for (i = 0; i < NVP_NELEM(nvp); i++) + nvp_sz += 4 + NV_ALIGN4(strlen(strs[i])); + + break; + } + + case DATA_TYPE_NVLIST: + case DATA_TYPE_NVLIST_ARRAY: { + size_t nvsize = 0; + int old_nvs_op = nvs->nvs_op; + int err; + + nvs->nvs_op = NVS_OP_GETSIZE; + if (type == DATA_TYPE_NVLIST) + err = nvs_operation(nvs, EMBEDDED_NVL(nvp), &nvsize); + else + err = nvs_embedded_nvl_array(nvs, nvp, &nvsize); + nvs->nvs_op = old_nvs_op; + + if (err != 0) + return (EINVAL); + + nvp_sz += nvsize; + break; + } + + default: + return (EINVAL); + } + + if (nvp_sz > INT32_MAX) + return (EINVAL); + + *size = nvp_sz; + + return (0); +} + + +/* + * The NVS_XDR_MAX_LEN macro takes a packed xdr buffer of size x and estimates + * the largest nvpair that could be encoded in the buffer. + * + * See comments above nvpair_xdr_op() for the format of xdr encoding. + * The size of a xdr packed nvpair without any data is 5 words. + * + * Using the size of the data directly as an estimate would be ok + * in all cases except one. If the data type is of DATA_TYPE_STRING_ARRAY + * then the actual nvpair has space for an array of pointers to index + * the strings. These pointers are not encoded into the packed xdr buffer. + * + * If the data is of type DATA_TYPE_STRING_ARRAY and all the strings are + * of length 0, then each string is endcoded in xdr format as a single word. + * Therefore when expanded to an nvpair there will be 2.25 word used for + * each string. (a int64_t allocated for pointer usage, and a single char + * for the null termination.) + * + * This is the calculation performed by the NVS_XDR_MAX_LEN macro. + */ +#define NVS_XDR_HDR_LEN ((size_t)(5 * 4)) +#define NVS_XDR_DATA_LEN(y) (((size_t)(y) <= NVS_XDR_HDR_LEN) ? 
\ + 0 : ((size_t)(y) - NVS_XDR_HDR_LEN)) +#define NVS_XDR_MAX_LEN(x) (NVP_SIZE_CALC(1, 0) + \ + (NVS_XDR_DATA_LEN(x) * 2) + \ + NV_ALIGN4((NVS_XDR_DATA_LEN(x) / 4))) + +static int +nvs_xdr_nvpair(nvstream_t *nvs, nvpair_t *nvp, size_t *size) +{ + XDR *xdr = nvs->nvs_private; + int32_t encode_len, decode_len; + + switch (nvs->nvs_op) { + case NVS_OP_ENCODE: { + size_t nvsize; + + if (nvs_xdr_nvp_size(nvs, nvp, &nvsize) != 0) + return (EFAULT); + + decode_len = nvp->nvp_size; + encode_len = nvsize; + if (!xdr_int(xdr, &encode_len) || !xdr_int(xdr, &decode_len)) + return (EFAULT); + + return (nvs_xdr_nvp_op(nvs, nvp)); + } + case NVS_OP_DECODE: { + struct xdr_bytesrec bytesrec; + + /* get the encode and decode size */ + if (!xdr_int(xdr, &encode_len) || !xdr_int(xdr, &decode_len)) + return (EFAULT); + *size = decode_len; + + /* are we at the end of the stream? */ + if (*size == 0) + return (0); + + /* sanity check the size parameter */ + if (!xdr_control(xdr, XDR_GET_BYTES_AVAIL, &bytesrec)) + return (EFAULT); + + if (*size > NVS_XDR_MAX_LEN(bytesrec.xc_num_avail)) + return (EFAULT); + break; + } + + default: + return (EINVAL); + } + return (0); +} + +static const struct nvs_ops nvs_xdr_ops = { + nvs_xdr_nvlist, + nvs_xdr_nvpair, + nvs_xdr_nvp_op, + nvs_xdr_nvp_size, + nvs_xdr_nvl_fini +}; + +static int +nvs_xdr(nvstream_t *nvs, nvlist_t *nvl, char *buf, size_t *buflen) +{ + XDR xdr; + int err; + + nvs->nvs_ops = &nvs_xdr_ops; + + if ((err = nvs_xdr_create(nvs, &xdr, buf + sizeof (nvs_header_t), + *buflen - sizeof (nvs_header_t))) != 0) + return (err); + + err = nvs_operation(nvs, nvl, buflen); + + nvs_xdr_destroy(nvs); + + return (err); +} diff --git a/external/cddl/osnet/dist/common/nvpair/nvpair_alloc_fixed.c b/external/cddl/osnet/dist/common/nvpair/nvpair_alloc_fixed.c new file mode 100644 index 000000000000..b1128eeb9bc3 --- /dev/null +++ b/external/cddl/osnet/dist/common/nvpair/nvpair_alloc_fixed.c @@ -0,0 +1,120 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#if defined(_KERNEL) && !defined(_BOOT) +#include +#else +#include +#include +#endif + +/* + * This allocator is very simple. + * - it uses a pre-allocated buffer for memory allocations. + * - it does _not_ free memory in the pre-allocated buffer. + * + * The reason for the selected implemention is simplicity. + * This allocator is designed for the usage in interrupt context when + * the caller may not wait for free memory. 
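+ *
+ * A minimal usage sketch (buffer size and variable names are hypothetical):
+ *
+ *	static char nvbuf[1024];
+ *	nv_alloc_t nva;
+ *	nvlist_t *nvl;
+ *
+ *	if (nv_alloc_init(&nva, nv_fixed_ops, nvbuf, sizeof (nvbuf)) == 0 &&
+ *	    nvlist_xalloc(&nvl, NV_UNIQUE_NAME, &nva) == 0)
+ *		... add pairs; freed memory is not returned to the buffer ...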
+ */ + +/* pre-allocated buffer for memory allocations */ +typedef struct nvbuf { + uintptr_t nvb_buf; /* address of pre-allocated buffer */ + uintptr_t nvb_lim; /* limit address in the buffer */ + uintptr_t nvb_cur; /* current address in the buffer */ +} nvbuf_t; + +/* + * Initialize the pre-allocated buffer allocator. The caller needs to supply + * + * buf address of pre-allocated buffer + * bufsz size of pre-allocated buffer + * + * nv_fixed_init() calculates the remaining members of nvbuf_t. + */ +static int +nv_fixed_init(nv_alloc_t *nva, va_list valist) +{ + uintptr_t base = va_arg(valist, uintptr_t); + uintptr_t lim = base + va_arg(valist, size_t); + nvbuf_t *nvb = (nvbuf_t *)P2ROUNDUP(base, sizeof (uintptr_t)); + + if (base == 0 || (uintptr_t)&nvb[1] > lim) + return (EINVAL); + + nvb->nvb_buf = (uintptr_t)&nvb[0]; + nvb->nvb_cur = (uintptr_t)&nvb[1]; + nvb->nvb_lim = lim; + nva->nva_arg = nvb; + + return (0); +} + +static void * +nv_fixed_alloc(nv_alloc_t *nva, size_t size) +{ + nvbuf_t *nvb = nva->nva_arg; + uintptr_t new = nvb->nvb_cur; + + if (size == 0 || new + size > nvb->nvb_lim) + return (NULL); + + nvb->nvb_cur = P2ROUNDUP(new + size, sizeof (uintptr_t)); + + return ((void *)new); +} + +/*ARGSUSED*/ +static void +nv_fixed_free(nv_alloc_t *nva, void *buf, size_t size) +{ + /* don't free memory in the pre-allocated buffer */ +} + +static void +nv_fixed_reset(nv_alloc_t *nva) +{ + nvbuf_t *nvb = nva->nva_arg; + + nvb->nvb_cur = (uintptr_t)&nvb[1]; +} + +const nv_alloc_ops_t nv_fixed_ops_def = { + nv_fixed_init, /* nv_ao_init() */ + NULL, /* nv_ao_fini() */ + nv_fixed_alloc, /* nv_ao_alloc() */ + nv_fixed_free, /* nv_ao_free() */ + nv_fixed_reset /* nv_ao_reset() */ +}; + +const nv_alloc_ops_t *nv_fixed_ops = &nv_fixed_ops_def; diff --git a/external/cddl/osnet/dist/common/unicode/u8_textprep.c b/external/cddl/osnet/dist/common/unicode/u8_textprep.c new file mode 100644 index 000000000000..8faf1a97e47e --- /dev/null +++ b/external/cddl/osnet/dist/common/unicode/u8_textprep.c @@ -0,0 +1,2132 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + + +/* + * UTF-8 text preparation functions (PSARC/2007/149, PSARC/2007/458). + * + * Man pages: u8_textprep_open(9F), u8_textprep_buf(9F), u8_textprep_close(9F), + * u8_textprep_str(9F), u8_strcmp(9F), and u8_validate(9F). See also + * the section 3C man pages. + * Interface stability: Committed. 
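+ *
+ * A small usage sketch of u8_validate() (the variable names are
+ * hypothetical):
+ *
+ *	int err;
+ *
+ *	if (u8_validate(name, strlen(name), NULL, U8_VALIDATE_ENTIRE,
+ *	    &err) < 0)
+ *		... reject: err is EILSEQ, ERANGE, or EINVAL ...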
+ */ + +#include +#ifdef _KERNEL +#include +#include +#include +#include +#include +#include +#include +#else +#include +#include +#endif /* _KERNEL */ +#include +#include +#include + + +/* The maximum possible number of bytes in a UTF-8 character. */ +#define U8_MB_CUR_MAX (4) + +/* + * The maximum number of bytes needed for a UTF-8 character to cover + * U+0000 - U+FFFF, i.e., the coding space of now deprecated UCS-2. + */ +#define U8_MAX_BYTES_UCS2 (3) + +/* The maximum possible number of bytes in a Stream-Safe Text. */ +#define U8_STREAM_SAFE_TEXT_MAX (128) + +/* + * The maximum number of characters in a combining/conjoining sequence and + * the actual upperbound limit of a combining/conjoining sequence. + */ +#define U8_MAX_CHARS_A_SEQ (32) +#define U8_UPPER_LIMIT_IN_A_SEQ (31) + +/* The combining class value for Starter. */ +#define U8_COMBINING_CLASS_STARTER (0) + +/* + * Some Hangul related macros at below. + * + * The first and the last of Hangul syllables, Hangul Jamo Leading consonants, + * Vowels, and optional Trailing consonants in Unicode scalar values. + * + * Please be noted that the U8_HANGUL_JAMO_T_FIRST is 0x11A7 at below not + * the actual U+11A8. This is due to that the trailing consonant is optional + * and thus we are doing a pre-calculation of subtracting one. + * + * Each of 19 modern leading consonants has total 588 possible syllables since + * Hangul has 21 modern vowels and 27 modern trailing consonants plus 1 for + * no trailing consonant case, i.e., 21 x 28 = 588. + * + * We also have bunch of Hangul related macros at below. Please bear in mind + * that the U8_HANGUL_JAMO_1ST_BYTE can be used to check whether it is + * a Hangul Jamo or not but the value does not guarantee that it is a Hangul + * Jamo; it just guarantee that it will be most likely. + */ +#define U8_HANGUL_SYL_FIRST (0xAC00U) +#define U8_HANGUL_SYL_LAST (0xD7A3U) + +#define U8_HANGUL_JAMO_L_FIRST (0x1100U) +#define U8_HANGUL_JAMO_L_LAST (0x1112U) +#define U8_HANGUL_JAMO_V_FIRST (0x1161U) +#define U8_HANGUL_JAMO_V_LAST (0x1175U) +#define U8_HANGUL_JAMO_T_FIRST (0x11A7U) +#define U8_HANGUL_JAMO_T_LAST (0x11C2U) + +#define U8_HANGUL_V_COUNT (21) +#define U8_HANGUL_VT_COUNT (588) +#define U8_HANGUL_T_COUNT (28) + +#define U8_HANGUL_JAMO_1ST_BYTE (0xE1U) + +#define U8_SAVE_HANGUL_AS_UTF8(s, i, j, k, b) \ + (s)[(i)] = (uchar_t)(0xE0U | ((uint32_t)(b) & 0xF000U) >> 12); \ + (s)[(j)] = (uchar_t)(0x80U | ((uint32_t)(b) & 0x0FC0U) >> 6); \ + (s)[(k)] = (uchar_t)(0x80U | ((uint32_t)(b) & 0x003FU)); + +#define U8_HANGUL_JAMO_L(u) \ + ((u) >= U8_HANGUL_JAMO_L_FIRST && (u) <= U8_HANGUL_JAMO_L_LAST) + +#define U8_HANGUL_JAMO_V(u) \ + ((u) >= U8_HANGUL_JAMO_V_FIRST && (u) <= U8_HANGUL_JAMO_V_LAST) + +#define U8_HANGUL_JAMO_T(u) \ + ((u) > U8_HANGUL_JAMO_T_FIRST && (u) <= U8_HANGUL_JAMO_T_LAST) + +#define U8_HANGUL_JAMO(u) \ + ((u) >= U8_HANGUL_JAMO_L_FIRST && (u) <= U8_HANGUL_JAMO_T_LAST) + +#define U8_HANGUL_SYLLABLE(u) \ + ((u) >= U8_HANGUL_SYL_FIRST && (u) <= U8_HANGUL_SYL_LAST) + +#define U8_HANGUL_COMPOSABLE_L_V(s, u) \ + ((s) == U8_STATE_HANGUL_L && U8_HANGUL_JAMO_V((u))) + +#define U8_HANGUL_COMPOSABLE_LV_T(s, u) \ + ((s) == U8_STATE_HANGUL_LV && U8_HANGUL_JAMO_T((u))) + +/* The types of decomposition mappings. */ +#define U8_DECOMP_BOTH (0xF5U) +#define U8_DECOMP_CANONICAL (0xF6U) + +/* The indicator for 16-bit table. */ +#define U8_16BIT_TABLE_INDICATOR (0x8000U) + +/* The following are some convenience macros. 
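+ * For instance, U8_PUT_3BYTES_INTO_UTF32(u, 0xEA, 0xB0, 0x80) below
+ * reassembles the three UTF-8 bytes of the Hangul syllable U+AC00 into
+ * the scalar value 0xAC00U.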
*/ +#define U8_PUT_3BYTES_INTO_UTF32(u, b1, b2, b3) \ + (u) = ((uint32_t)(b1) & 0x0F) << 12 | ((uint32_t)(b2) & 0x3F) << 6 | \ + (uint32_t)(b3) & 0x3F; + +#define U8_SIMPLE_SWAP(a, b, t) \ + (t) = (a); \ + (a) = (b); \ + (b) = (t); + +#define U8_ASCII_TOUPPER(c) \ + (((c) >= 'a' && (c) <= 'z') ? (c) - 'a' + 'A' : (c)) + +#define U8_ASCII_TOLOWER(c) \ + (((c) >= 'A' && (c) <= 'Z') ? (c) - 'A' + 'a' : (c)) + +#define U8_ISASCII(c) (((uchar_t)(c)) < 0x80U) +/* + * The following macro assumes that the two characters that are to be + * swapped are adjacent to each other and 'a' comes before 'b'. + * + * If the assumptions are not met, then, the macro will fail. + */ +#define U8_SWAP_COMB_MARKS(a, b) \ + for (k = 0; k < disp[(a)]; k++) \ + u8t[k] = u8s[start[(a)] + k]; \ + for (k = 0; k < disp[(b)]; k++) \ + u8s[start[(a)] + k] = u8s[start[(b)] + k]; \ + start[(b)] = start[(a)] + disp[(b)]; \ + for (k = 0; k < disp[(a)]; k++) \ + u8s[start[(b)] + k] = u8t[k]; \ + U8_SIMPLE_SWAP(comb_class[(a)], comb_class[(b)], tc); \ + U8_SIMPLE_SWAP(disp[(a)], disp[(b)], tc); + +/* The possible states during normalization. */ +typedef enum { + U8_STATE_START = 0, + U8_STATE_HANGUL_L = 1, + U8_STATE_HANGUL_LV = 2, + U8_STATE_HANGUL_LVT = 3, + U8_STATE_HANGUL_V = 4, + U8_STATE_HANGUL_T = 5, + U8_STATE_COMBINING_MARK = 6 +} u8_normalization_states_t; + +/* + * The three vectors at below are used to check bytes of a given UTF-8 + * character are valid and not containing any malformed byte values. + * + * We used to have a quite relaxed UTF-8 binary representation but then there + * was some security related issues and so the Unicode Consortium defined + * and announced the UTF-8 Corrigendum at Unicode 3.1 and then refined it + * one more time at the Unicode 3.2. The following three tables are based on + * that. 
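+ *
+ * For example, u8_valid_min_2nd_byte[0xE0] is 0xA0 rather than 0x80, so the
+ * overlong sequence 0xE0 0x80 0x80 is rejected, and
+ * u8_valid_max_2nd_byte[0xED] is 0x9F, which keeps the UTF-16 surrogate
+ * range U+D800 through U+DFFF out of well-formed UTF-8.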
+ */ + +#define U8_ILLEGAL_NEXT_BYTE_COMMON(c) ((c) < 0x80 || (c) > 0xBF) + +#define I_ U8_ILLEGAL_CHAR +#define O_ U8_OUT_OF_RANGE_CHAR + +const int8_t u8_number_of_bytes[0x100] = { + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + +/* 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F */ + I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, + +/* 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F */ + I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, + +/* A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF */ + I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, + +/* B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF */ + I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, I_, + +/* C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF */ + I_, I_, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + +/* D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF */ + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + +/* E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF */ + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + +/* F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF */ + 4, 4, 4, 4, 4, O_, O_, O_, O_, O_, O_, O_, O_, O_, O_, O_, +}; + +#undef I_ +#undef O_ + +const uint8_t u8_valid_min_2nd_byte[0x100] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, +/* C0 C1 C2 C3 C4 C5 C6 C7 */ + 0, 0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, +/* C8 C9 CA CB CC CD CE CF */ + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, +/* D0 D1 D2 D3 D4 D5 D6 D7 */ + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, +/* D8 D9 DA DB DC DD DE DF */ + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, +/* E0 E1 E2 E3 E4 E5 E6 E7 */ + 0xa0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, +/* E8 E9 EA EB EC ED EE EF */ + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, +/* F0 F1 F2 F3 F4 F5 F6 F7 */ + 0x90, 0x80, 0x80, 0x80, 0x80, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, +}; + +const uint8_t u8_valid_max_2nd_byte[0x100] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, +/* C0 C1 C2 C3 C4 C5 C6 C7 */ + 0, 0, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, +/* 
C8 C9 CA CB CC CD CE CF */ + 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, +/* D0 D1 D2 D3 D4 D5 D6 D7 */ + 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, +/* D8 D9 DA DB DC DD DE DF */ + 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, +/* E0 E1 E2 E3 E4 E5 E6 E7 */ + 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, +/* E8 E9 EA EB EC ED EE EF */ + 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0x9f, 0xbf, 0xbf, +/* F0 F1 F2 F3 F4 F5 F6 F7 */ + 0xbf, 0xbf, 0xbf, 0xbf, 0x8f, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, +}; + + +/* + * The u8_validate() validates on the given UTF-8 character string and + * calculate the byte length. It is quite similar to mblen(3C) except that + * this will validate against the list of characters if required and + * specific to UTF-8 and Unicode. + */ +int +u8_validate(char *u8str, size_t n, char **list, int flag, int *errnum) +{ + uchar_t *ib; + uchar_t *ibtail; + uchar_t **p; + uchar_t *s1; + uchar_t *s2; + uchar_t f; + int sz; + size_t i; + int ret_val; + boolean_t second; + boolean_t no_need_to_validate_entire; + boolean_t check_additional; + boolean_t validate_ucs2_range_only; + + if (! u8str) + return (0); + + ib = (uchar_t *)u8str; + ibtail = ib + n; + + ret_val = 0; + + no_need_to_validate_entire = ! (flag & U8_VALIDATE_ENTIRE); + check_additional = flag & U8_VALIDATE_CHECK_ADDITIONAL; + validate_ucs2_range_only = flag & U8_VALIDATE_UCS2_RANGE; + + while (ib < ibtail) { + /* + * The first byte of a UTF-8 character tells how many + * bytes will follow for the character. If the first byte + * is an illegal byte value or out of range value, we just + * return -1 with an appropriate error number. + */ + sz = u8_number_of_bytes[*ib]; + if (sz == U8_ILLEGAL_CHAR) { + *errnum = EILSEQ; + return (-1); + } + + if (sz == U8_OUT_OF_RANGE_CHAR || + (validate_ucs2_range_only && sz > U8_MAX_BYTES_UCS2)) { + *errnum = ERANGE; + return (-1); + } + + /* + * If we don't have enough bytes to check on, that's also + * an error. As you can see, we give illegal byte sequence + * checking higher priority then EINVAL cases. + */ + if ((ibtail - ib) < sz) { + *errnum = EINVAL; + return (-1); + } + + if (sz == 1) { + ib++; + ret_val++; + } else { + /* + * Check on the multi-byte UTF-8 character. For more + * details on this, see comment added for the used + * data structures at the beginning of the file. + */ + f = *ib++; + ret_val++; + second = B_TRUE; + for (i = 1; i < sz; i++) { + if (second) { + if (*ib < u8_valid_min_2nd_byte[f] || + *ib > u8_valid_max_2nd_byte[f]) { + *errnum = EILSEQ; + return (-1); + } + second = B_FALSE; + } else if (U8_ILLEGAL_NEXT_BYTE_COMMON(*ib)) { + *errnum = EILSEQ; + return (-1); + } + ib++; + ret_val++; + } + } + + if (check_additional) { + for (p = (uchar_t **)list, i = 0; p[i]; i++) { + s1 = ib - sz; + s2 = p[i]; + while (s1 < ib) { + if (*s1 != *s2 || *s2 == '\0') + break; + s1++; + s2++; + } + + if (s1 >= ib && *s2 == '\0') { + *errnum = EBADF; + return (-1); + } + } + } + + if (no_need_to_validate_entire) + break; + } + + return (ret_val); +} + +/* + * The do_case_conv() looks at the mapping tables and returns found + * bytes if any. If not found, the input bytes are returned. The function + * always terminate the return bytes with a null character assuming that + * there are plenty of room to do so. + * + * The case conversions are simple case conversions mapping a character to + * another character as specified in the Unicode data. The byte size of + * the mapped character could be different from that of the input character. 
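+ *
+ * For example (illustrative), the two-byte character U+00E1 (0xC3 0xA1,
+ * "a" with acute) has the simple uppercase mapping U+00C1 (0xC3 0x81), so
+ * a toupper conversion returns 2 with u8s holding 0xC3 0x81.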
+ * + * The return value is the byte length of the returned character excluding + * the terminating null byte. + */ +static size_t +do_case_conv(int uv, uchar_t *u8s, uchar_t *s, int sz, boolean_t is_it_toupper) +{ + size_t i; + uint16_t b1 = 0; + uint16_t b2 = 0; + uint16_t b3 = 0; + uint16_t b3_tbl; + uint16_t b3_base; + uint16_t b4 = 0; + size_t start_id; + size_t end_id; + + /* + * At this point, the only possible values for sz are 2, 3, and 4. + * The u8s should point to a vector that is well beyond the size of + * 5 bytes. + */ + if (sz == 2) { + b3 = u8s[0] = s[0]; + b4 = u8s[1] = s[1]; + } else if (sz == 3) { + b2 = u8s[0] = s[0]; + b3 = u8s[1] = s[1]; + b4 = u8s[2] = s[2]; + } else if (sz == 4) { + b1 = u8s[0] = s[0]; + b2 = u8s[1] = s[1]; + b3 = u8s[2] = s[2]; + b4 = u8s[3] = s[3]; + } else { + /* This is not possible but just in case as a fallback. */ + if (is_it_toupper) + *u8s = U8_ASCII_TOUPPER(*s); + else + *u8s = U8_ASCII_TOLOWER(*s); + u8s[1] = '\0'; + + return (1); + } + u8s[sz] = '\0'; + + /* + * Let's find out if we have a corresponding character. + */ + b1 = u8_common_b1_tbl[uv][b1]; + if (b1 == U8_TBL_ELEMENT_NOT_DEF) + return ((size_t)sz); + + b2 = u8_case_common_b2_tbl[uv][b1][b2]; + if (b2 == U8_TBL_ELEMENT_NOT_DEF) + return ((size_t)sz); + + if (is_it_toupper) { + b3_tbl = u8_toupper_b3_tbl[uv][b2][b3].tbl_id; + if (b3_tbl == U8_TBL_ELEMENT_NOT_DEF) + return ((size_t)sz); + + start_id = u8_toupper_b4_tbl[uv][b3_tbl][b4]; + end_id = u8_toupper_b4_tbl[uv][b3_tbl][b4 + 1]; + + /* Either there is no match or an error at the table. */ + if (start_id >= end_id || (end_id - start_id) > U8_MB_CUR_MAX) + return ((size_t)sz); + + b3_base = u8_toupper_b3_tbl[uv][b2][b3].base; + + for (i = 0; start_id < end_id; start_id++) + u8s[i++] = u8_toupper_final_tbl[uv][b3_base + start_id]; + } else { + b3_tbl = u8_tolower_b3_tbl[uv][b2][b3].tbl_id; + if (b3_tbl == U8_TBL_ELEMENT_NOT_DEF) + return ((size_t)sz); + + start_id = u8_tolower_b4_tbl[uv][b3_tbl][b4]; + end_id = u8_tolower_b4_tbl[uv][b3_tbl][b4 + 1]; + + if (start_id >= end_id || (end_id - start_id) > U8_MB_CUR_MAX) + return ((size_t)sz); + + b3_base = u8_tolower_b3_tbl[uv][b2][b3].base; + + for (i = 0; start_id < end_id; start_id++) + u8s[i++] = u8_tolower_final_tbl[uv][b3_base + start_id]; + } + + /* + * If i is still zero, that means there is no corresponding character. + */ + if (i == 0) + return ((size_t)sz); + + u8s[i] = '\0'; + + return (i); +} + +/* + * The do_case_compare() function compares the two input strings, s1 and s2, + * one character at a time doing case conversions if applicable and return + * the comparison result as like strcmp(). + * + * Since, in empirical sense, most of text data are 7-bit ASCII characters, + * we treat the 7-bit ASCII characters as a special case trying to yield + * faster processing time. + */ +static int +do_case_compare(size_t uv, uchar_t *s1, uchar_t *s2, size_t n1, + size_t n2, boolean_t is_it_toupper, int *errnum) +{ + int f; + int sz1; + int sz2; + size_t j; + size_t i1; + size_t i2; + uchar_t u8s1[U8_MB_CUR_MAX + 1]; + uchar_t u8s2[U8_MB_CUR_MAX + 1]; + + i1 = i2 = 0; + while (i1 < n1 && i2 < n2) { + /* + * Find out what would be the byte length for this UTF-8 + * character at string s1 and also find out if this is + * an illegal start byte or not and if so, issue a proper + * error number and yet treat this byte as a character. 
+ */ + sz1 = u8_number_of_bytes[*s1]; + if (sz1 < 0) { + *errnum = EILSEQ; + sz1 = 1; + } + + /* + * For 7-bit ASCII characters mainly, we do a quick case + * conversion right at here. + * + * If we don't have enough bytes for this character, issue + * an EINVAL error and use what are available. + * + * If we have enough bytes, find out if there is + * a corresponding uppercase character and if so, copy over + * the bytes for a comparison later. If there is no + * corresponding uppercase character, then, use what we have + * for the comparison. + */ + if (sz1 == 1) { + if (is_it_toupper) + u8s1[0] = U8_ASCII_TOUPPER(*s1); + else + u8s1[0] = U8_ASCII_TOLOWER(*s1); + s1++; + u8s1[1] = '\0'; + } else if ((i1 + sz1) > n1) { + *errnum = EINVAL; + for (j = 0; (i1 + j) < n1; ) + u8s1[j++] = *s1++; + u8s1[j] = '\0'; + } else { + (void) do_case_conv(uv, u8s1, s1, sz1, is_it_toupper); + s1 += sz1; + } + + /* Do the same for the string s2. */ + sz2 = u8_number_of_bytes[*s2]; + if (sz2 < 0) { + *errnum = EILSEQ; + sz2 = 1; + } + + if (sz2 == 1) { + if (is_it_toupper) + u8s2[0] = U8_ASCII_TOUPPER(*s2); + else + u8s2[0] = U8_ASCII_TOLOWER(*s2); + s2++; + u8s2[1] = '\0'; + } else if ((i2 + sz2) > n2) { + *errnum = EINVAL; + for (j = 0; (i2 + j) < n2; ) + u8s2[j++] = *s2++; + u8s2[j] = '\0'; + } else { + (void) do_case_conv(uv, u8s2, s2, sz2, is_it_toupper); + s2 += sz2; + } + + /* Now compare the two characters. */ + if (sz1 == 1 && sz2 == 1) { + if (*u8s1 > *u8s2) + return (1); + if (*u8s1 < *u8s2) + return (-1); + } else { + f = strcmp((const char *)u8s1, (const char *)u8s2); + if (f != 0) + return (f); + } + + /* + * They were the same. Let's move on to the next + * characters then. + */ + i1 += sz1; + i2 += sz2; + } + + /* + * We compared until the end of either or both strings. + * + * If we reached to or went over the ends for the both, that means + * they are the same. + * + * If we reached only one of the two ends, that means the other string + * has something which then the fact can be used to determine + * the return value. + */ + if (i1 >= n1) { + if (i2 >= n2) + return (0); + return (-1); + } + return (1); +} + +/* + * The combining_class() function checks on the given bytes and find out + * the corresponding Unicode combining class value. The return value 0 means + * it is a Starter. Any illegal UTF-8 character will also be treated as + * a Starter. + */ +static uchar_t +combining_class(size_t uv, uchar_t *s, size_t sz) +{ + uint16_t b1 = 0; + uint16_t b2 = 0; + uint16_t b3 = 0; + uint16_t b4 = 0; + + if (sz == 1 || sz > 4) + return (0); + + if (sz == 2) { + b3 = s[0]; + b4 = s[1]; + } else if (sz == 3) { + b2 = s[0]; + b3 = s[1]; + b4 = s[2]; + } else if (sz == 4) { + b1 = s[0]; + b2 = s[1]; + b3 = s[2]; + b4 = s[3]; + } + + b1 = u8_common_b1_tbl[uv][b1]; + if (b1 == U8_TBL_ELEMENT_NOT_DEF) + return (0); + + b2 = u8_combining_class_b2_tbl[uv][b1][b2]; + if (b2 == U8_TBL_ELEMENT_NOT_DEF) + return (0); + + b3 = u8_combining_class_b3_tbl[uv][b2][b3]; + if (b3 == U8_TBL_ELEMENT_NOT_DEF) + return (0); + + return (u8_combining_class_b4_tbl[uv][b3][b4]); +} + +/* + * The do_decomp() function finds out a matching decomposition if any + * and return. If there is no match, the input bytes are copied and returned. + * The function also checks if there is a Hangul, decomposes it if necessary + * and returns. + * + * To save time, a single byte 7-bit ASCII character should be handled by + * the caller. + * + * The function returns the number of bytes returned sans always terminating + * the null byte. 
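+ *
+ * As a hedged illustration (again using the standard canonical mappings
+ * behind these tables): canonically decomposing U+00E9 LATIN SMALL LETTER
+ * E WITH ACUTE (0xC3 0xA9) would place 'e' followed by U+0301 COMBINING
+ * ACUTE ACCENT (0x65 0xCC 0x81) in 'u8s' and return 3.
+ *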
It will also return a state that will tell if there was + * a Hangul character decomposed which then will be used by the caller. + */ +static size_t +do_decomp(size_t uv, uchar_t *u8s, uchar_t *s, int sz, + boolean_t canonical_decomposition, u8_normalization_states_t *state) +{ + uint16_t b1 = 0; + uint16_t b2 = 0; + uint16_t b3 = 0; + uint16_t b3_tbl; + uint16_t b3_base; + uint16_t b4 = 0; + size_t start_id; + size_t end_id; + size_t i; + uint32_t u1; + + if (sz == 2) { + b3 = u8s[0] = s[0]; + b4 = u8s[1] = s[1]; + u8s[2] = '\0'; + } else if (sz == 3) { + /* Convert it to a Unicode scalar value. */ + U8_PUT_3BYTES_INTO_UTF32(u1, s[0], s[1], s[2]); + + /* + * If this is a Hangul syllable, we decompose it into + * a leading consonant, a vowel, and an optional trailing + * consonant and then return. + */ + if (U8_HANGUL_SYLLABLE(u1)) { + u1 -= U8_HANGUL_SYL_FIRST; + + b1 = U8_HANGUL_JAMO_L_FIRST + u1 / U8_HANGUL_VT_COUNT; + b2 = U8_HANGUL_JAMO_V_FIRST + (u1 % U8_HANGUL_VT_COUNT) + / U8_HANGUL_T_COUNT; + b3 = u1 % U8_HANGUL_T_COUNT; + + U8_SAVE_HANGUL_AS_UTF8(u8s, 0, 1, 2, b1); + U8_SAVE_HANGUL_AS_UTF8(u8s, 3, 4, 5, b2); + if (b3) { + b3 += U8_HANGUL_JAMO_T_FIRST; + U8_SAVE_HANGUL_AS_UTF8(u8s, 6, 7, 8, b3); + + u8s[9] = '\0'; + *state = U8_STATE_HANGUL_LVT; + return (9); + } + + u8s[6] = '\0'; + *state = U8_STATE_HANGUL_LV; + return (6); + } + + b2 = u8s[0] = s[0]; + b3 = u8s[1] = s[1]; + b4 = u8s[2] = s[2]; + u8s[3] = '\0'; + + /* + * If this is a Hangul Jamo, we know there is nothing + * further that we can decompose. + */ + if (U8_HANGUL_JAMO_L(u1)) { + *state = U8_STATE_HANGUL_L; + return (3); + } + + if (U8_HANGUL_JAMO_V(u1)) { + if (*state == U8_STATE_HANGUL_L) + *state = U8_STATE_HANGUL_LV; + else + *state = U8_STATE_HANGUL_V; + return (3); + } + + if (U8_HANGUL_JAMO_T(u1)) { + if (*state == U8_STATE_HANGUL_LV) + *state = U8_STATE_HANGUL_LVT; + else + *state = U8_STATE_HANGUL_T; + return (3); + } + } else if (sz == 4) { + b1 = u8s[0] = s[0]; + b2 = u8s[1] = s[1]; + b3 = u8s[2] = s[2]; + b4 = u8s[3] = s[3]; + u8s[4] = '\0'; + } else { + /* + * This is a fallback and should not happen if the function + * was called properly. + */ + u8s[0] = s[0]; + u8s[1] = '\0'; + *state = U8_STATE_START; + return (1); + } + + /* + * At this point, this rountine does not know what it would get. + * The caller should sort it out if the state isn't a Hangul one. + */ + *state = U8_STATE_START; + + /* Try to find matching decomposition mapping byte sequence. */ + b1 = u8_common_b1_tbl[uv][b1]; + if (b1 == U8_TBL_ELEMENT_NOT_DEF) + return ((size_t)sz); + + b2 = u8_decomp_b2_tbl[uv][b1][b2]; + if (b2 == U8_TBL_ELEMENT_NOT_DEF) + return ((size_t)sz); + + b3_tbl = u8_decomp_b3_tbl[uv][b2][b3].tbl_id; + if (b3_tbl == U8_TBL_ELEMENT_NOT_DEF) + return ((size_t)sz); + + /* + * If b3_tbl is bigger than or equal to U8_16BIT_TABLE_INDICATOR + * which is 0x8000, this means we couldn't fit the mappings into + * the cardinality of a unsigned byte. + */ + if (b3_tbl >= U8_16BIT_TABLE_INDICATOR) { + b3_tbl -= U8_16BIT_TABLE_INDICATOR; + start_id = u8_decomp_b4_16bit_tbl[uv][b3_tbl][b4]; + end_id = u8_decomp_b4_16bit_tbl[uv][b3_tbl][b4 + 1]; + } else { + start_id = u8_decomp_b4_tbl[uv][b3_tbl][b4]; + end_id = u8_decomp_b4_tbl[uv][b3_tbl][b4 + 1]; + } + + /* This also means there wasn't any matching decomposition. 
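+	 * (A character with no mapping at all, e.g. U+00F7 DIVISION SIGN,
+	 * simply comes back unchanged with its original byte length.)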
*/ + if (start_id >= end_id) + return ((size_t)sz); + + /* + * The final table for decomposition mappings has three types of + * byte sequences depending on whether a mapping is for compatibility + * decomposition, canonical decomposition, or both like the following: + * + * (1) Compatibility decomposition mappings: + * + * +---+---+-...-+---+ + * | B0| B1| ... | Bm| + * +---+---+-...-+---+ + * + * The first byte, B0, is always less then 0xF5 (U8_DECOMP_BOTH). + * + * (2) Canonical decomposition mappings: + * + * +---+---+---+-...-+---+ + * | T | b0| b1| ... | bn| + * +---+---+---+-...-+---+ + * + * where the first byte, T, is 0xF6 (U8_DECOMP_CANONICAL). + * + * (3) Both mappings: + * + * +---+---+---+---+-...-+---+---+---+-...-+---+ + * | T | D | b0| b1| ... | bn| B0| B1| ... | Bm| + * +---+---+---+---+-...-+---+---+---+-...-+---+ + * + * where T is 0xF5 (U8_DECOMP_BOTH) and D is a displacement + * byte, b0 to bn are canonical mapping bytes and B0 to Bm are + * compatibility mapping bytes. + * + * Note that compatibility decomposition means doing recursive + * decompositions using both compatibility decomposition mappings and + * canonical decomposition mappings. On the other hand, canonical + * decomposition means doing recursive decompositions using only + * canonical decomposition mappings. Since the table we have has gone + * through the recursions already, we do not need to do so during + * runtime, i.e., the table has been completely flattened out + * already. + */ + + b3_base = u8_decomp_b3_tbl[uv][b2][b3].base; + + /* Get the type, T, of the byte sequence. */ + b1 = u8_decomp_final_tbl[uv][b3_base + start_id]; + + /* + * If necessary, adjust start_id, end_id, or both. Note that if + * this is compatibility decomposition mapping, there is no + * adjustment. + */ + if (canonical_decomposition) { + /* Is the mapping only for compatibility decomposition? */ + if (b1 < U8_DECOMP_BOTH) + return ((size_t)sz); + + start_id++; + + if (b1 == U8_DECOMP_BOTH) { + end_id = start_id + + u8_decomp_final_tbl[uv][b3_base + start_id]; + start_id++; + } + } else { + /* + * Unless this is a compatibility decomposition mapping, + * we adjust the start_id. + */ + if (b1 == U8_DECOMP_BOTH) { + start_id++; + start_id += u8_decomp_final_tbl[uv][b3_base + start_id]; + } else if (b1 == U8_DECOMP_CANONICAL) { + start_id++; + } + } + + for (i = 0; start_id < end_id; start_id++) + u8s[i++] = u8_decomp_final_tbl[uv][b3_base + start_id]; + u8s[i] = '\0'; + + return (i); +} + +/* + * The find_composition_start() function uses the character bytes given and + * find out the matching composition mappings if any and return the address + * to the composition mappings as explained in the do_composition(). + */ +static uchar_t * +find_composition_start(size_t uv, uchar_t *s, size_t sz) +{ + uint16_t b1 = 0; + uint16_t b2 = 0; + uint16_t b3 = 0; + uint16_t b3_tbl; + uint16_t b3_base; + uint16_t b4 = 0; + size_t start_id; + size_t end_id; + + if (sz == 1) { + b4 = s[0]; + } else if (sz == 2) { + b3 = s[0]; + b4 = s[1]; + } else if (sz == 3) { + b2 = s[0]; + b3 = s[1]; + b4 = s[2]; + } else if (sz == 4) { + b1 = s[0]; + b2 = s[1]; + b3 = s[2]; + b4 = s[3]; + } else { + /* + * This is a fallback and should not happen if the function + * was called properly. 
+ */ + return (NULL); + } + + b1 = u8_composition_b1_tbl[uv][b1]; + if (b1 == U8_TBL_ELEMENT_NOT_DEF) + return (NULL); + + b2 = u8_composition_b2_tbl[uv][b1][b2]; + if (b2 == U8_TBL_ELEMENT_NOT_DEF) + return (NULL); + + b3_tbl = u8_composition_b3_tbl[uv][b2][b3].tbl_id; + if (b3_tbl == U8_TBL_ELEMENT_NOT_DEF) + return (NULL); + + if (b3_tbl >= U8_16BIT_TABLE_INDICATOR) { + b3_tbl -= U8_16BIT_TABLE_INDICATOR; + start_id = u8_composition_b4_16bit_tbl[uv][b3_tbl][b4]; + end_id = u8_composition_b4_16bit_tbl[uv][b3_tbl][b4 + 1]; + } else { + start_id = u8_composition_b4_tbl[uv][b3_tbl][b4]; + end_id = u8_composition_b4_tbl[uv][b3_tbl][b4 + 1]; + } + + if (start_id >= end_id) + return (NULL); + + b3_base = u8_composition_b3_tbl[uv][b2][b3].base; + + return ((uchar_t *)&(u8_composition_final_tbl[uv][b3_base + start_id])); +} + +/* + * The blocked() function checks on the combining class values of previous + * characters in this sequence and return whether it is blocked or not. + */ +static boolean_t +blocked(uchar_t *comb_class, size_t last) +{ + uchar_t my_comb_class; + size_t i; + + my_comb_class = comb_class[last]; + for (i = 1; i < last; i++) + if (comb_class[i] >= my_comb_class || + comb_class[i] == U8_COMBINING_CLASS_STARTER) + return (B_TRUE); + + return (B_FALSE); +} + +/* + * The do_composition() reads the character string pointed by 's' and + * do necessary canonical composition and then copy over the result back to + * the 's'. + * + * The input argument 's' cannot contain more than 32 characters. + */ +static size_t +do_composition(size_t uv, uchar_t *s, uchar_t *comb_class, uchar_t *start, + uchar_t *disp, size_t last, uchar_t **os, uchar_t *oslast) +{ + uchar_t t[U8_STREAM_SAFE_TEXT_MAX + 1]; + uchar_t tc[U8_MB_CUR_MAX]; + uint8_t saved_marks[U8_MAX_CHARS_A_SEQ]; + size_t saved_marks_count; + uchar_t *p; + uchar_t *saved_p; + uchar_t *q; + size_t i; + size_t saved_i; + size_t j; + size_t k; + size_t l; + size_t C; + size_t saved_l; + size_t size; + uint32_t u1; + uint32_t u2; + boolean_t match_not_found = B_TRUE; + + /* + * This should never happen unless the callers are doing some strange + * and unexpected things. + * + * The "last" is the index pointing to the last character not last + 1. + */ + if (last >= U8_MAX_CHARS_A_SEQ) + last = U8_UPPER_LIMIT_IN_A_SEQ; + + for (i = l = 0; i <= last; i++) { + /* + * The last or any non-Starters at the beginning, we don't + * have any chance to do composition and so we just copy them + * to the temporary buffer. + */ + if (i >= last || comb_class[i] != U8_COMBINING_CLASS_STARTER) { +SAVE_THE_CHAR: + p = s + start[i]; + size = disp[i]; + for (k = 0; k < size; k++) + t[l++] = *p++; + continue; + } + + /* + * If this could be a start of Hangul Jamos, then, we try to + * conjoin them. 
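+		 *
+		 * (Illustration, using the standard Hangul composition
+		 * arithmetic: the Jamo pair U+1100 + U+1161 conjoins to the
+		 * syllable U+AC00, and a following U+11A8 extends it to
+		 * U+AC01.)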
+ */ + if (s[start[i]] == U8_HANGUL_JAMO_1ST_BYTE) { + U8_PUT_3BYTES_INTO_UTF32(u1, s[start[i]], + s[start[i] + 1], s[start[i] + 2]); + U8_PUT_3BYTES_INTO_UTF32(u2, s[start[i] + 3], + s[start[i] + 4], s[start[i] + 5]); + + if (U8_HANGUL_JAMO_L(u1) && U8_HANGUL_JAMO_V(u2)) { + u1 -= U8_HANGUL_JAMO_L_FIRST; + u2 -= U8_HANGUL_JAMO_V_FIRST; + u1 = U8_HANGUL_SYL_FIRST + + (u1 * U8_HANGUL_V_COUNT + u2) * + U8_HANGUL_T_COUNT; + + i += 2; + if (i <= last) { + U8_PUT_3BYTES_INTO_UTF32(u2, + s[start[i]], s[start[i] + 1], + s[start[i] + 2]); + + if (U8_HANGUL_JAMO_T(u2)) { + u1 += u2 - + U8_HANGUL_JAMO_T_FIRST; + i++; + } + } + + U8_SAVE_HANGUL_AS_UTF8(t + l, 0, 1, 2, u1); + i--; + l += 3; + continue; + } + } + + /* + * Let's then find out if this Starter has composition + * mapping. + */ + p = find_composition_start(uv, s + start[i], disp[i]); + if (p == NULL) + goto SAVE_THE_CHAR; + + /* + * We have a Starter with composition mapping and the next + * character is a non-Starter. Let's try to find out if + * we can do composition. + */ + + saved_p = p; + saved_i = i; + saved_l = l; + saved_marks_count = 0; + +TRY_THE_NEXT_MARK: + q = s + start[++i]; + size = disp[i]; + + /* + * The next for() loop compares the non-Starter pointed by + * 'q' with the possible (joinable) characters pointed by 'p'. + * + * The composition final table entry pointed by the 'p' + * looks like the following: + * + * +---+---+---+-...-+---+---+---+---+-...-+---+---+ + * | C | b0| b2| ... | bn| F | B0| B1| ... | Bm| F | + * +---+---+---+-...-+---+---+---+---+-...-+---+---+ + * + * where C is the count byte indicating the number of + * mapping pairs where each pair would be look like + * (b0-bn F, B0-Bm F). The b0-bn are the bytes of the second + * character of a canonical decomposition and the B0-Bm are + * the bytes of a matching composite character. The F is + * a filler byte after each character as the separator. + */ + + match_not_found = B_TRUE; + + for (C = *p++; C > 0; C--) { + for (k = 0; k < size; p++, k++) + if (*p != q[k]) + break; + + /* Have we found it? */ + if (k >= size && *p == U8_TBL_ELEMENT_FILLER) { + match_not_found = B_FALSE; + + l = saved_l; + + while (*++p != U8_TBL_ELEMENT_FILLER) + t[l++] = *p; + + break; + } + + /* We didn't find; skip to the next pair. */ + if (*p != U8_TBL_ELEMENT_FILLER) + while (*++p != U8_TBL_ELEMENT_FILLER) + ; + while (*++p != U8_TBL_ELEMENT_FILLER) + ; + p++; + } + + /* + * If there was no match, we will need to save the combining + * mark for later appending. After that, if the next one + * is a non-Starter and not blocked, then, we try once + * again to do composition with the next non-Starter. + * + * If there was no match and this was a Starter, then, + * this is a new start. + * + * If there was a match and a composition done and we have + * more to check on, then, we retrieve a new composition final + * table entry for the composite and then try to do the + * composition again. + */ + + if (match_not_found) { + if (comb_class[i] == U8_COMBINING_CLASS_STARTER) { + i--; + goto SAVE_THE_CHAR; + } + + saved_marks[saved_marks_count++] = i; + } + + if (saved_l == l) { + while (i < last) { + if (blocked(comb_class, i + 1)) + saved_marks[saved_marks_count++] = ++i; + else + break; + } + if (i < last) { + p = saved_p; + goto TRY_THE_NEXT_MARK; + } + } else if (i < last) { + p = find_composition_start(uv, t + saved_l, + l - saved_l); + if (p != NULL) { + saved_p = p; + goto TRY_THE_NEXT_MARK; + } + } + + /* + * There is no more composition possible. 
+ * + * If there was no composition what so ever then we copy + * over the original Starter and then append any non-Starters + * remaining at the target string sequentially after that. + */ + + if (saved_l == l) { + p = s + start[saved_i]; + size = disp[saved_i]; + for (j = 0; j < size; j++) + t[l++] = *p++; + } + + for (k = 0; k < saved_marks_count; k++) { + p = s + start[saved_marks[k]]; + size = disp[saved_marks[k]]; + for (j = 0; j < size; j++) + t[l++] = *p++; + } + } + + /* + * If the last character is a Starter and if we have a character + * (possibly another Starter) that can be turned into a composite, + * we do so and we do so until there is no more of composition + * possible. + */ + if (comb_class[last] == U8_COMBINING_CLASS_STARTER) { + p = *os; + saved_l = l - disp[last]; + + while (p < oslast) { + size = u8_number_of_bytes[*p]; + if (size <= 1 || (p + size) > oslast) + break; + + saved_p = p; + + for (i = 0; i < size; i++) + tc[i] = *p++; + + q = find_composition_start(uv, t + saved_l, + l - saved_l); + if (q == NULL) { + p = saved_p; + break; + } + + match_not_found = B_TRUE; + + for (C = *q++; C > 0; C--) { + for (k = 0; k < size; q++, k++) + if (*q != tc[k]) + break; + + if (k >= size && *q == U8_TBL_ELEMENT_FILLER) { + match_not_found = B_FALSE; + + l = saved_l; + + while (*++q != U8_TBL_ELEMENT_FILLER) { + /* + * This is practically + * impossible but we don't + * want to take any chances. + */ + if (l >= + U8_STREAM_SAFE_TEXT_MAX) { + p = saved_p; + goto SAFE_RETURN; + } + t[l++] = *q; + } + + break; + } + + if (*q != U8_TBL_ELEMENT_FILLER) + while (*++q != U8_TBL_ELEMENT_FILLER) + ; + while (*++q != U8_TBL_ELEMENT_FILLER) + ; + q++; + } + + if (match_not_found) { + p = saved_p; + break; + } + } +SAFE_RETURN: + *os = p; + } + + /* + * Now we copy over the temporary string to the target string. + * Since composition always reduces the number of characters or + * the number of characters stay, we don't need to worry about + * the buffer overflow here. + */ + for (i = 0; i < l; i++) + s[i] = t[i]; + s[l] = '\0'; + + return (l); +} + +/* + * The collect_a_seq() function checks on the given string s, collect + * a sequence of characters at u8s, and return the sequence. While it collects + * a sequence, it also applies case conversion, canonical or compatibility + * decomposition, canonical decomposition, or some or all of them and + * in that order. + * + * The collected sequence cannot be bigger than 32 characters since if + * it is having more than 31 characters, the sequence will be terminated + * with a U+034F COMBINING GRAPHEME JOINER (CGJ) character and turned into + * a Stream-Safe Text. The collected sequence is always terminated with + * a null byte and the return value is the byte length of the sequence + * including 0. The return value does not include the terminating + * null byte. 
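+ *
+ * As an illustrative sketch (assuming the standard canonical mappings):
+ * with canonical decomposition and composition requested, the two-character
+ * input 'e' + U+0301 COMBINING ACUTE ACCENT (0x65 0xCC 0x81) is collected
+ * as one sequence and comes back as the single precomposed U+00E9
+ * (0xC3 0xA9), with a return value of 2.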
+ */ +static size_t +collect_a_seq(size_t uv, uchar_t *u8s, uchar_t **source, uchar_t *slast, + boolean_t is_it_toupper, + boolean_t is_it_tolower, + boolean_t canonical_decomposition, + boolean_t compatibility_decomposition, + boolean_t canonical_composition, + int *errnum, u8_normalization_states_t *state) +{ + uchar_t *s; + int sz; + int saved_sz; + size_t i; + size_t j; + size_t k; + size_t l; + uchar_t comb_class[U8_MAX_CHARS_A_SEQ]; + uchar_t disp[U8_MAX_CHARS_A_SEQ]; + uchar_t start[U8_MAX_CHARS_A_SEQ]; + uchar_t u8t[U8_MB_CUR_MAX]; + uchar_t uts[U8_STREAM_SAFE_TEXT_MAX + 1]; + uchar_t tc; + size_t last; + size_t saved_last; + uint32_t u1; + + /* + * Save the source string pointer which we will return a changed + * pointer if we do processing. + */ + s = *source; + + /* + * The following is a fallback for just in case callers are not + * checking the string boundaries before the calling. + */ + if (s >= slast) { + u8s[0] = '\0'; + + return (0); + } + + /* + * As the first thing, let's collect a character and do case + * conversion if necessary. + */ + + sz = u8_number_of_bytes[*s]; + + if (sz < 0) { + *errnum = EILSEQ; + + u8s[0] = *s++; + u8s[1] = '\0'; + + *source = s; + + return (1); + } + + if (sz == 1) { + if (is_it_toupper) + u8s[0] = U8_ASCII_TOUPPER(*s); + else if (is_it_tolower) + u8s[0] = U8_ASCII_TOLOWER(*s); + else + u8s[0] = *s; + s++; + u8s[1] = '\0'; + } else if ((s + sz) > slast) { + *errnum = EINVAL; + + for (i = 0; s < slast; ) + u8s[i++] = *s++; + u8s[i] = '\0'; + + *source = s; + + return (i); + } else { + if (is_it_toupper || is_it_tolower) { + i = do_case_conv(uv, u8s, s, sz, is_it_toupper); + s += sz; + sz = i; + } else { + for (i = 0; i < sz; ) + u8s[i++] = *s++; + u8s[i] = '\0'; + } + } + + /* + * And then canonical/compatibility decomposition followed by + * an optional canonical composition. Please be noted that + * canonical composition is done only when a decomposition is + * done. + */ + if (canonical_decomposition || compatibility_decomposition) { + if (sz == 1) { + *state = U8_STATE_START; + + saved_sz = 1; + + comb_class[0] = 0; + start[0] = 0; + disp[0] = 1; + + last = 1; + } else { + saved_sz = do_decomp(uv, u8s, u8s, sz, + canonical_decomposition, state); + + last = 0; + + for (i = 0; i < saved_sz; ) { + sz = u8_number_of_bytes[u8s[i]]; + + comb_class[last] = combining_class(uv, + u8s + i, sz); + start[last] = i; + disp[last] = sz; + + last++; + i += sz; + } + + /* + * Decomposition yields various Hangul related + * states but not on combining marks. We need to + * find out at here by checking on the last + * character. + */ + if (*state == U8_STATE_START) { + if (comb_class[last - 1]) + *state = U8_STATE_COMBINING_MARK; + } + } + + saved_last = last; + + while (s < slast) { + sz = u8_number_of_bytes[*s]; + + /* + * If this is an illegal character, an incomplete + * character, or an 7-bit ASCII Starter character, + * then we have collected a sequence; break and let + * the next call deal with the two cases. + * + * Note that this is okay only if you are using this + * function with a fixed length string, not on + * a buffer with multiple calls of one chunk at a time. + */ + if (sz <= 1) { + break; + } else if ((s + sz) > slast) { + break; + } else { + /* + * If the previous character was a Hangul Jamo + * and this character is a Hangul Jamo that + * can be conjoined, we collect the Jamo. 
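+				 * (The first-byte test below is only a cheap
+				 * filter; the real range checks are done on
+				 * the decoded UTF-32 value by the
+				 * U8_HANGUL_COMPOSABLE_* macros.)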
+ */ + if (*s == U8_HANGUL_JAMO_1ST_BYTE) { + U8_PUT_3BYTES_INTO_UTF32(u1, + *s, *(s + 1), *(s + 2)); + + if (U8_HANGUL_COMPOSABLE_L_V(*state, + u1)) { + i = 0; + *state = U8_STATE_HANGUL_LV; + goto COLLECT_A_HANGUL; + } + + if (U8_HANGUL_COMPOSABLE_LV_T(*state, + u1)) { + i = 0; + *state = U8_STATE_HANGUL_LVT; + goto COLLECT_A_HANGUL; + } + } + + /* + * Regardless of whatever it was, if this is + * a Starter, we don't collect the character + * since that's a new start and we will deal + * with it at the next time. + */ + i = combining_class(uv, s, sz); + if (i == U8_COMBINING_CLASS_STARTER) + break; + + /* + * We know the current character is a combining + * mark. If the previous character wasn't + * a Starter (not Hangul) or a combining mark, + * then, we don't collect this combining mark. + */ + if (*state != U8_STATE_START && + *state != U8_STATE_COMBINING_MARK) + break; + + *state = U8_STATE_COMBINING_MARK; +COLLECT_A_HANGUL: + /* + * If we collected a Starter and combining + * marks up to 30, i.e., total 31 characters, + * then, we terminate this degenerately long + * combining sequence with a U+034F COMBINING + * GRAPHEME JOINER (CGJ) which is 0xCD 0x8F in + * UTF-8 and turn this into a Stream-Safe + * Text. This will be extremely rare but + * possible. + * + * The following will also guarantee that + * we are not writing more than 32 characters + * plus a NULL at u8s[]. + */ + if (last >= U8_UPPER_LIMIT_IN_A_SEQ) { +TURN_STREAM_SAFE: + *state = U8_STATE_START; + comb_class[last] = 0; + start[last] = saved_sz; + disp[last] = 2; + last++; + + u8s[saved_sz++] = 0xCD; + u8s[saved_sz++] = 0x8F; + + break; + } + + /* + * Some combining marks also do decompose into + * another combining mark or marks. + */ + if (*state == U8_STATE_COMBINING_MARK) { + k = last; + l = sz; + i = do_decomp(uv, uts, s, sz, + canonical_decomposition, state); + for (j = 0; j < i; ) { + sz = u8_number_of_bytes[uts[j]]; + + comb_class[last] = + combining_class(uv, + uts + j, sz); + start[last] = saved_sz + j; + disp[last] = sz; + + last++; + if (last >= + U8_UPPER_LIMIT_IN_A_SEQ) { + last = k; + goto TURN_STREAM_SAFE; + } + j += sz; + } + + *state = U8_STATE_COMBINING_MARK; + sz = i; + s += l; + + for (i = 0; i < sz; i++) + u8s[saved_sz++] = uts[i]; + } else { + comb_class[last] = i; + start[last] = saved_sz; + disp[last] = sz; + last++; + + for (i = 0; i < sz; i++) + u8s[saved_sz++] = *s++; + } + + /* + * If this is U+0345 COMBINING GREEK + * YPOGEGRAMMENI (0xCD 0x85 in UTF-8), a.k.a., + * iota subscript, and need to be converted to + * uppercase letter, convert it to U+0399 GREEK + * CAPITAL LETTER IOTA (0xCE 0x99 in UTF-8), + * i.e., convert to capital adscript form as + * specified in the Unicode standard. + * + * This is the only special case of (ambiguous) + * case conversion at combining marks and + * probably the standard will never have + * anything similar like this in future. + */ + if (is_it_toupper && sz >= 2 && + u8s[saved_sz - 2] == 0xCD && + u8s[saved_sz - 1] == 0x85) { + u8s[saved_sz - 2] = 0xCE; + u8s[saved_sz - 1] = 0x99; + } + } + } + + /* + * Let's try to ensure a canonical ordering for the collected + * combining marks. We do this only if we have collected + * at least one more non-Starter. (The decomposition mapping + * data tables have fully (and recursively) expanded and + * canonically ordered decompositions.) + * + * The U8_SWAP_COMB_MARKS() convenience macro has some + * assumptions and we are meeting the assumptions. 
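+		 *
+		 * (Illustration with standard combining class values: for
+		 * U+0301, class 230, followed by U+0323, class 220, the pass
+		 * below swaps the two marks so that the class-220 mark comes
+		 * first, as canonical ordering requires.)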
+ */ + last--; + if (last >= saved_last) { + for (i = 0; i < last; i++) + for (j = last; j > i; j--) + if (comb_class[j] && + comb_class[j - 1] > comb_class[j]) { + U8_SWAP_COMB_MARKS(j - 1, j); + } + } + + *source = s; + + if (! canonical_composition) { + u8s[saved_sz] = '\0'; + return (saved_sz); + } + + /* + * Now do the canonical composition. Note that we do this + * only after a canonical or compatibility decomposition to + * finish up NFC or NFKC. + */ + sz = do_composition(uv, u8s, comb_class, start, disp, last, + &s, slast); + } + + *source = s; + + return ((size_t)sz); +} + +/* + * The do_norm_compare() function does string comparion based on Unicode + * simple case mappings and Unicode Normalization definitions. + * + * It does so by collecting a sequence of character at a time and comparing + * the collected sequences from the strings. + * + * The meanings on the return values are the same as the usual strcmp(). + */ +static int +do_norm_compare(size_t uv, uchar_t *s1, uchar_t *s2, size_t n1, size_t n2, + int flag, int *errnum) +{ + int result; + size_t sz1; + size_t sz2; + uchar_t u8s1[U8_STREAM_SAFE_TEXT_MAX + 1]; + uchar_t u8s2[U8_STREAM_SAFE_TEXT_MAX + 1]; + uchar_t *s1last; + uchar_t *s2last; + boolean_t is_it_toupper; + boolean_t is_it_tolower; + boolean_t canonical_decomposition; + boolean_t compatibility_decomposition; + boolean_t canonical_composition; + u8_normalization_states_t state; + + s1last = s1 + n1; + s2last = s2 + n2; + + is_it_toupper = flag & U8_TEXTPREP_TOUPPER; + is_it_tolower = flag & U8_TEXTPREP_TOLOWER; + canonical_decomposition = flag & U8_CANON_DECOMP; + compatibility_decomposition = flag & U8_COMPAT_DECOMP; + canonical_composition = flag & U8_CANON_COMP; + + while (s1 < s1last && s2 < s2last) { + /* + * If the current character is a 7-bit ASCII and the last + * character, or, if the current character and the next + * character are both some 7-bit ASCII characters then + * we treat the current character as a sequence. + * + * In any other cases, we need to call collect_a_seq(). + */ + + if (U8_ISASCII(*s1) && ((s1 + 1) >= s1last || + ((s1 + 1) < s1last && U8_ISASCII(*(s1 + 1))))) { + if (is_it_toupper) + u8s1[0] = U8_ASCII_TOUPPER(*s1); + else if (is_it_tolower) + u8s1[0] = U8_ASCII_TOLOWER(*s1); + else + u8s1[0] = *s1; + u8s1[1] = '\0'; + sz1 = 1; + s1++; + } else { + state = U8_STATE_START; + sz1 = collect_a_seq(uv, u8s1, &s1, s1last, + is_it_toupper, is_it_tolower, + canonical_decomposition, + compatibility_decomposition, + canonical_composition, errnum, &state); + } + + if (U8_ISASCII(*s2) && ((s2 + 1) >= s2last || + ((s2 + 1) < s2last && U8_ISASCII(*(s2 + 1))))) { + if (is_it_toupper) + u8s2[0] = U8_ASCII_TOUPPER(*s2); + else if (is_it_tolower) + u8s2[0] = U8_ASCII_TOLOWER(*s2); + else + u8s2[0] = *s2; + u8s2[1] = '\0'; + sz2 = 1; + s2++; + } else { + state = U8_STATE_START; + sz2 = collect_a_seq(uv, u8s2, &s2, s2last, + is_it_toupper, is_it_tolower, + canonical_decomposition, + compatibility_decomposition, + canonical_composition, errnum, &state); + } + + /* + * Now compare the two characters. If they are the same, + * we move on to the next character sequences. + */ + if (sz1 == 1 && sz2 == 1) { + if (*u8s1 > *u8s2) + return (1); + if (*u8s1 < *u8s2) + return (-1); + } else { + result = strcmp((const char *)u8s1, (const char *)u8s2); + if (result != 0) + return (result); + } + } + + /* + * We compared until the end of either or both strings. + * + * If we reached to or went over the ends for the both, that means + * they are the same. 
+ * + * If we reached only one end, that means the other string has + * something which then can be used to determine the return value. + */ + if (s1 >= s1last) { + if (s2 >= s2last) + return (0); + return (-1); + } + return (1); +} + +/* + * The u8_strcmp() function compares two UTF-8 strings quite similar to + * the strcmp(). For the comparison, however, Unicode Normalization specific + * equivalency and Unicode simple case conversion mappings based equivalency + * can be requested and checked against. + */ +int +u8_strcmp(const char *s1, const char *s2, size_t n, int flag, size_t uv, + int *errnum) +{ + int f; + size_t n1; + size_t n2; + + *errnum = 0; + + /* + * Check on the requested Unicode version, case conversion, and + * normalization flag values. + */ + + if (uv > U8_UNICODE_LATEST) { + *errnum = ERANGE; + uv = U8_UNICODE_LATEST; + } + + if (flag == 0) { + flag = U8_STRCMP_CS; + } else { + f = flag & (U8_STRCMP_CS | U8_STRCMP_CI_UPPER | + U8_STRCMP_CI_LOWER); + if (f == 0) { + flag |= U8_STRCMP_CS; + } else if (f != U8_STRCMP_CS && f != U8_STRCMP_CI_UPPER && + f != U8_STRCMP_CI_LOWER) { + *errnum = EBADF; + flag = U8_STRCMP_CS; + } + + f = flag & (U8_CANON_DECOMP | U8_COMPAT_DECOMP | U8_CANON_COMP); + if (f && f != U8_STRCMP_NFD && f != U8_STRCMP_NFC && + f != U8_STRCMP_NFKD && f != U8_STRCMP_NFKC) { + *errnum = EBADF; + flag = U8_STRCMP_CS; + } + } + + if (flag == U8_STRCMP_CS) { + return (n == 0 ? strcmp(s1, s2) : strncmp(s1, s2, n)); + } + + n1 = strlen(s1); + n2 = strlen(s2); + if (n != 0) { + if (n < n1) + n1 = n; + if (n < n2) + n2 = n; + } + + /* + * Simple case conversion can be done much faster and so we do + * them separately here. + */ + if (flag == U8_STRCMP_CI_UPPER) { + return (do_case_compare(uv, (uchar_t *)s1, (uchar_t *)s2, + n1, n2, B_TRUE, errnum)); + } else if (flag == U8_STRCMP_CI_LOWER) { + return (do_case_compare(uv, (uchar_t *)s1, (uchar_t *)s2, + n1, n2, B_FALSE, errnum)); + } + + return (do_norm_compare(uv, (uchar_t *)s1, (uchar_t *)s2, n1, n2, + flag, errnum)); +} + +size_t +u8_textprep_str(char *inarray, size_t *inlen, char *outarray, size_t *outlen, + int flag, size_t unicode_version, int *errnum) +{ + int f; + int sz; + uchar_t *ib; + uchar_t *ibtail; + uchar_t *ob; + uchar_t *obtail; + boolean_t do_not_ignore_null; + boolean_t do_not_ignore_invalid; + boolean_t is_it_toupper; + boolean_t is_it_tolower; + boolean_t canonical_decomposition; + boolean_t compatibility_decomposition; + boolean_t canonical_composition; + size_t ret_val; + size_t i; + size_t j; + uchar_t u8s[U8_STREAM_SAFE_TEXT_MAX + 1]; + u8_normalization_states_t state; + + if (unicode_version > U8_UNICODE_LATEST) { + *errnum = ERANGE; + return ((size_t)-1); + } + + f = flag & (U8_TEXTPREP_TOUPPER | U8_TEXTPREP_TOLOWER); + if (f == (U8_TEXTPREP_TOUPPER | U8_TEXTPREP_TOLOWER)) { + *errnum = EBADF; + return ((size_t)-1); + } + + f = flag & (U8_CANON_DECOMP | U8_COMPAT_DECOMP | U8_CANON_COMP); + if (f && f != U8_TEXTPREP_NFD && f != U8_TEXTPREP_NFC && + f != U8_TEXTPREP_NFKD && f != U8_TEXTPREP_NFKC) { + *errnum = EBADF; + return ((size_t)-1); + } + + if (inarray == NULL || *inlen == 0) + return (0); + + if (outarray == NULL) { + *errnum = E2BIG; + return ((size_t)-1); + } + + ib = (uchar_t *)inarray; + ob = (uchar_t *)outarray; + ibtail = ib + *inlen; + obtail = ob + *outlen; + + do_not_ignore_null = !(flag & U8_TEXTPREP_IGNORE_NULL); + do_not_ignore_invalid = !(flag & U8_TEXTPREP_IGNORE_INVALID); + is_it_toupper = flag & U8_TEXTPREP_TOUPPER; + is_it_tolower = flag & U8_TEXTPREP_TOLOWER; + 
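+	/*
+	 * At this point 'f' still holds the normalization-related bits
+	 * (U8_CANON_DECOMP, U8_COMPAT_DECOMP, U8_CANON_COMP) extracted and
+	 * validated above; it selects between the simple case-conversion
+	 * loop and the normalization loop below.
+	 */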
+ ret_val = 0; + + /* + * If we don't have a normalization flag set, we do the simple case + * conversion based text preparation separately below. Text + * preparation involving Normalization will be done in the false task + * block, again, separately since it will take much more time and + * resource than doing simple case conversions. + */ + if (f == 0) { + while (ib < ibtail) { + if (*ib == '\0' && do_not_ignore_null) + break; + + sz = u8_number_of_bytes[*ib]; + + if (sz < 0) { + if (do_not_ignore_invalid) { + *errnum = EILSEQ; + ret_val = (size_t)-1; + break; + } + + sz = 1; + ret_val++; + } + + if (sz == 1) { + if (ob >= obtail) { + *errnum = E2BIG; + ret_val = (size_t)-1; + break; + } + + if (is_it_toupper) + *ob = U8_ASCII_TOUPPER(*ib); + else if (is_it_tolower) + *ob = U8_ASCII_TOLOWER(*ib); + else + *ob = *ib; + ib++; + ob++; + } else if ((ib + sz) > ibtail) { + if (do_not_ignore_invalid) { + *errnum = EINVAL; + ret_val = (size_t)-1; + break; + } + + if ((obtail - ob) < (ibtail - ib)) { + *errnum = E2BIG; + ret_val = (size_t)-1; + break; + } + + /* + * We treat the remaining incomplete character + * bytes as a character. + */ + ret_val++; + + while (ib < ibtail) + *ob++ = *ib++; + } else { + if (is_it_toupper || is_it_tolower) { + i = do_case_conv(unicode_version, u8s, + ib, sz, is_it_toupper); + + if ((obtail - ob) < i) { + *errnum = E2BIG; + ret_val = (size_t)-1; + break; + } + + ib += sz; + + for (sz = 0; sz < i; sz++) + *ob++ = u8s[sz]; + } else { + if ((obtail - ob) < sz) { + *errnum = E2BIG; + ret_val = (size_t)-1; + break; + } + + for (i = 0; i < sz; i++) + *ob++ = *ib++; + } + } + } + } else { + canonical_decomposition = flag & U8_CANON_DECOMP; + compatibility_decomposition = flag & U8_COMPAT_DECOMP; + canonical_composition = flag & U8_CANON_COMP; + + while (ib < ibtail) { + if (*ib == '\0' && do_not_ignore_null) + break; + + /* + * If the current character is a 7-bit ASCII + * character and it is the last character, or, + * if the current character is a 7-bit ASCII + * character and the next character is also a 7-bit + * ASCII character, then, we copy over this + * character without going through collect_a_seq(). + * + * In any other cases, we need to look further with + * the collect_a_seq() function. + */ + if (U8_ISASCII(*ib) && ((ib + 1) >= ibtail || + ((ib + 1) < ibtail && U8_ISASCII(*(ib + 1))))) { + if (ob >= obtail) { + *errnum = E2BIG; + ret_val = (size_t)-1; + break; + } + + if (is_it_toupper) + *ob = U8_ASCII_TOUPPER(*ib); + else if (is_it_tolower) + *ob = U8_ASCII_TOLOWER(*ib); + else + *ob = *ib; + ib++; + ob++; + } else { + *errnum = 0; + state = U8_STATE_START; + + j = collect_a_seq(unicode_version, u8s, + &ib, ibtail, + is_it_toupper, + is_it_tolower, + canonical_decomposition, + compatibility_decomposition, + canonical_composition, + errnum, &state); + + if (*errnum && do_not_ignore_invalid) { + ret_val = (size_t)-1; + break; + } + + if ((obtail - ob) < j) { + *errnum = E2BIG; + ret_val = (size_t)-1; + break; + } + + for (i = 0; i < j; i++) + *ob++ = u8s[i]; + } + } + } + + *inlen = ibtail - ib; + *outlen = obtail - ob; + + return (ret_val); +} diff --git a/external/cddl/osnet/dist/common/zfs/zfs_comutil.c b/external/cddl/osnet/dist/common/zfs/zfs_comutil.c new file mode 100644 index 000000000000..74517a3f6920 --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zfs_comutil.c @@ -0,0 +1,65 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). 
+ * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * This file is intended for functions that ought to be common between user + * land (libzfs) and the kernel. When many common routines need to be shared + * then a separate file should to be created. + */ + +#if defined(_KERNEL) +#include +#endif + +#include +#include +#include + +/* + * Are there allocatable vdevs? + */ +boolean_t +zfs_allocatable_devs(nvlist_t *nv) +{ + uint64_t is_log; + uint_t c; + nvlist_t **child; + uint_t children; + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) { + return (B_FALSE); + } + for (c = 0; c < children; c++) { + is_log = 0; + (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, + &is_log); + if (!is_log) + return (B_TRUE); + } + return (B_FALSE); +} diff --git a/external/cddl/osnet/dist/common/zfs/zfs_comutil.h b/external/cddl/osnet/dist/common/zfs/zfs_comutil.h new file mode 100644 index 000000000000..f517044a80a0 --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zfs_comutil.h @@ -0,0 +1,44 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _ZFS_COMUTIL_H +#define _ZFS_COMUTIL_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern boolean_t zfs_allocatable_devs(nvlist_t *nv); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZFS_COMUTIL_H */ diff --git a/external/cddl/osnet/dist/common/zfs/zfs_deleg.c b/external/cddl/osnet/dist/common/zfs/zfs_deleg.c new file mode 100644 index 000000000000..0fd5800a84dc --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zfs_deleg.c @@ -0,0 +1,234 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + + +#pragma ident "%Z%%M% %I% %E% SMI" + +#if defined(_KERNEL) +#include +#include +#include +#else +#include +#include +#include +#include +#include +#endif +/* XXX includes zfs_context.h, so why bother with the above? */ +#include +#include "zfs_prop.h" +#include "zfs_deleg.h" +#include "zfs_namecheck.h" + +/* + * permission table + * + * Keep this table in sorted order + * + * This table is used for displaying all permissions for + * zfs allow + */ + +zfs_deleg_perm_tab_t zfs_deleg_perm_tab[] = { + {ZFS_DELEG_PERM_ALLOW, ZFS_DELEG_NOTE_ALLOW}, + {ZFS_DELEG_PERM_CLONE, ZFS_DELEG_NOTE_CLONE }, + {ZFS_DELEG_PERM_CREATE, ZFS_DELEG_NOTE_CREATE }, + {ZFS_DELEG_PERM_DESTROY, ZFS_DELEG_NOTE_DESTROY }, + {ZFS_DELEG_PERM_MOUNT, ZFS_DELEG_NOTE_MOUNT }, + {ZFS_DELEG_PERM_PROMOTE, ZFS_DELEG_NOTE_PROMOTE }, + {ZFS_DELEG_PERM_RECEIVE, ZFS_DELEG_NOTE_RECEIVE }, + {ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME }, + {ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK }, + {ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT }, + {ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE }, + {ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_NONE }, + {ZFS_DELEG_PERM_USERPROP, ZFS_DELEG_NOTE_USERPROP }, + {NULL, ZFS_DELEG_NOTE_NONE } +}; + +static int +zfs_valid_permission_name(const char *perm) +{ + if (zfs_deleg_canonicalize_perm(perm)) + return (0); + + return (permset_namecheck(perm, NULL, NULL)); +} + +const char * +zfs_deleg_canonicalize_perm(const char *perm) +{ + int i; + zfs_prop_t prop; + + for (i = 0; zfs_deleg_perm_tab[i].z_perm != NULL; i++) { + if (strcmp(perm, zfs_deleg_perm_tab[i].z_perm) == 0) + return (perm); + } + + prop = zfs_name_to_prop(perm); + if (prop != ZPROP_INVAL && zfs_prop_delegatable(prop)) + return (zfs_prop_to_name(prop)); + return (NULL); + +} + +static int +zfs_validate_who(char *who) +{ + char *p; + + if (who[2] != ZFS_DELEG_FIELD_SEP_CHR) + return (-1); + + switch (who[0]) { + case ZFS_DELEG_USER: + case ZFS_DELEG_GROUP: + case ZFS_DELEG_USER_SETS: + case ZFS_DELEG_GROUP_SETS: + if (who[1] != ZFS_DELEG_LOCAL && who[1] != ZFS_DELEG_DESCENDENT) + return (-1); + for (p = &who[3]; *p; p++) + if (!isdigit(*p)) + return (-1); + break; + + case ZFS_DELEG_NAMED_SET: + case ZFS_DELEG_NAMED_SET_SETS: + if (who[1] != ZFS_DELEG_NA) + return (-1); + return (permset_namecheck(&who[3], NULL, NULL)); + + case ZFS_DELEG_CREATE: + case ZFS_DELEG_CREATE_SETS: + if (who[1] != ZFS_DELEG_NA) + return (-1); + if (who[3] != '\0') + return (-1); + break; + + case ZFS_DELEG_EVERYONE: + case ZFS_DELEG_EVERYONE_SETS: + if (who[1] != ZFS_DELEG_LOCAL && who[1] != ZFS_DELEG_DESCENDENT) + return (-1); + if (who[3] != '\0') + return (-1); + break; + + default: + return (-1); + } + + return (0); +} + +int +zfs_deleg_verify_nvlist(nvlist_t *nvp) +{ + nvpair_t *who, *perm_name; + nvlist_t *perms; + int error; + + if (nvp == 
NULL) + return (-1); + + who = nvlist_next_nvpair(nvp, NULL); + if (who == NULL) + return (-1); + + do { + if (zfs_validate_who(nvpair_name(who))) + return (-1); + + error = nvlist_lookup_nvlist(nvp, nvpair_name(who), &perms); + + if (error && error != ENOENT) + return (-1); + if (error == ENOENT) + continue; + + perm_name = nvlist_next_nvpair(perms, NULL); + if (perm_name == NULL) { + return (-1); + } + do { + error = zfs_valid_permission_name( + nvpair_name(perm_name)); + if (error) + return (-1); + } while (perm_name = nvlist_next_nvpair(perms, perm_name)); + } while (who = nvlist_next_nvpair(nvp, who)); + return (0); +} + +/* + * Construct the base attribute name. The base attribute names + * are the "key" to locate the jump objects which contain the actual + * permissions. The base attribute names are encoded based on + * type of entry and whether it is a local or descendent permission. + * + * Arguments: + * attr - attribute name return string, attribute is assumed to be + * ZFS_MAX_DELEG_NAME long. + * type - type of entry to construct + * inheritchr - inheritance type (local,descendent, or NA for create and + * permission set definitions + * data - is either a permission set name or a 64 bit uid/gid. + */ +void +zfs_deleg_whokey(char *attr, zfs_deleg_who_type_t type, + char inheritchr, void *data) +{ + int len = ZFS_MAX_DELEG_NAME; + uint64_t *id = data; + + switch (type) { + case ZFS_DELEG_USER: + case ZFS_DELEG_GROUP: + case ZFS_DELEG_USER_SETS: + case ZFS_DELEG_GROUP_SETS: + (void) snprintf(attr, len, "%c%c%c%lld", type, inheritchr, + ZFS_DELEG_FIELD_SEP_CHR, (longlong_t)*id); + break; + case ZFS_DELEG_NAMED_SET_SETS: + case ZFS_DELEG_NAMED_SET: + (void) snprintf(attr, len, "%c-%c%s", type, + ZFS_DELEG_FIELD_SEP_CHR, (char *)data); + break; + case ZFS_DELEG_CREATE: + case ZFS_DELEG_CREATE_SETS: + (void) snprintf(attr, len, "%c-%c", type, + ZFS_DELEG_FIELD_SEP_CHR); + break; + case ZFS_DELEG_EVERYONE: + case ZFS_DELEG_EVERYONE_SETS: + (void) snprintf(attr, len, "%c%c%c", type, inheritchr, + ZFS_DELEG_FIELD_SEP_CHR); + break; + default: + ASSERT(!"bad zfs_deleg_who_type_t"); + } +} diff --git a/external/cddl/osnet/dist/common/zfs/zfs_deleg.h b/external/cddl/osnet/dist/common/zfs/zfs_deleg.h new file mode 100644 index 000000000000..561b73e63df4 --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zfs_deleg.h @@ -0,0 +1,81 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _ZFS_DELEG_H +#define _ZFS_DELEG_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZFS_DELEG_SET_NAME_CHR '@' /* set name lead char */ +#define ZFS_DELEG_FIELD_SEP_CHR '$' /* field separator */ + +/* + * Max name length for a delegation attribute + */ +#define ZFS_MAX_DELEG_NAME 128 + +#define ZFS_DELEG_LOCAL 'l' +#define ZFS_DELEG_DESCENDENT 'd' +#define ZFS_DELEG_NA '-' + +typedef enum { + ZFS_DELEG_NOTE_CREATE, + ZFS_DELEG_NOTE_DESTROY, + ZFS_DELEG_NOTE_SNAPSHOT, + ZFS_DELEG_NOTE_ROLLBACK, + ZFS_DELEG_NOTE_CLONE, + ZFS_DELEG_NOTE_PROMOTE, + ZFS_DELEG_NOTE_RENAME, + ZFS_DELEG_NOTE_RECEIVE, + ZFS_DELEG_NOTE_ALLOW, + ZFS_DELEG_NOTE_USERPROP, + ZFS_DELEG_NOTE_MOUNT, + ZFS_DELEG_NOTE_SHARE, + ZFS_DELEG_NOTE_NONE +} zfs_deleg_note_t; + +typedef struct zfs_deleg_perm_tab { + char *z_perm; + zfs_deleg_note_t z_note; +} zfs_deleg_perm_tab_t; + +extern zfs_deleg_perm_tab_t zfs_deleg_perm_tab[]; + +int zfs_deleg_verify_nvlist(nvlist_t *nvlist); +void zfs_deleg_whokey(char *attr, zfs_deleg_who_type_t type, + char checkflag, void *data); +const char *zfs_deleg_canonicalize_perm(const char *perm); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZFS_DELEG_H */ diff --git a/external/cddl/osnet/dist/common/zfs/zfs_namecheck.c b/external/cddl/osnet/dist/common/zfs/zfs_namecheck.c new file mode 100644 index 000000000000..a9d109be20ab --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zfs_namecheck.c @@ -0,0 +1,363 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * Common name validation routines for ZFS. These routines are shared by the + * userland code as well as the ioctl() layer to ensure that we don't + * inadvertently expose a hole through direct ioctl()s that never gets tested. + * In userland, however, we want significantly more information about _why_ the + * name is invalid. In the kernel, we only care whether it's valid or not. + * Each routine therefore takes a 'namecheck_err_t' which describes exactly why + * the name failed to validate. + * + * Each function returns 0 on success, -1 on error. + */ + +#if defined(_KERNEL) +#include +#else +#include +#endif + +#include +#include +#include "zfs_namecheck.h" +#include "zfs_deleg.h" + +static int +valid_char(char c) +{ + return ((c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || + c == '-' || c == '_' || c == '.' 
|| c == ':' || c == ' '); +} + +/* + * Snapshot names must be made up of alphanumeric characters plus the following + * characters: + * + * [-_.:] + */ +int +snapshot_namecheck(const char *path, namecheck_err_t *why, char *what) +{ + const char *loc; + + if (strlen(path) >= MAXNAMELEN) { + if (why) + *why = NAME_ERR_TOOLONG; + return (-1); + } + + if (path[0] == '\0') { + if (why) + *why = NAME_ERR_EMPTY_COMPONENT; + return (-1); + } + + for (loc = path; *loc; loc++) { + if (!valid_char(*loc)) { + if (why) { + *why = NAME_ERR_INVALCHAR; + *what = *loc; + } + return (-1); + } + } + return (0); +} + + +/* + * Permissions set name must start with the letter '@' followed by the + * same character restrictions as snapshot names, except that the name + * cannot exceed 64 characters. + */ +int +permset_namecheck(const char *path, namecheck_err_t *why, char *what) +{ + if (strlen(path) >= ZFS_PERMSET_MAXLEN) { + if (why) + *why = NAME_ERR_TOOLONG; + return (-1); + } + + if (path[0] != '@') { + if (why) { + *why = NAME_ERR_NO_AT; + *what = path[0]; + } + return (-1); + } + + return (snapshot_namecheck(&path[1], why, what)); +} + +/* + * Dataset names must be of the following form: + * + * [component][/]*[component][@component] + * + * Where each component is made up of alphanumeric characters plus the following + * characters: + * + * [-_.:%] + * + * We allow '%' here as we use that character internally to create unique + * names for temporary clones (for online recv). + */ +int +dataset_namecheck(const char *path, namecheck_err_t *why, char *what) +{ + const char *loc, *end; + int found_snapshot; + + /* + * Make sure the name is not too long. + * + * ZFS_MAXNAMELEN is the maximum dataset length used in the userland + * which is the same as MAXNAMELEN used in the kernel. + * If ZFS_MAXNAMELEN value is changed, make sure to cleanup all + * places using MAXNAMELEN. + */ + + if (strlen(path) >= MAXNAMELEN) { + if (why) + *why = NAME_ERR_TOOLONG; + return (-1); + } + + /* Explicitly check for a leading slash. */ + if (path[0] == '/') { + if (why) + *why = NAME_ERR_LEADING_SLASH; + return (-1); + } + + if (path[0] == '\0') { + if (why) + *why = NAME_ERR_EMPTY_COMPONENT; + return (-1); + } + + loc = path; + found_snapshot = 0; + for (;;) { + /* Find the end of this component */ + end = loc; + while (*end != '/' && *end != '@' && *end != '\0') + end++; + + if (*end == '\0' && end[-1] == '/') { + /* trailing slashes are not allowed */ + if (why) + *why = NAME_ERR_TRAILING_SLASH; + return (-1); + } + + /* Zero-length components are not allowed */ + if (loc == end) { + if (why) { + /* + * Make sure this is really a zero-length + * component and not a '@@'. + */ + if (*end == '@' && found_snapshot) { + *why = NAME_ERR_MULTIPLE_AT; + } else { + *why = NAME_ERR_EMPTY_COMPONENT; + } + } + + return (-1); + } + + /* Validate the contents of this component */ + while (loc != end) { + if (!valid_char(*loc) && *loc != '%') { + if (why) { + *why = NAME_ERR_INVALCHAR; + *what = *loc; + } + return (-1); + } + loc++; + } + + /* If we've reached the end of the string, we're OK */ + if (*end == '\0') + return (0); + + if (*end == '@') { + /* + * If we've found an @ symbol, indicate that we're in + * the snapshot component, and report a second '@' + * character as an error. 
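+			 *
+			 * (For example, "pool/fs@snap" is accepted, while
+			 * "pool/fs@a@b" fails with NAME_ERR_MULTIPLE_AT.)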
+ */ + if (found_snapshot) { + if (why) + *why = NAME_ERR_MULTIPLE_AT; + return (-1); + } + + found_snapshot = 1; + } + + /* + * If there is a '/' in a snapshot name + * then report an error + */ + if (*end == '/' && found_snapshot) { + if (why) + *why = NAME_ERR_TRAILING_SLASH; + return (-1); + } + + /* Update to the next component */ + loc = end + 1; + } +} + + +/* + * mountpoint names must be of the following form: + * + * /[component][/]*[component][/] + */ +int +mountpoint_namecheck(const char *path, namecheck_err_t *why) +{ + const char *start, *end; + + /* + * Make sure none of the mountpoint component names are too long. + * If a component name is too long then the mkdir of the mountpoint + * will fail but then the mountpoint property will be set to a value + * that can never be mounted. Better to fail before setting the prop. + * Extra slashes are OK, they will be tossed by the mountpoint mkdir. + */ + + if (path == NULL || *path != '/') { + if (why) + *why = NAME_ERR_LEADING_SLASH; + return (-1); + } + + /* Skip leading slash */ + start = &path[1]; + do { + end = start; + while (*end != '/' && *end != '\0') + end++; + + if (end - start >= MAXNAMELEN) { + if (why) + *why = NAME_ERR_TOOLONG; + return (-1); + } + start = end + 1; + + } while (*end != '\0'); + + return (0); +} + +/* + * For pool names, we have the same set of valid characters as described in + * dataset names, with the additional restriction that the pool name must begin + * with a letter. The pool names 'raidz' and 'mirror' are also reserved names + * that cannot be used. + */ +int +pool_namecheck(const char *pool, namecheck_err_t *why, char *what) +{ + const char *c; + + /* + * Make sure the name is not too long. + * + * ZPOOL_MAXNAMELEN is the maximum pool length used in the userland + * which is the same as MAXNAMELEN used in the kernel. + * If ZPOOL_MAXNAMELEN value is changed, make sure to cleanup all + * places using MAXNAMELEN. + */ + if (strlen(pool) >= MAXNAMELEN) { + if (why) + *why = NAME_ERR_TOOLONG; + return (-1); + } + + c = pool; + while (*c != '\0') { + if (!valid_char(*c)) { + if (why) { + *why = NAME_ERR_INVALCHAR; + *what = *c; + } + return (-1); + } + c++; + } + + if (!(*pool >= 'a' && *pool <= 'z') && + !(*pool >= 'A' && *pool <= 'Z')) { + if (why) + *why = NAME_ERR_NOLETTER; + return (-1); + } + + if (strcmp(pool, "mirror") == 0 || strcmp(pool, "raidz") == 0) { + if (why) + *why = NAME_ERR_RESERVED; + return (-1); + } + + if (pool[0] == 'c' && (pool[1] >= '0' && pool[1] <= '9')) { + if (why) + *why = NAME_ERR_DISKLIKE; + return (-1); + } + + return (0); +} + +/* + * Check if the dataset name is private for internal usage. + * '$' is reserved for internal dataset names. e.g. "$MOS" + * + * Return 1 if the given name is used internally. + * Return 0 if it is not. + */ +int +dataset_name_hidden(const char *name) +{ + if (strchr(name, '$') != NULL) + return (1); + + return (0); +} diff --git a/external/cddl/osnet/dist/common/zfs/zfs_namecheck.h b/external/cddl/osnet/dist/common/zfs/zfs_namecheck.h new file mode 100644 index 000000000000..ec85e62f72e8 --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zfs_namecheck.h @@ -0,0 +1,61 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. 
+ * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _ZFS_NAMECHECK_H +#define _ZFS_NAMECHECK_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + NAME_ERR_LEADING_SLASH, /* name begins with leading slash */ + NAME_ERR_EMPTY_COMPONENT, /* name contains an empty component */ + NAME_ERR_TRAILING_SLASH, /* name ends with a slash */ + NAME_ERR_INVALCHAR, /* invalid character found */ + NAME_ERR_MULTIPLE_AT, /* multiple '@' characters found */ + NAME_ERR_NOLETTER, /* pool doesn't begin with a letter */ + NAME_ERR_RESERVED, /* entire name is reserved */ + NAME_ERR_DISKLIKE, /* reserved disk name (c[0-9].*) */ + NAME_ERR_TOOLONG, /* name is too long */ + NAME_ERR_NO_AT, /* permission set is missing '@' */ +} namecheck_err_t; + +#define ZFS_PERMSET_MAXLEN 64 + +int pool_namecheck(const char *, namecheck_err_t *, char *); +int dataset_namecheck(const char *, namecheck_err_t *, char *); +int mountpoint_namecheck(const char *, namecheck_err_t *); +int dataset_name_hidden(const char *); +int snapshot_namecheck(const char *, namecheck_err_t *, char *); +int permset_namecheck(const char *, namecheck_err_t *, char *); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZFS_NAMECHECK_H */ diff --git a/external/cddl/osnet/dist/common/zfs/zfs_prop.c b/external/cddl/osnet/dist/common/zfs/zfs_prop.c new file mode 100644 index 000000000000..effd2dba7092 --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zfs_prop.c @@ -0,0 +1,496 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "zfs_prop.h" +#include "zfs_deleg.h" + +#if defined(_KERNEL) +#include +#else +#include +#include +#include +#endif + +static zprop_desc_t zfs_prop_table[ZFS_NUM_PROPS]; + +zprop_desc_t * +zfs_prop_get_table(void) +{ + return (zfs_prop_table); +} + +void +zfs_prop_init(void) +{ + static zprop_index_t checksum_table[] = { + { "on", ZIO_CHECKSUM_ON }, + { "off", ZIO_CHECKSUM_OFF }, + { "fletcher2", ZIO_CHECKSUM_FLETCHER_2 }, + { "fletcher4", ZIO_CHECKSUM_FLETCHER_4 }, + { "sha256", ZIO_CHECKSUM_SHA256 }, + { NULL } + }; + + static zprop_index_t compress_table[] = { + { "on", ZIO_COMPRESS_ON }, + { "off", ZIO_COMPRESS_OFF }, + { "lzjb", ZIO_COMPRESS_LZJB }, + { "gzip", ZIO_COMPRESS_GZIP_6 }, /* gzip default */ + { "gzip-1", ZIO_COMPRESS_GZIP_1 }, + { "gzip-2", ZIO_COMPRESS_GZIP_2 }, + { "gzip-3", ZIO_COMPRESS_GZIP_3 }, + { "gzip-4", ZIO_COMPRESS_GZIP_4 }, + { "gzip-5", ZIO_COMPRESS_GZIP_5 }, + { "gzip-6", ZIO_COMPRESS_GZIP_6 }, + { "gzip-7", ZIO_COMPRESS_GZIP_7 }, + { "gzip-8", ZIO_COMPRESS_GZIP_8 }, + { "gzip-9", ZIO_COMPRESS_GZIP_9 }, + { NULL } + }; + + static zprop_index_t snapdir_table[] = { + { "hidden", ZFS_SNAPDIR_HIDDEN }, + { "visible", ZFS_SNAPDIR_VISIBLE }, + { NULL } + }; + + static zprop_index_t acl_mode_table[] = { + { "discard", ZFS_ACL_DISCARD }, + { "groupmask", ZFS_ACL_GROUPMASK }, + { "passthrough", ZFS_ACL_PASSTHROUGH }, + { NULL } + }; + + static zprop_index_t acl_inherit_table[] = { + { "discard", ZFS_ACL_DISCARD }, + { "noallow", ZFS_ACL_NOALLOW }, + { "restricted", ZFS_ACL_RESTRICTED }, + { "passthrough", ZFS_ACL_PASSTHROUGH }, + { "secure", ZFS_ACL_RESTRICTED }, /* bkwrd compatability */ + { "passthrough-x", ZFS_ACL_PASSTHROUGH_X }, + { NULL } + }; + + static zprop_index_t case_table[] = { + { "sensitive", ZFS_CASE_SENSITIVE }, + { "insensitive", ZFS_CASE_INSENSITIVE }, + { "mixed", ZFS_CASE_MIXED }, + { NULL } + }; + + static zprop_index_t copies_table[] = { + { "1", 1 }, + { "2", 2 }, + { "3", 3 }, + { NULL } + }; + + /* + * Use the unique flags we have to send to u8_strcmp() and/or + * u8_textprep() to represent the various normalization property + * values. 
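+ * (formC/formD/formKC/formKD below correspond to the Unicode
+ * normalization forms NFC, NFD, NFKC and NFKD.)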
+ */ + static zprop_index_t normalize_table[] = { + { "none", 0 }, + { "formD", U8_TEXTPREP_NFD }, + { "formKC", U8_TEXTPREP_NFKC }, + { "formC", U8_TEXTPREP_NFC }, + { "formKD", U8_TEXTPREP_NFKD }, + { NULL } + }; + + static zprop_index_t version_table[] = { + { "1", 1 }, + { "2", 2 }, + { "3", 3 }, + { "current", ZPL_VERSION }, + { NULL } + }; + + static zprop_index_t boolean_table[] = { + { "off", 0 }, + { "on", 1 }, + { NULL } + }; + + static zprop_index_t canmount_table[] = { + { "off", ZFS_CANMOUNT_OFF }, + { "on", ZFS_CANMOUNT_ON }, + { "noauto", ZFS_CANMOUNT_NOAUTO }, + { NULL } + }; + + static zprop_index_t cache_table[] = { + { "none", ZFS_CACHE_NONE }, + { "metadata", ZFS_CACHE_METADATA }, + { "all", ZFS_CACHE_ALL }, + { NULL } + }; + + /* inherit index properties */ + register_index(ZFS_PROP_CHECKSUM, "checksum", ZIO_CHECKSUM_DEFAULT, + PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, + "on | off | fletcher2 | fletcher4 | sha256", "CHECKSUM", + checksum_table); + register_index(ZFS_PROP_COMPRESSION, "compression", + ZIO_COMPRESS_DEFAULT, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, + "on | off | lzjb | gzip | gzip-[1-9]", "COMPRESS", compress_table); + register_index(ZFS_PROP_SNAPDIR, "snapdir", ZFS_SNAPDIR_HIDDEN, + PROP_INHERIT, ZFS_TYPE_FILESYSTEM, + "hidden | visible", "SNAPDIR", snapdir_table); + register_index(ZFS_PROP_ACLMODE, "aclmode", ZFS_ACL_GROUPMASK, + PROP_INHERIT, ZFS_TYPE_FILESYSTEM, + "discard | groupmask | passthrough", "ACLMODE", acl_mode_table); + register_index(ZFS_PROP_ACLINHERIT, "aclinherit", ZFS_ACL_RESTRICTED, + PROP_INHERIT, ZFS_TYPE_FILESYSTEM, + "discard | noallow | restricted | passthrough | passthrough-x", + "ACLINHERIT", acl_inherit_table); + register_index(ZFS_PROP_COPIES, "copies", 1, + PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, + "1 | 2 | 3", "COPIES", copies_table); + register_index(ZFS_PROP_PRIMARYCACHE, "primarycache", + ZFS_CACHE_ALL, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT | ZFS_TYPE_VOLUME, + "all | none | metadata", "PRIMARYCACHE", cache_table); + register_index(ZFS_PROP_SECONDARYCACHE, "secondarycache", + ZFS_CACHE_ALL, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT | ZFS_TYPE_VOLUME, + "all | none | metadata", "SECONDARYCACHE", cache_table); + + /* inherit index (boolean) properties */ + register_index(ZFS_PROP_ATIME, "atime", 1, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM, "on | off", "ATIME", boolean_table); + register_index(ZFS_PROP_DEVICES, "devices", 1, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "DEVICES", + boolean_table); + register_index(ZFS_PROP_EXEC, "exec", 1, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "EXEC", + boolean_table); + register_index(ZFS_PROP_SETUID, "setuid", 1, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "SETUID", + boolean_table); + register_index(ZFS_PROP_READONLY, "readonly", 0, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "on | off", "RDONLY", + boolean_table); + register_index(ZFS_PROP_ZONED, "zoned", 0, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM, "on | off", "ZONED", boolean_table); + register_index(ZFS_PROP_XATTR, "xattr", 1, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "XATTR", + boolean_table); + register_index(ZFS_PROP_VSCAN, "vscan", 0, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM, "on | off", "VSCAN", + boolean_table); + register_index(ZFS_PROP_NBMAND, "nbmand", 0, PROP_INHERIT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "NBMAND", + boolean_table); + + 
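+ /*
+ * Example: the index registrations above map user-visible strings to
+ * numeric values, so zfs_prop_string_to_index(ZFS_PROP_ATIME, "off",
+ * &ival) sets ival to 0, and zfs_prop_index_to_string(ZFS_PROP_ATIME, 1,
+ * &sval) sets sval to "on" (see the conversion routines below).
+ */
+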
/* default index properties */ + register_index(ZFS_PROP_VERSION, "version", 0, PROP_DEFAULT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, + "1 | 2 | 3 | current", "VERSION", version_table); + register_index(ZFS_PROP_CANMOUNT, "canmount", ZFS_CANMOUNT_ON, + PROP_DEFAULT, ZFS_TYPE_FILESYSTEM, "on | off | noauto", + "CANMOUNT", canmount_table); + + /* readonly index (boolean) properties */ + register_index(ZFS_PROP_MOUNTED, "mounted", 0, PROP_READONLY, + ZFS_TYPE_FILESYSTEM, "yes | no", "MOUNTED", boolean_table); + + /* set once index properties */ + register_index(ZFS_PROP_NORMALIZE, "normalization", 0, + PROP_ONETIME, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, + "none | formC | formD | formKC | formKD", "NORMALIZATION", + normalize_table); + register_index(ZFS_PROP_CASE, "casesensitivity", ZFS_CASE_SENSITIVE, + PROP_ONETIME, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, + "sensitive | insensitive | mixed", "CASE", case_table); + + /* set once index (boolean) properties */ + register_index(ZFS_PROP_UTF8ONLY, "utf8only", 0, PROP_ONETIME, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, + "on | off", "UTF8ONLY", boolean_table); + + /* string properties */ + register_string(ZFS_PROP_ORIGIN, "origin", NULL, PROP_READONLY, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "", "ORIGIN"); + register_string(ZFS_PROP_MOUNTPOINT, "mountpoint", "/", PROP_INHERIT, + ZFS_TYPE_FILESYSTEM, " | legacy | none", "MOUNTPOINT"); + register_string(ZFS_PROP_SHARENFS, "sharenfs", "off", PROP_INHERIT, + ZFS_TYPE_FILESYSTEM, "on | off | share(1M) options", "SHARENFS"); + register_string(ZFS_PROP_SHAREISCSI, "shareiscsi", "off", PROP_INHERIT, + ZFS_TYPE_DATASET, "on | off | type=", "SHAREISCSI"); + register_string(ZFS_PROP_TYPE, "type", NULL, PROP_READONLY, + ZFS_TYPE_DATASET, "filesystem | volume | snapshot", "TYPE"); + register_string(ZFS_PROP_SHARESMB, "sharesmb", "off", PROP_INHERIT, + ZFS_TYPE_FILESYSTEM, "on | off | sharemgr(1M) options", "SHARESMB"); + + /* readonly number properties */ + register_number(ZFS_PROP_USED, "used", 0, PROP_READONLY, + ZFS_TYPE_DATASET, "", "USED"); + register_number(ZFS_PROP_AVAILABLE, "available", 0, PROP_READONLY, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "", "AVAIL"); + register_number(ZFS_PROP_REFERENCED, "referenced", 0, PROP_READONLY, + ZFS_TYPE_DATASET, "", "REFER"); + register_number(ZFS_PROP_COMPRESSRATIO, "compressratio", 0, + PROP_READONLY, ZFS_TYPE_DATASET, + "<1.00x or higher if compressed>", "RATIO"); + register_number(ZFS_PROP_VOLBLOCKSIZE, "volblocksize", 8192, + PROP_ONETIME, + ZFS_TYPE_VOLUME, "512 to 128k, power of 2", "VOLBLOCK"); + register_number(ZFS_PROP_USEDSNAP, "usedbysnapshots", 0, PROP_READONLY, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "", "USEDSNAP"); + register_number(ZFS_PROP_USEDDS, "usedbydataset", 0, PROP_READONLY, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "", "USEDDS"); + register_number(ZFS_PROP_USEDCHILD, "usedbychildren", 0, PROP_READONLY, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "", "USEDCHILD"); + register_number(ZFS_PROP_USEDREFRESERV, "usedbyrefreservation", 0, + PROP_READONLY, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "", "USEDREFRESERV"); + + /* default number properties */ + register_number(ZFS_PROP_QUOTA, "quota", 0, PROP_DEFAULT, + ZFS_TYPE_FILESYSTEM, " | none", "QUOTA"); + register_number(ZFS_PROP_RESERVATION, "reservation", 0, PROP_DEFAULT, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, " | none", "RESERV"); + register_number(ZFS_PROP_VOLSIZE, "volsize", 0, PROP_DEFAULT, + ZFS_TYPE_VOLUME, "", "VOLSIZE"); + register_number(ZFS_PROP_REFQUOTA, "refquota", 0, 
PROP_DEFAULT, + ZFS_TYPE_FILESYSTEM, " | none", "REFQUOTA"); + register_number(ZFS_PROP_REFRESERVATION, "refreservation", 0, + PROP_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, + " | none", "REFRESERV"); + + /* inherit number properties */ + register_number(ZFS_PROP_RECORDSIZE, "recordsize", SPA_MAXBLOCKSIZE, + PROP_INHERIT, + ZFS_TYPE_FILESYSTEM, "512 to 128k, power of 2", "RECSIZE"); + + /* hidden properties */ + register_hidden(ZFS_PROP_CREATETXG, "createtxg", PROP_TYPE_NUMBER, + PROP_READONLY, ZFS_TYPE_DATASET, NULL); + register_hidden(ZFS_PROP_NUMCLONES, "numclones", PROP_TYPE_NUMBER, + PROP_READONLY, ZFS_TYPE_SNAPSHOT, NULL); + register_hidden(ZFS_PROP_NAME, "name", PROP_TYPE_STRING, + PROP_READONLY, ZFS_TYPE_DATASET, "NAME"); + register_hidden(ZFS_PROP_ISCSIOPTIONS, "iscsioptions", PROP_TYPE_STRING, + PROP_INHERIT, ZFS_TYPE_VOLUME, "ISCSIOPTIONS"); + register_hidden(ZFS_PROP_GUID, "guid", PROP_TYPE_NUMBER, PROP_READONLY, + ZFS_TYPE_DATASET, "GUID"); + + /* oddball properties */ + register_impl(ZFS_PROP_CREATION, "creation", PROP_TYPE_NUMBER, 0, NULL, + PROP_READONLY, ZFS_TYPE_DATASET, + "", "CREATION", B_FALSE, B_TRUE, NULL); +} + +boolean_t +zfs_prop_delegatable(zfs_prop_t prop) +{ + zprop_desc_t *pd = &zfs_prop_table[prop]; + return (pd->pd_attr != PROP_READONLY); +} + +/* + * Given a zfs dataset property name, returns the corresponding property ID. + */ +zfs_prop_t +zfs_name_to_prop(const char *propname) +{ + return (zprop_name_to_prop(propname, ZFS_TYPE_DATASET)); +} + + +/* + * For user property names, we allow all lowercase alphanumeric characters, plus + * a few useful punctuation characters. + */ +static int +valid_char(char c) +{ + return ((c >= 'a' && c <= 'z') || + (c >= '0' && c <= '9') || + c == '-' || c == '_' || c == '.' || c == ':'); +} + +/* + * Returns true if this is a valid user-defined property (one with a ':'). + */ +boolean_t +zfs_prop_user(const char *name) +{ + int i; + char c; + boolean_t foundsep = B_FALSE; + + for (i = 0; i < strlen(name); i++) { + c = name[i]; + if (!valid_char(c)) + return (B_FALSE); + if (c == ':') + foundsep = B_TRUE; + } + + if (!foundsep) + return (B_FALSE); + + return (B_TRUE); +} + +/* + * Tables of index types, plus functions to convert between the user view + * (strings) and internal representation (uint64_t). + */ +int +zfs_prop_string_to_index(zfs_prop_t prop, const char *string, uint64_t *index) +{ + return (zprop_string_to_index(prop, string, index, ZFS_TYPE_DATASET)); +} + +int +zfs_prop_index_to_string(zfs_prop_t prop, uint64_t index, const char **string) +{ + return (zprop_index_to_string(prop, index, string, ZFS_TYPE_DATASET)); +} + +/* + * Returns TRUE if the property applies to any of the given dataset types. + */ +boolean_t +zfs_prop_valid_for_type(int prop, zfs_type_t types) +{ + return (zprop_valid_for_type(prop, types)); +} + +zprop_type_t +zfs_prop_get_type(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_proptype); +} + +/* + * Returns TRUE if the property is readonly. + */ +boolean_t +zfs_prop_readonly(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_attr == PROP_READONLY || + zfs_prop_table[prop].pd_attr == PROP_ONETIME); +} + +/* + * Returns TRUE if the property is only allowed to be set once. 
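+ * (PROP_ONETIME properties may only be set at creation time; after that
+ * they are treated as read-only, which is why zfs_prop_readonly() above
+ * also returns TRUE for them.)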
+ */ +boolean_t +zfs_prop_setonce(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_attr == PROP_ONETIME); +} + +const char * +zfs_prop_default_string(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_strdefault); +} + +uint64_t +zfs_prop_default_numeric(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_numdefault); +} + +/* + * Given a dataset property ID, returns the corresponding name. + * Assuming the zfs dataset property ID is valid. + */ +const char * +zfs_prop_to_name(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_name); +} + +/* + * Returns TRUE if the property is inheritable. + */ +boolean_t +zfs_prop_inheritable(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_attr == PROP_INHERIT || + zfs_prop_table[prop].pd_attr == PROP_ONETIME); +} + +#ifndef _KERNEL + +/* + * Returns a string describing the set of acceptable values for the given + * zfs property, or NULL if it cannot be set. + */ +const char * +zfs_prop_values(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_values); +} + +/* + * Returns TRUE if this property is a string type. Note that index types + * (compression, checksum) are treated as strings in userland, even though they + * are stored numerically on disk. + */ +int +zfs_prop_is_string(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_proptype == PROP_TYPE_STRING || + zfs_prop_table[prop].pd_proptype == PROP_TYPE_INDEX); +} + +/* + * Returns the column header for the given property. Used only in + * 'zfs list -o', but centralized here with the other property information. + */ +const char * +zfs_prop_column_name(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_colname); +} + +/* + * Returns whether the given property should be displayed right-justified for + * 'zfs list'. + */ +boolean_t +zfs_prop_align_right(zfs_prop_t prop) +{ + return (zfs_prop_table[prop].pd_rightalign); +} + +#endif diff --git a/external/cddl/osnet/dist/common/zfs/zfs_prop.h b/external/cddl/osnet/dist/common/zfs/zfs_prop.h new file mode 100644 index 000000000000..da5ae43093e5 --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zfs_prop.h @@ -0,0 +1,129 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _ZFS_PROP_H +#define _ZFS_PROP_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * For index types (e.g. compression and checksum), we want the numeric value + * in the kernel, but the string value in userland. 
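+ * For example, "compression=gzip-6" is stored internally as the numeric
+ * value ZIO_COMPRESS_GZIP_6 but is shown to the user as the string
+ * "gzip-6".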
+ */ +typedef enum { + PROP_TYPE_NUMBER, /* numeric value */ + PROP_TYPE_STRING, /* string value */ + PROP_TYPE_INDEX /* numeric value indexed by string */ +} zprop_type_t; + +typedef enum { + PROP_DEFAULT, + PROP_READONLY, + PROP_INHERIT, + /* + * ONETIME properties are a sort of conglomeration of READONLY + * and INHERIT. They can be set only during object creation, + * after that they are READONLY. If not explicitly set during + * creation, they can be inherited. + */ + PROP_ONETIME +} zprop_attr_t; + +typedef struct zfs_index { + const char *pi_name; + uint64_t pi_value; +} zprop_index_t; + +typedef struct { + const char *pd_name; /* human-readable property name */ + int pd_propnum; /* property number */ + zprop_type_t pd_proptype; /* string, boolean, index, number */ + const char *pd_strdefault; /* default for strings */ + uint64_t pd_numdefault; /* for boolean / index / number */ + zprop_attr_t pd_attr; /* default, readonly, inherit */ + int pd_types; /* bitfield of valid dataset types */ + /* fs | vol | snap; or pool */ + const char *pd_values; /* string telling acceptable values */ + const char *pd_colname; /* column header for "zfs list" */ + boolean_t pd_rightalign; /* column alignment for "zfs list" */ + boolean_t pd_visible; /* do we list this property with the */ + /* "zfs get" help message */ + const zprop_index_t *pd_table; /* for index properties, a table */ + /* defining the possible values */ +} zprop_desc_t; + +/* + * zfs dataset property functions + */ +void zfs_prop_init(void); +zprop_type_t zfs_prop_get_type(zfs_prop_t); +boolean_t zfs_prop_delegatable(zfs_prop_t prop); +zprop_desc_t *zfs_prop_get_table(void); + +/* + * zpool property functions + */ +void zpool_prop_init(void); +zprop_type_t zpool_prop_get_type(zpool_prop_t); +zprop_desc_t *zpool_prop_get_table(void); + +/* + * Common routines to initialize property tables + */ +void register_impl(int, const char *, zprop_type_t, uint64_t, + const char *, zprop_attr_t, int, const char *, const char *, + boolean_t, boolean_t, const zprop_index_t *); +void register_string(int, const char *, const char *, zprop_attr_t attr, + int, const char *, const char *); +void register_number(int, const char *, uint64_t, zprop_attr_t, int, + const char *, const char *); +void register_index(int, const char *, uint64_t, zprop_attr_t, int, + const char *, const char *, const zprop_index_t *); +void register_hidden(int, const char *, zprop_type_t, zprop_attr_t, + int, const char *); + +/* + * Common routines for zfs and zpool property management + */ +int zprop_iter_common(zprop_func, void *, boolean_t, boolean_t, zfs_type_t); +int zprop_name_to_prop(const char *, zfs_type_t); +int zprop_string_to_index(int, const char *, uint64_t *, zfs_type_t); +int zprop_index_to_string(int, uint64_t, const char **, zfs_type_t); +const char *zprop_values(int, zfs_type_t); +size_t zprop_width(int, boolean_t *, zfs_type_t); +boolean_t zprop_valid_for_type(int, zfs_type_t); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZFS_PROP_H */ diff --git a/external/cddl/osnet/dist/common/zfs/zpool_prop.c b/external/cddl/osnet/dist/common/zfs/zpool_prop.c new file mode 100644 index 000000000000..f5efe18d248b --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zpool_prop.c @@ -0,0 +1,186 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include + +#include "zfs_prop.h" + +#if defined(_KERNEL) +#include +#else +#include +#include +#include +#endif + +static zprop_desc_t zpool_prop_table[ZPOOL_NUM_PROPS]; + +zprop_desc_t * +zpool_prop_get_table(void) +{ + return (zpool_prop_table); +} + +void +zpool_prop_init(void) +{ + static zprop_index_t boolean_table[] = { + { "off", 0}, + { "on", 1}, + { NULL } + }; + + static zprop_index_t failuremode_table[] = { + { "wait", ZIO_FAILURE_MODE_WAIT }, + { "continue", ZIO_FAILURE_MODE_CONTINUE }, + { "panic", ZIO_FAILURE_MODE_PANIC }, + { NULL } + }; + + /* string properties */ + register_string(ZPOOL_PROP_ALTROOT, "altroot", NULL, PROP_DEFAULT, + ZFS_TYPE_POOL, "", "ALTROOT"); + register_string(ZPOOL_PROP_BOOTFS, "bootfs", NULL, PROP_DEFAULT, + ZFS_TYPE_POOL, "", "BOOTFS"); + register_string(ZPOOL_PROP_CACHEFILE, "cachefile", NULL, PROP_DEFAULT, + ZFS_TYPE_POOL, " | none", "CACHEFILE"); + + /* readonly number properties */ + register_number(ZPOOL_PROP_SIZE, "size", 0, PROP_READONLY, + ZFS_TYPE_POOL, "", "SIZE"); + register_number(ZPOOL_PROP_USED, "used", 0, PROP_READONLY, + ZFS_TYPE_POOL, "", "USED"); + register_number(ZPOOL_PROP_AVAILABLE, "available", 0, PROP_READONLY, + ZFS_TYPE_POOL, "", "AVAIL"); + register_number(ZPOOL_PROP_CAPACITY, "capacity", 0, PROP_READONLY, + ZFS_TYPE_POOL, "", "CAP"); + register_number(ZPOOL_PROP_GUID, "guid", 0, PROP_READONLY, + ZFS_TYPE_POOL, "", "GUID"); + register_number(ZPOOL_PROP_HEALTH, "health", 0, PROP_READONLY, + ZFS_TYPE_POOL, "", "HEALTH"); + + /* default number properties */ + register_number(ZPOOL_PROP_VERSION, "version", SPA_VERSION, + PROP_DEFAULT, ZFS_TYPE_POOL, "", "VERSION"); + + /* default index (boolean) properties */ + register_index(ZPOOL_PROP_DELEGATION, "delegation", 1, PROP_DEFAULT, + ZFS_TYPE_POOL, "on | off", "DELEGATION", boolean_table); + register_index(ZPOOL_PROP_AUTOREPLACE, "autoreplace", 0, PROP_DEFAULT, + ZFS_TYPE_POOL, "on | off", "REPLACE", boolean_table); + register_index(ZPOOL_PROP_LISTSNAPS, "listsnapshots", 0, PROP_DEFAULT, + ZFS_TYPE_POOL, "on | off", "LISTSNAPS", boolean_table); + + /* default index properties */ + register_index(ZPOOL_PROP_FAILUREMODE, "failmode", + ZIO_FAILURE_MODE_WAIT, PROP_DEFAULT, ZFS_TYPE_POOL, + "wait | continue | panic", "FAILMODE", failuremode_table); + + /* hidden properties */ + register_hidden(ZPOOL_PROP_NAME, "name", PROP_TYPE_STRING, + PROP_READONLY, ZFS_TYPE_POOL, "NAME"); +} + +/* + * Given a property name and its type, returns the corresponding property ID. + */ +zpool_prop_t +zpool_name_to_prop(const char *propname) +{ + return (zprop_name_to_prop(propname, ZFS_TYPE_POOL)); +} + +/* + * Given a pool property ID, returns the corresponding name. + * Assuming the pool propety ID is valid. 
+ */ +const char * +zpool_prop_to_name(zpool_prop_t prop) +{ + return (zpool_prop_table[prop].pd_name); +} + +zprop_type_t +zpool_prop_get_type(zpool_prop_t prop) +{ + return (zpool_prop_table[prop].pd_proptype); +} + +boolean_t +zpool_prop_readonly(zpool_prop_t prop) +{ + return (zpool_prop_table[prop].pd_attr == PROP_READONLY); +} + +const char * +zpool_prop_default_string(zpool_prop_t prop) +{ + return (zpool_prop_table[prop].pd_strdefault); +} + +uint64_t +zpool_prop_default_numeric(zpool_prop_t prop) +{ + return (zpool_prop_table[prop].pd_numdefault); +} + +int +zpool_prop_string_to_index(zpool_prop_t prop, const char *string, + uint64_t *index) +{ + return (zprop_string_to_index(prop, string, index, ZFS_TYPE_POOL)); +} + +int +zpool_prop_index_to_string(zpool_prop_t prop, uint64_t index, + const char **string) +{ + return (zprop_index_to_string(prop, index, string, ZFS_TYPE_POOL)); +} + +#ifndef _KERNEL + +const char * +zpool_prop_values(zpool_prop_t prop) +{ + return (zpool_prop_table[prop].pd_values); +} + +const char * +zpool_prop_column_name(zpool_prop_t prop) +{ + return (zpool_prop_table[prop].pd_colname); +} + +boolean_t +zpool_prop_align_right(zpool_prop_t prop) +{ + return (zpool_prop_table[prop].pd_rightalign); +} +#endif diff --git a/external/cddl/osnet/dist/common/zfs/zprop_common.c b/external/cddl/osnet/dist/common/zfs/zprop_common.c new file mode 100644 index 000000000000..bd267e2e61ca --- /dev/null +++ b/external/cddl/osnet/dist/common/zfs/zprop_common.c @@ -0,0 +1,406 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * Common routines used by zfs and zpool property management. 
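+ * Both property tables are arrays of zprop_desc_t; the routines here pick
+ * the zfs or zpool table via zprop_get_proptable() based on whether the
+ * caller passes ZFS_TYPE_POOL.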
+ */ + +#include +#include +#include +#include +#include +#include + +#include "zfs_prop.h" +#include "zfs_deleg.h" + +#if defined(_KERNEL) +#include +#include +#else +#include +#include +#include +#endif + +static zprop_desc_t * +zprop_get_proptable(zfs_type_t type) +{ + if (type == ZFS_TYPE_POOL) + return (zpool_prop_get_table()); + else + return (zfs_prop_get_table()); +} + +static int +zprop_get_numprops(zfs_type_t type) +{ + if (type == ZFS_TYPE_POOL) + return (ZPOOL_NUM_PROPS); + else + return (ZFS_NUM_PROPS); +} + +void +register_impl(int prop, const char *name, zprop_type_t type, + uint64_t numdefault, const char *strdefault, zprop_attr_t attr, + int objset_types, const char *values, const char *colname, + boolean_t rightalign, boolean_t visible, const zprop_index_t *idx_tbl) +{ + zprop_desc_t *prop_tbl = zprop_get_proptable(objset_types); + zprop_desc_t *pd; + + pd = &prop_tbl[prop]; + + ASSERT(pd->pd_name == NULL || pd->pd_name == name); + + pd->pd_name = name; + pd->pd_propnum = prop; + pd->pd_proptype = type; + pd->pd_numdefault = numdefault; + pd->pd_strdefault = strdefault; + pd->pd_attr = attr; + pd->pd_types = objset_types; + pd->pd_values = values; + pd->pd_colname = colname; + pd->pd_rightalign = rightalign; + pd->pd_visible = visible; + pd->pd_table = idx_tbl; +} + +void +register_string(int prop, const char *name, const char *def, + zprop_attr_t attr, int objset_types, const char *values, + const char *colname) +{ + register_impl(prop, name, PROP_TYPE_STRING, 0, def, attr, + objset_types, values, colname, B_FALSE, B_TRUE, NULL); + +} + +void +register_number(int prop, const char *name, uint64_t def, zprop_attr_t attr, + int objset_types, const char *values, const char *colname) +{ + register_impl(prop, name, PROP_TYPE_NUMBER, def, NULL, attr, + objset_types, values, colname, B_TRUE, B_TRUE, NULL); +} + +void +register_index(int prop, const char *name, uint64_t def, zprop_attr_t attr, + int objset_types, const char *values, const char *colname, + const zprop_index_t *idx_tbl) +{ + register_impl(prop, name, PROP_TYPE_INDEX, def, NULL, attr, + objset_types, values, colname, B_TRUE, B_TRUE, idx_tbl); +} + +void +register_hidden(int prop, const char *name, zprop_type_t type, + zprop_attr_t attr, int objset_types, const char *colname) +{ + register_impl(prop, name, type, 0, NULL, attr, + objset_types, NULL, colname, B_FALSE, B_FALSE, NULL); +} + + +/* + * A comparison function we can use to order indexes into property tables. + */ +static int +zprop_compare(const void *arg1, const void *arg2) +{ + const zprop_desc_t *p1 = *((zprop_desc_t **)arg1); + const zprop_desc_t *p2 = *((zprop_desc_t **)arg2); + boolean_t p1ro, p2ro; + + p1ro = (p1->pd_attr == PROP_READONLY); + p2ro = (p2->pd_attr == PROP_READONLY); + + if (p1ro == p2ro) + return (strcmp(p1->pd_name, p2->pd_name)); + + return (p1ro ? -1 : 1); +} + +/* + * Iterate over all properties in the given property table, calling back + * into the specified function for each property. We will continue to + * iterate until we either reach the end or the callback function returns + * something other than ZPROP_CONT. 
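+ * When 'ordered' is set, properties are visited with read-only properties
+ * first and alphabetically within each group (see zprop_compare() above);
+ * when 'show_all' is set, hidden (pd_visible == B_FALSE) properties are
+ * included as well.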
+ */ +int +zprop_iter_common(zprop_func func, void *cb, boolean_t show_all, + boolean_t ordered, zfs_type_t type) +{ + int i, num_props, size, prop; + zprop_desc_t *prop_tbl; + zprop_desc_t **order; + + prop_tbl = zprop_get_proptable(type); + num_props = zprop_get_numprops(type); + size = num_props * sizeof (zprop_desc_t *); + +#if defined(_KERNEL) + order = kmem_alloc(size, KM_SLEEP); +#else + if ((order = malloc(size)) == NULL) + return (ZPROP_CONT); +#endif + + for (int j = 0; j < num_props; j++) + order[j] = &prop_tbl[j]; + + if (ordered) { + qsort((void *)order, num_props, sizeof (zprop_desc_t *), + zprop_compare); + } + + prop = ZPROP_CONT; + for (i = 0; i < num_props; i++) { + if ((order[i]->pd_visible || show_all) && + (func(order[i]->pd_propnum, cb) != ZPROP_CONT)) { + prop = order[i]->pd_propnum; + break; + } + } + +#if defined(_KERNEL) + kmem_free(order, size); +#else + free(order); +#endif + return (prop); +} + +static boolean_t +propname_match(const char *p, size_t len, zprop_desc_t *prop_entry) +{ + const char *propname = prop_entry->pd_name; +#ifndef _KERNEL + const char *colname = prop_entry->pd_colname; + int c; + + if (colname == NULL) + return (B_FALSE); +#endif + + if (len == strlen(propname) && + strncmp(p, propname, len) == 0) + return (B_TRUE); + +#ifndef _KERNEL + if (len != strlen(colname)) + return (B_FALSE); + + for (c = 0; c < len; c++) + if (p[c] != tolower(colname[c])) + break; + + return (colname[c] == '\0'); +#else + return (B_FALSE); +#endif +} + +typedef struct name_to_prop_cb { + const char *propname; + zprop_desc_t *prop_tbl; +} name_to_prop_cb_t; + +static int +zprop_name_to_prop_cb(int prop, void *cb_data) +{ + name_to_prop_cb_t *data = cb_data; + + if (propname_match(data->propname, strlen(data->propname), + &data->prop_tbl[prop])) + return (prop); + + return (ZPROP_CONT); +} + +int +zprop_name_to_prop(const char *propname, zfs_type_t type) +{ + int prop; + name_to_prop_cb_t cb_data; + + cb_data.propname = propname; + cb_data.prop_tbl = zprop_get_proptable(type); + + prop = zprop_iter_common(zprop_name_to_prop_cb, &cb_data, + B_TRUE, B_FALSE, type); + + return (prop == ZPROP_CONT ? 
ZPROP_INVAL : prop); +} + +int +zprop_string_to_index(int prop, const char *string, uint64_t *index, + zfs_type_t type) +{ + zprop_desc_t *prop_tbl; + const zprop_index_t *idx_tbl; + int i; + + if (prop == ZPROP_INVAL || prop == ZPROP_CONT) + return (-1); + + ASSERT(prop < zprop_get_numprops(type)); + prop_tbl = zprop_get_proptable(type); + if ((idx_tbl = prop_tbl[prop].pd_table) == NULL) + return (-1); + + for (i = 0; idx_tbl[i].pi_name != NULL; i++) { + if (strcmp(string, idx_tbl[i].pi_name) == 0) { + *index = idx_tbl[i].pi_value; + return (0); + } + } + + return (-1); +} + +int +zprop_index_to_string(int prop, uint64_t index, const char **string, + zfs_type_t type) +{ + zprop_desc_t *prop_tbl; + const zprop_index_t *idx_tbl; + int i; + + if (prop == ZPROP_INVAL || prop == ZPROP_CONT) + return (-1); + + ASSERT(prop < zprop_get_numprops(type)); + prop_tbl = zprop_get_proptable(type); + if ((idx_tbl = prop_tbl[prop].pd_table) == NULL) + return (-1); + + for (i = 0; idx_tbl[i].pi_name != NULL; i++) { + if (idx_tbl[i].pi_value == index) { + *string = idx_tbl[i].pi_name; + return (0); + } + } + + return (-1); +} + +const char * +zprop_values(int prop, zfs_type_t type) +{ + zprop_desc_t *prop_tbl; + + ASSERT(prop != ZPROP_INVAL && prop != ZPROP_CONT); + ASSERT(prop < zprop_get_numprops(type)); + + prop_tbl = zprop_get_proptable(type); + + return (prop_tbl[prop].pd_values); +} + +/* + * Returns TRUE if the property applies to any of the given dataset types. + */ +boolean_t +zprop_valid_for_type(int prop, zfs_type_t type) +{ + zprop_desc_t *prop_tbl; + + if (prop == ZPROP_INVAL || prop == ZPROP_CONT) + return (B_FALSE); + + ASSERT(prop < zprop_get_numprops(type)); + prop_tbl = zprop_get_proptable(type); + return ((prop_tbl[prop].pd_types & type) != 0); +} + +#ifndef _KERNEL + +/* + * Determines the minimum width for the column, and indicates whether it's fixed + * or not. Only string columns are non-fixed. + */ +size_t +zprop_width(int prop, boolean_t *fixed, zfs_type_t type) +{ + zprop_desc_t *prop_tbl, *pd; + const zprop_index_t *idx; + size_t ret; + int i; + + ASSERT(prop != ZPROP_INVAL && prop != ZPROP_CONT); + ASSERT(prop < zprop_get_numprops(type)); + + prop_tbl = zprop_get_proptable(type); + pd = &prop_tbl[prop]; + + *fixed = B_TRUE; + + /* + * Start with the width of the column name. + */ + ret = strlen(pd->pd_colname); + + /* + * For fixed-width values, make sure the width is large enough to hold + * any possible value. + */ + switch (pd->pd_proptype) { + case PROP_TYPE_NUMBER: + /* + * The maximum length of a human-readable number is 5 characters + * ("20.4M", for example). + */ + if (ret < 5) + ret = 5; + /* + * 'creation' is handled specially because it's a number + * internally, but displayed as a date string. + */ + if (prop == ZFS_PROP_CREATION) + *fixed = B_FALSE; + break; + case PROP_TYPE_INDEX: + idx = prop_tbl[prop].pd_table; + for (i = 0; idx[i].pi_name != NULL; i++) { + if (strlen(idx[i].pi_name) > ret) + ret = strlen(idx[i].pi_name); + } + break; + + case PROP_TYPE_STRING: + *fixed = B_FALSE; + break; + } + + return (ret); +} + +#endif diff --git a/external/cddl/osnet/dist/head/ucred.h b/external/cddl/osnet/dist/head/ucred.h new file mode 100644 index 000000000000..e10ab26f5210 --- /dev/null +++ b/external/cddl/osnet/dist/head/ucred.h @@ -0,0 +1,108 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _UCRED_H_ +#define _UCRED_H_ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ucred_s ucred_t; + +/* + * library functions prototype. + */ +#if defined(__STDC__) + +extern ucred_t *ucred_get(pid_t pid); + +extern void ucred_free(ucred_t *); + +extern uid_t ucred_geteuid(const ucred_t *); +extern uid_t ucred_getruid(const ucred_t *); +extern uid_t ucred_getsuid(const ucred_t *); +extern gid_t ucred_getegid(const ucred_t *); +extern gid_t ucred_getrgid(const ucred_t *); +extern gid_t ucred_getsgid(const ucred_t *); +extern int ucred_getgroups(const ucred_t *, const gid_t **); + +extern const priv_set_t *ucred_getprivset(const ucred_t *, priv_ptype_t); +extern uint_t ucred_getpflags(const ucred_t *, uint_t); + +extern pid_t ucred_getpid(const ucred_t *); /* for door_cred compatibility */ + +extern size_t ucred_size(void); + +extern int getpeerucred(int, ucred_t **); + +extern zoneid_t ucred_getzoneid(const ucred_t *); + +extern bslabel_t *ucred_getlabel(const ucred_t *); + +extern projid_t ucred_getprojid(const ucred_t *); + +#else /* Non ANSI */ + +extern ucred_t *ucred_get(/* pid_t pid */); + +extern void ucred_free(/* ucred_t * */); + +extern uid_t ucred_geteuid(/* ucred_t * */); +extern uid_t ucred_getruid(/* ucred_t * */); +extern uid_t ucred_getsuid(/* ucred_t * */); +extern gid_t ucred_getegid(/* ucred_t * */); +extern gid_t ucred_getrgid(/* ucred_t * */); +extern gid_t ucred_getsgid(/* ucred_t * */); +extern int ucred_getgroups(/* ucred_t *, gid_t ** */); + +extern priv_set_t *ucred_getprivset(/* ucred_t *, priv_ptype_t */); +extern uint_t ucred_getpflags(/* ucred_t *, uint_t */); + +extern pid_t ucred_getpid(/* ucred_t * */); + +extern size_t ucred_size(/* void */); + +extern int getpeerucred(/* int, ucred_t ** */); + +extern zoneid_t ucred_getzoneid(/* ucred_t * */); + +extern bslabel_t *ucred_getlabel(/* const ucred_t * */); + +extern projid_t ucred_getprojid(/* ucred_t * */); + +#endif /* __STDC__ */ + +#ifdef __cplusplus +} +#endif + +#endif /* _UCRED_H_ */ diff --git a/external/cddl/osnet/dist/lib/libefi/common/crc32_efi.c b/external/cddl/osnet/dist/lib/libefi/common/crc32_efi.c new file mode 100644 index 000000000000..1dba28ad7211 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libefi/common/crc32_efi.c @@ -0,0 +1,47 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. 
+ * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2002 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +static unsigned int crc32_tab[] = { CRC32_TABLE }; + +/* + * Return a 32-bit CRC of the contents of the buffer. + * + * The seed is 0xffffffff and the result is XORed with 0xffffffff + * because this is what the Itanium firmware expects. + */ +unsigned int +efi_crc32(const unsigned char *s, unsigned int len) +{ + unsigned int crc32val; + + CRC32(crc32val, s, len, -1U, crc32_tab); + + return (crc32val ^ -1U); +} diff --git a/external/cddl/osnet/dist/lib/libnvpair/libnvpair.c b/external/cddl/osnet/dist/lib/libnvpair/libnvpair.c new file mode 100644 index 000000000000..0845cb08cf8d --- /dev/null +++ b/external/cddl/osnet/dist/lib/libnvpair/libnvpair.c @@ -0,0 +1,618 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include "libnvpair.h" + +/* + * libnvpair - A tools library for manipulating pairs. + * + * This library provides routines packing an unpacking nv pairs + * for transporting data across process boundaries, transporting + * between kernel and userland, and possibly saving onto disk files. 
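+ * The public entry points below are nvlist_print(), which dumps an nvlist
+ * to a FILE, and nvpair_value_match()/nvpair_value_match_regex(), which
+ * test whether a string matches an nvpair value.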
+ */ + +static void +indent(FILE *fp, int depth) +{ + while (depth-- > 0) + (void) fprintf(fp, "\t"); +} + +/* + * nvlist_print - Prints elements in an event buffer + */ +static +void +nvlist_print_with_indent(FILE *fp, nvlist_t *nvl, int depth) +{ + int i; + char *name; + uint_t nelem; + nvpair_t *nvp; + + if (nvl == NULL) + return; + + indent(fp, depth); + (void) fprintf(fp, "nvlist version: %d\n", NVL_VERSION(nvl)); + + nvp = nvlist_next_nvpair(nvl, NULL); + + while (nvp) { + data_type_t type = nvpair_type(nvp); + + indent(fp, depth); + name = nvpair_name(nvp); + (void) fprintf(fp, "\t%s =", name); + nelem = 0; + switch (type) { + case DATA_TYPE_BOOLEAN: { + (void) fprintf(fp, " 1"); + break; + } + case DATA_TYPE_BOOLEAN_VALUE: { + boolean_t val; + (void) nvpair_value_boolean_value(nvp, &val); + (void) fprintf(fp, " %d", val); + break; + } + case DATA_TYPE_BYTE: { + uchar_t val; + (void) nvpair_value_byte(nvp, &val); + (void) fprintf(fp, " 0x%2.2x", val); + break; + } + case DATA_TYPE_INT8: { + int8_t val; + (void) nvpair_value_int8(nvp, &val); + (void) fprintf(fp, " %d", val); + break; + } + case DATA_TYPE_UINT8: { + uint8_t val; + (void) nvpair_value_uint8(nvp, &val); + (void) fprintf(fp, " 0x%x", val); + break; + } + case DATA_TYPE_INT16: { + int16_t val; + (void) nvpair_value_int16(nvp, &val); + (void) fprintf(fp, " %d", val); + break; + } + case DATA_TYPE_UINT16: { + uint16_t val; + (void) nvpair_value_uint16(nvp, &val); + (void) fprintf(fp, " 0x%x", val); + break; + } + case DATA_TYPE_INT32: { + int32_t val; + (void) nvpair_value_int32(nvp, &val); + (void) fprintf(fp, " %d", val); + break; + } + case DATA_TYPE_UINT32: { + uint32_t val; + (void) nvpair_value_uint32(nvp, &val); + (void) fprintf(fp, " 0x%x", val); + break; + } + case DATA_TYPE_INT64: { + int64_t val; + (void) nvpair_value_int64(nvp, &val); + (void) fprintf(fp, " %lld", (longlong_t)val); + break; + } + case DATA_TYPE_UINT64: { + uint64_t val; + (void) nvpair_value_uint64(nvp, &val); + (void) fprintf(fp, " 0x%llx", (u_longlong_t)val); + break; + } + case DATA_TYPE_DOUBLE: { + double val; + (void) nvpair_value_double(nvp, &val); + (void) fprintf(fp, " 0x%llf", val); + break; + } + case DATA_TYPE_STRING: { + char *val; + (void) nvpair_value_string(nvp, &val); + (void) fprintf(fp, " %s", val); + break; + } + case DATA_TYPE_BOOLEAN_ARRAY: { + boolean_t *val; + (void) nvpair_value_boolean_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " %d", val[i]); + break; + } + case DATA_TYPE_BYTE_ARRAY: { + uchar_t *val; + (void) nvpair_value_byte_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " 0x%2.2x", val[i]); + break; + } + case DATA_TYPE_INT8_ARRAY: { + int8_t *val; + (void) nvpair_value_int8_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " %d", val[i]); + break; + } + case DATA_TYPE_UINT8_ARRAY: { + uint8_t *val; + (void) nvpair_value_uint8_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " 0x%x", val[i]); + break; + } + case DATA_TYPE_INT16_ARRAY: { + int16_t *val; + (void) nvpair_value_int16_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " %d", val[i]); + break; + } + case DATA_TYPE_UINT16_ARRAY: { + uint16_t *val; + (void) nvpair_value_uint16_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " 0x%x", val[i]); + break; + } + case DATA_TYPE_INT32_ARRAY: { + int32_t *val; + (void) nvpair_value_int32_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + 
(void) fprintf(fp, " %d", val[i]); + break; + } + case DATA_TYPE_UINT32_ARRAY: { + uint32_t *val; + (void) nvpair_value_uint32_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " 0x%x", val[i]); + break; + } + case DATA_TYPE_INT64_ARRAY: { + int64_t *val; + (void) nvpair_value_int64_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " %lld", (longlong_t)val[i]); + break; + } + case DATA_TYPE_UINT64_ARRAY: { + uint64_t *val; + (void) nvpair_value_uint64_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " 0x%llx", + (u_longlong_t)val[i]); + break; + } + case DATA_TYPE_STRING_ARRAY: { + char **val; + (void) nvpair_value_string_array(nvp, &val, &nelem); + for (i = 0; i < nelem; i++) + (void) fprintf(fp, " %s", val[i]); + break; + } + case DATA_TYPE_HRTIME: { + hrtime_t val; + (void) nvpair_value_hrtime(nvp, &val); + (void) fprintf(fp, " 0x%llx", val); + break; + } + case DATA_TYPE_NVLIST: { + nvlist_t *val; + (void) nvpair_value_nvlist(nvp, &val); + (void) fprintf(fp, " (embedded nvlist)\n"); + nvlist_print_with_indent(fp, val, depth + 1); + indent(fp, depth + 1); + (void) fprintf(fp, "(end %s)\n", name); + break; + } + case DATA_TYPE_NVLIST_ARRAY: { + nvlist_t **val; + (void) nvpair_value_nvlist_array(nvp, &val, &nelem); + (void) fprintf(fp, " (array of embedded nvlists)\n"); + for (i = 0; i < nelem; i++) { + indent(fp, depth + 1); + (void) fprintf(fp, + "(start %s[%d])\n", name, i); + nvlist_print_with_indent(fp, val[i], depth + 1); + indent(fp, depth + 1); + (void) fprintf(fp, "(end %s[%d])\n", name, i); + } + break; + } + default: + (void) fprintf(fp, " unknown data type (%d)", type); + break; + } + (void) fprintf(fp, "\n"); + nvp = nvlist_next_nvpair(nvl, nvp); + } +} + +void +nvlist_print(FILE *fp, nvlist_t *nvl) +{ + nvlist_print_with_indent(fp, nvl, 0); +} + +/* + * Determine if string 'value' matches 'nvp' value. The 'value' string is + * converted, depending on the type of 'nvp', prior to match. For numeric + * types, a radix independent sscanf conversion of 'value' is used. If 'nvp' + * is an array type, 'ai' is the index into the array against which we are + * checking for match. If nvp is of DATA_TYPE_STRING*, the caller can pass + * in a regex_t compilation of value in 'value_regex' to trigger regular + * expression string match instead of simple strcmp(). + * + * Return 1 on match, 0 on no-match, and -1 on error. If the error is + * related to value syntax error and 'ep' is non-NULL, *ep will point into + * the 'value' string at the location where the error exists. + * + * NOTE: It may be possible to move the non-regex_t version of this into + * common code used by library/kernel/boot. 
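+ * For example, given a hypothetical DATA_TYPE_UINT64 nvpair holding the
+ * value 0x1000, nvpair_value_match(nvp, -1, "0x1000", NULL) returns 1.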
+ */ +int +nvpair_value_match_regex(nvpair_t *nvp, int ai, + char *value, regex_t *value_regex, char **ep) +{ + char *evalue; + uint_t a_len; + int sr; + + if (ep) + *ep = NULL; + + if ((nvp == NULL) || (value == NULL)) + return (-1); /* error fail match - invalid args */ + + /* make sure array and index combination make sense */ + if ((nvpair_type_is_array(nvp) && (ai < 0)) || + (!nvpair_type_is_array(nvp) && (ai >= 0))) + return (-1); /* error fail match - bad index */ + + /* non-string values should be single 'chunk' */ + if ((nvpair_type(nvp) != DATA_TYPE_STRING) && + (nvpair_type(nvp) != DATA_TYPE_STRING_ARRAY)) { + value += strspn(value, " \t"); + evalue = value + strcspn(value, " \t"); + if (*evalue) { + if (ep) + *ep = evalue; + return (-1); /* error fail match - syntax */ + } + } + + sr = EOF; + switch (nvpair_type(nvp)) { + case DATA_TYPE_STRING: { + char *val; + + /* check string value for match */ + if (nvpair_value_string(nvp, &val) == 0) { + if (value_regex) { + if (regexec(value_regex, val, + (size_t)0, NULL, 0) == 0) + return (1); /* match */ + } else { + if (strcmp(value, val) == 0) + return (1); /* match */ + } + } + break; + } + case DATA_TYPE_STRING_ARRAY: { + char **val_array; + + /* check indexed string value of array for match */ + if ((nvpair_value_string_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len)) { + if (value_regex) { + if (regexec(value_regex, val_array[ai], + (size_t)0, NULL, 0) == 0) + return (1); + } else { + if (strcmp(value, val_array[ai]) == 0) + return (1); + } + } + break; + } + case DATA_TYPE_BYTE: { + uchar_t val, val_arg; + + /* scanf uchar_t from value and check for match */ + sr = sscanf(value, "%c", &val_arg); + if ((sr == 1) && (nvpair_value_byte(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_BYTE_ARRAY: { + uchar_t *val_array, val_arg; + + + /* check indexed value of array for match */ + sr = sscanf(value, "%c", &val_arg); + if ((sr == 1) && + (nvpair_value_byte_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_INT8: { + int8_t val, val_arg; + + /* scanf int8_t from value and check for match */ + sr = sscanf(value, "%"SCNi8, &val_arg); + if ((sr == 1) && + (nvpair_value_int8(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_INT8_ARRAY: { + int8_t *val_array, val_arg; + + /* check indexed value of array for match */ + sr = sscanf(value, "%"SCNi8, &val_arg); + if ((sr == 1) && + (nvpair_value_int8_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_UINT8: { + uint8_t val, val_arg; + + /* scanf uint8_t from value and check for match */ + sr = sscanf(value, "%"SCNi8, (int8_t *)&val_arg); + if ((sr == 1) && + (nvpair_value_uint8(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_UINT8_ARRAY: { + uint8_t *val_array, val_arg; + + /* check indexed value of array for match */ + sr = sscanf(value, "%"SCNi8, (int8_t *)&val_arg); + if ((sr == 1) && + (nvpair_value_uint8_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_INT16: { + int16_t val, val_arg; + + /* scanf int16_t from value and check for match */ + sr = sscanf(value, "%"SCNi16, &val_arg); + if ((sr == 1) && + (nvpair_value_int16(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_INT16_ARRAY: { + int16_t 
*val_array, val_arg; + + /* check indexed value of array for match */ + sr = sscanf(value, "%"SCNi16, &val_arg); + if ((sr == 1) && + (nvpair_value_int16_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_UINT16: { + uint16_t val, val_arg; + + /* scanf uint16_t from value and check for match */ + sr = sscanf(value, "%"SCNi16, (int16_t *)&val_arg); + if ((sr == 1) && + (nvpair_value_uint16(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_UINT16_ARRAY: { + uint16_t *val_array, val_arg; + + /* check indexed value of array for match */ + sr = sscanf(value, "%"SCNi16, (int16_t *)&val_arg); + if ((sr == 1) && + (nvpair_value_uint16_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_INT32: { + int32_t val, val_arg; + + /* scanf int32_t from value and check for match */ + sr = sscanf(value, "%"SCNi32, &val_arg); + if ((sr == 1) && + (nvpair_value_int32(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_INT32_ARRAY: { + int32_t *val_array, val_arg; + + /* check indexed value of array for match */ + sr = sscanf(value, "%"SCNi32, &val_arg); + if ((sr == 1) && + (nvpair_value_int32_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_UINT32: { + uint32_t val, val_arg; + + /* scanf uint32_t from value and check for match */ + sr = sscanf(value, "%"SCNi32, (int32_t *)&val_arg); + if ((sr == 1) && + (nvpair_value_uint32(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_UINT32_ARRAY: { + uint32_t *val_array, val_arg; + + /* check indexed value of array for match */ + sr = sscanf(value, "%"SCNi32, (int32_t *)&val_arg); + if ((sr == 1) && + (nvpair_value_uint32_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_INT64: { + int64_t val, val_arg; + + /* scanf int64_t from value and check for match */ + sr = sscanf(value, "%"SCNi64, &val_arg); + if ((sr == 1) && + (nvpair_value_int64(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_INT64_ARRAY: { + int64_t *val_array, val_arg; + + /* check indexed value of array for match */ + sr = sscanf(value, "%"SCNi64, &val_arg); + if ((sr == 1) && + (nvpair_value_int64_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_UINT64: { + uint64_t val_arg, val; + + /* scanf uint64_t from value and check for match */ + sr = sscanf(value, "%"SCNi64, (int64_t *)&val_arg); + if ((sr == 1) && + (nvpair_value_uint64(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_UINT64_ARRAY: { + uint64_t *val_array, val_arg; + + /* check indexed value of array for match */ + sr = sscanf(value, "%"SCNi64, (int64_t *)&val_arg); + if ((sr == 1) && + (nvpair_value_uint64_array(nvp, &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_BOOLEAN_VALUE: { + boolean_t val, val_arg; + + /* scanf boolean_t from value and check for match */ + sr = sscanf(value, "%"SCNi32, &val_arg); + if ((sr == 1) && + (nvpair_value_boolean_value(nvp, &val) == 0) && + (val == val_arg)) + return (1); + break; + } + case DATA_TYPE_BOOLEAN_ARRAY: { + boolean_t *val_array, val_arg; + + /* check indexed 
value of array for match */ + sr = sscanf(value, "%"SCNi32, &val_arg); + if ((sr == 1) && + (nvpair_value_boolean_array(nvp, + &val_array, &a_len) == 0) && + (ai < a_len) && + (val_array[ai] == val_arg)) + return (1); + break; + } + case DATA_TYPE_HRTIME: + case DATA_TYPE_NVLIST: + case DATA_TYPE_NVLIST_ARRAY: + case DATA_TYPE_BOOLEAN: + case DATA_TYPE_DOUBLE: + case DATA_TYPE_UNKNOWN: + default: + /* + * unknown/unsupported data type + */ + return (-1); /* error fail match */ + } + + /* + * check to see if sscanf failed conversion, return approximate + * pointer to problem + */ + if (sr != 1) { + if (ep) + *ep = value; + return (-1); /* error fail match - syntax */ + } + + return (0); /* fail match */ +} + +int +nvpair_value_match(nvpair_t *nvp, int ai, char *value, char **ep) +{ + return (nvpair_value_match_regex(nvp, ai, value, NULL, ep)); +} diff --git a/external/cddl/osnet/dist/lib/libnvpair/libnvpair.h b/external/cddl/osnet/dist/lib/libnvpair/libnvpair.h new file mode 100644 index 000000000000..e655e0d4069d --- /dev/null +++ b/external/cddl/osnet/dist/lib/libnvpair/libnvpair.h @@ -0,0 +1,48 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _LIBNVPAIR_H +#define _LIBNVPAIR_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +void nvlist_print(FILE *, nvlist_t *); +int nvpair_value_match(nvpair_t *, int, char *, char **); +int nvpair_value_match_regex(nvpair_t *, int, char *, regex_t *, char **); + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBNVPAIR_H */ diff --git a/external/cddl/osnet/dist/lib/libnvpair/nvpair_alloc_system.c b/external/cddl/osnet/dist/lib/libnvpair/nvpair_alloc_system.c new file mode 100644 index 000000000000..1aefc1004daf --- /dev/null +++ b/external/cddl/osnet/dist/lib/libnvpair/nvpair_alloc_system.c @@ -0,0 +1,59 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2004 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +/*ARGSUSED*/ +static void * +nv_alloc_sys(nv_alloc_t *nva, size_t size) +{ + return (malloc(size)); +} + +/*ARGSUSED*/ +static void +nv_free_sys(nv_alloc_t *nva, void *buf, size_t size) +{ + free(buf); +} + +const nv_alloc_ops_t system_ops_def = { + NULL, /* nv_ao_init() */ + NULL, /* nv_ao_fini() */ + nv_alloc_sys, /* nv_ao_alloc() */ + nv_free_sys, /* nv_ao_free() */ + NULL /* nv_ao_reset() */ +}; + +nv_alloc_t nv_alloc_nosleep_def = { + &system_ops_def, + NULL +}; + +nv_alloc_t *nv_alloc_nosleep = &nv_alloc_nosleep_def; diff --git a/external/cddl/osnet/dist/lib/libshare/common/libshare.h b/external/cddl/osnet/dist/lib/libshare/common/libshare.h new file mode 100644 index 000000000000..a560b7731fcd --- /dev/null +++ b/external/cddl/osnet/dist/lib/libshare/common/libshare.h @@ -0,0 +1,281 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +/* + * basic API declarations for share management + */ + +#ifndef _LIBSHARE_H +#define _LIBSHARE_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* + * Basic datatypes for most functions + */ +typedef void *sa_group_t; +typedef void *sa_share_t; +typedef void *sa_property_t; +typedef void *sa_optionset_t; +typedef void *sa_security_t; +typedef void *sa_protocol_properties_t; +typedef void *sa_resource_t; + +typedef void *sa_handle_t; /* opaque handle to access core functions */ + +/* + * defined error values + */ + +#define SA_OK 0 +#define SA_NO_SUCH_PATH 1 /* provided path doesn't exist */ +#define SA_NO_MEMORY 2 /* no memory for data structures */ +#define SA_DUPLICATE_NAME 3 /* object name is already in use */ +#define SA_BAD_PATH 4 /* not a full path */ +#define SA_NO_SUCH_GROUP 5 /* group is not defined */ +#define SA_CONFIG_ERR 6 /* system configuration error */ +#define SA_SYSTEM_ERR 7 /* system error, use errno */ +#define SA_SYNTAX_ERR 8 /* syntax error on command line */ +#define SA_NO_PERMISSION 9 /* no permission for operation */ +#define SA_BUSY 10 /* resource is busy */ +#define SA_NO_SUCH_PROP 11 /* property doesn't exist */ +#define SA_INVALID_NAME 12 /* name of object is invalid */ +#define SA_INVALID_PROTOCOL 13 /* specified protocol not valid */ +#define SA_NOT_ALLOWED 14 /* operation not allowed */ +#define SA_BAD_VALUE 15 /* bad value for property */ +#define SA_INVALID_SECURITY 16 /* invalid security type */ +#define SA_NO_SUCH_SECURITY 17 /* security set not found */ +#define SA_VALUE_CONFLICT 18 /* property value conflict */ +#define SA_NOT_IMPLEMENTED 19 /* plugin interface not implemented */ +#define SA_INVALID_PATH 20 /* path is sub-dir of existing share */ +#define SA_NOT_SUPPORTED 21 /* operation not supported for proto */ +#define SA_PROP_SHARE_ONLY 22 /* property valid on share only */ +#define SA_NOT_SHARED 23 /* path is not shared */ +#define SA_NO_SUCH_RESOURCE 24 /* resource not found */ +#define SA_RESOURCE_REQUIRED 25 /* resource name is required */ +#define SA_MULTIPLE_ERROR 26 /* multiple protocols reported error */ +#define SA_PATH_IS_SUBDIR 27 /* check_path found path is subdir */ +#define SA_PATH_IS_PARENTDIR 28 /* check_path found path is parent */ +#define SA_NO_SECTION 29 /* protocol requires section info */ +#define SA_NO_SUCH_SECTION 30 /* no section found */ +#define SA_NO_PROPERTIES 31 /* no properties found */ +#define SA_PASSWORD_ENC 32 /* passwords must be encrypted */ + +/* API Initialization */ +#define SA_INIT_SHARE_API 0x0001 /* init share specific interface */ +#define SA_INIT_CONTROL_API 0x0002 /* init control specific interface */ + +/* not part of API returns */ +#define SA_LEGACY_ERR 32 /* share/unshare error return */ + +/* + * other defined values + */ + +#define SA_MAX_NAME_LEN 100 /* must fit service instance name */ +#define SA_MAX_RESOURCE_NAME 255 /* Maximum length of resource name */ + +/* Used in calls to sa_add_share() and sa_add_resource() */ +#define SA_SHARE_TRANSIENT 0 /* shared but not across reboot */ +#define SA_SHARE_LEGACY 1 /* share is in dfstab only */ +#define SA_SHARE_PERMANENT 2 /* share goes to repository */ + +/* sa_check_path() related */ +#define SA_CHECK_NORMAL 0 /* only check against active shares */ +#define SA_CHECK_STRICT 1 /* check against all shares */ + +/* RBAC related */ +#define SA_RBAC_MANAGE "solaris.smf.manage.shares" +#define SA_RBAC_VALUE "solaris.smf.value.shares" + +/* + * Feature set bit definitions + */ + +#define 
SA_FEATURE_NONE 0x0000 /* no feature flags set */ +#define SA_FEATURE_RESOURCE 0x0001 /* resource names are required */ +#define SA_FEATURE_DFSTAB 0x0002 /* need to manage in dfstab */ +#define SA_FEATURE_ALLOWSUBDIRS 0x0004 /* allow subdirs to be shared */ +#define SA_FEATURE_ALLOWPARDIRS 0x0008 /* allow parent dirs to be shared */ +#define SA_FEATURE_HAS_SECTIONS 0x0010 /* protocol supports sections */ +#define SA_FEATURE_ADD_PROPERTIES 0x0020 /* can add properties */ +#define SA_FEATURE_SERVER 0x0040 /* protocol supports server mode */ + +/* + * legacy files + */ + +#define SA_LEGACY_DFSTAB "/etc/dfs/dfstab" +#define SA_LEGACY_SHARETAB "/etc/dfs/sharetab" + +/* + * SMF related + */ + +#define SA_SVC_FMRI_BASE "svc:/network/shares/group" + +/* initialization */ +extern sa_handle_t sa_init(int); +extern void sa_fini(sa_handle_t); +extern int sa_update_config(sa_handle_t); +extern char *sa_errorstr(int); + +/* protocol names */ +extern int sa_get_protocols(char ***); +extern int sa_valid_protocol(char *); + +/* group control (create, remove, etc) */ +extern sa_group_t sa_create_group(sa_handle_t, char *, int *); +extern int sa_remove_group(sa_group_t); +extern sa_group_t sa_get_group(sa_handle_t, char *); +extern sa_group_t sa_get_next_group(sa_group_t); +extern char *sa_get_group_attr(sa_group_t, char *); +extern int sa_set_group_attr(sa_group_t, char *, char *); +extern sa_group_t sa_get_sub_group(sa_group_t); +extern int sa_valid_group_name(char *); + +/* share control */ +extern sa_share_t sa_add_share(sa_group_t, char *, int, int *); +extern int sa_check_path(sa_group_t, char *, int); +extern int sa_move_share(sa_group_t, sa_share_t); +extern int sa_remove_share(sa_share_t); +extern sa_share_t sa_get_share(sa_group_t, char *); +extern sa_share_t sa_find_share(sa_handle_t, char *); +extern sa_share_t sa_get_next_share(sa_share_t); +extern char *sa_get_share_attr(sa_share_t, char *); +extern char *sa_get_share_description(sa_share_t); +extern sa_group_t sa_get_parent_group(sa_share_t); +extern int sa_set_share_attr(sa_share_t, char *, char *); +extern int sa_set_share_description(sa_share_t, char *); +extern int sa_enable_share(sa_group_t, char *); +extern int sa_disable_share(sa_share_t, char *); +extern int sa_is_share(void *); + +/* resource name related */ +extern sa_resource_t sa_find_resource(sa_handle_t, char *); +extern sa_resource_t sa_get_resource(sa_group_t, char *); +extern sa_resource_t sa_get_next_resource(sa_resource_t); +extern sa_share_t sa_get_resource_parent(sa_resource_t); +extern sa_resource_t sa_get_share_resource(sa_share_t, char *); +extern sa_resource_t sa_add_resource(sa_share_t, char *, int, int *); +extern int sa_remove_resource(sa_resource_t); +extern char *sa_get_resource_attr(sa_resource_t, char *); +extern int sa_set_resource_attr(sa_resource_t, char *, char *); +extern int sa_set_resource_description(sa_resource_t, char *); +extern char *sa_get_resource_description(sa_resource_t); +extern int sa_enable_resource(sa_resource_t, char *); +extern int sa_disable_resource(sa_resource_t, char *); +extern int sa_rename_resource(sa_resource_t, char *); +extern void sa_fix_resource_name(char *); + +/* data structure free calls */ +extern void sa_free_attr_string(char *); +extern void sa_free_share_description(char *); + +/* optionset control */ +extern sa_optionset_t sa_get_optionset(sa_group_t, char *); +extern sa_optionset_t sa_get_next_optionset(sa_group_t); +extern char *sa_get_optionset_attr(sa_optionset_t, char *); +extern void 
sa_set_optionset_attr(sa_optionset_t, char *, char *); +extern sa_optionset_t sa_create_optionset(sa_group_t, char *); +extern int sa_destroy_optionset(sa_optionset_t); +extern sa_optionset_t sa_get_derived_optionset(void *, char *, int); +extern void sa_free_derived_optionset(sa_optionset_t); + +/* property functions */ +extern sa_property_t sa_get_property(sa_optionset_t, char *); +extern sa_property_t sa_get_next_property(sa_group_t); +extern char *sa_get_property_attr(sa_property_t, char *); +extern sa_property_t sa_create_section(char *, char *); +extern void sa_set_section_attr(sa_property_t, char *, char *); +extern sa_property_t sa_create_property(char *, char *); +extern int sa_add_property(void *, sa_property_t); +extern int sa_update_property(sa_property_t, char *); +extern int sa_remove_property(sa_property_t); +extern int sa_commit_properties(sa_optionset_t, int); +extern int sa_valid_property(sa_handle_t, void *, char *, sa_property_t); +extern int sa_is_persistent(void *); + +/* security control */ +extern sa_security_t sa_get_security(sa_group_t, char *, char *); +extern sa_security_t sa_get_next_security(sa_security_t); +extern char *sa_get_security_attr(sa_optionset_t, char *); +extern sa_security_t sa_create_security(sa_group_t, char *, char *); +extern int sa_destroy_security(sa_security_t); +extern void sa_set_security_attr(sa_security_t, char *, char *); +extern sa_optionset_t sa_get_all_security_types(void *, char *, int); +extern sa_security_t sa_get_derived_security(void *, char *, char *, int); +extern void sa_free_derived_security(sa_security_t); + +/* protocol specific interfaces */ +extern int sa_parse_legacy_options(sa_group_t, char *, char *); +extern char *sa_proto_legacy_format(char *, sa_group_t, int); +extern int sa_is_security(char *, char *); +extern sa_protocol_properties_t sa_proto_get_properties(char *); +extern uint64_t sa_proto_get_featureset(char *); +extern sa_property_t sa_get_protocol_section(sa_protocol_properties_t, char *); +extern sa_property_t sa_get_next_protocol_section(sa_property_t, char *); +extern sa_property_t sa_get_protocol_property(sa_protocol_properties_t, char *); +extern sa_property_t sa_get_next_protocol_property(sa_property_t, char *); +extern int sa_set_protocol_property(sa_property_t, char *, char *); +extern char *sa_get_protocol_status(char *); +extern void sa_format_free(char *); +extern sa_protocol_properties_t sa_create_protocol_properties(char *); +extern int sa_add_protocol_property(sa_protocol_properties_t, sa_property_t); +extern int sa_proto_valid_prop(sa_handle_t, char *, sa_property_t, + sa_optionset_t); +extern int sa_proto_valid_space(char *, char *); +extern char *sa_proto_space_alias(char *, char *); +extern int sa_proto_get_transients(sa_handle_t, char *); +extern int sa_proto_notify_resource(sa_resource_t, char *); +extern int sa_proto_change_notify(sa_share_t, char *); +extern int sa_proto_delete_section(char *, char *); + +/* handle legacy (dfstab/sharetab) files */ +extern int sa_delete_legacy(sa_share_t, char *); +extern int sa_update_legacy(sa_share_t, char *); +extern int sa_update_sharetab(sa_share_t, char *); +extern int sa_delete_sharetab(sa_handle_t, char *, char *); + +/* ZFS functions */ +extern int sa_zfs_is_shared(sa_handle_t, char *); +extern int sa_group_is_zfs(sa_group_t); +extern int sa_path_is_zfs(char *); + +/* SA Handle specific functions */ +extern sa_handle_t sa_find_group_handle(sa_group_t); + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBSHARE_H */ diff --git 
a/external/cddl/osnet/dist/lib/libuutil/common/libuutil.h b/external/cddl/osnet/dist/lib/libuutil/common/libuutil.h new file mode 100644 index 000000000000..ccd46b97748e --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/libuutil.h @@ -0,0 +1,381 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _LIBUUTIL_H +#define _LIBUUTIL_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Standard flags codes. + */ +#define UU_DEFAULT 0 + +/* + * Standard error codes. + */ +#define UU_ERROR_NONE 0 /* no error */ +#define UU_ERROR_INVALID_ARGUMENT 1 /* invalid argument */ +#define UU_ERROR_UNKNOWN_FLAG 2 /* passed flag invalid */ +#define UU_ERROR_NO_MEMORY 3 /* out of memory */ +#define UU_ERROR_CALLBACK_FAILED 4 /* callback-initiated error */ +#define UU_ERROR_NOT_SUPPORTED 5 /* operation not supported */ +#define UU_ERROR_EMPTY 6 /* no value provided */ +#define UU_ERROR_UNDERFLOW 7 /* value is too small */ +#define UU_ERROR_OVERFLOW 8 /* value is too value */ +#define UU_ERROR_INVALID_CHAR 9 /* value contains unexpected char */ +#define UU_ERROR_INVALID_DIGIT 10 /* value contains digit not in base */ + +#define UU_ERROR_SYSTEM 99 /* underlying system error */ +#define UU_ERROR_UNKNOWN 100 /* error status not known */ + +/* + * Standard program exit codes. + */ +#define UU_EXIT_OK (*(uu_exit_ok())) +#define UU_EXIT_FATAL (*(uu_exit_fatal())) +#define UU_EXIT_USAGE (*(uu_exit_usage())) + +/* + * Exit status profiles. + */ +#define UU_PROFILE_DEFAULT 0 +#define UU_PROFILE_LAUNCHER 1 + +/* + * Error reporting functions. + */ +uint32_t uu_error(void); +const char *uu_strerror(uint32_t); + +/* + * Program notification functions. + */ +extern void uu_alt_exit(int); +extern const char *uu_setpname(char *); +extern const char *uu_getpname(void); +/*PRINTFLIKE1*/ +extern void uu_warn(const char *, ...); +extern void uu_vwarn(const char *, va_list); +/*PRINTFLIKE1*/ +extern void uu_die(const char *, ...) __NORETURN; +extern void uu_vdie(const char *, va_list) __NORETURN; +/*PRINTFLIKE2*/ +extern void uu_xdie(int, const char *, ...) __NORETURN; +extern void uu_vxdie(int, const char *, va_list) __NORETURN; + +/* + * Exit status functions (not to be used directly) + */ +extern int *uu_exit_ok(void); +extern int *uu_exit_fatal(void); +extern int *uu_exit_usage(void); + +/* + * string->number conversions + */ +extern int uu_strtoint(const char *, void *, size_t, int, int64_t, int64_t); +extern int uu_strtouint(const char *, void *, size_t, int, uint64_t, uint64_t); + +/* + * Debug print facility functions. 
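 *
 * Illustrative sketch only (names and values are hypothetical, not part of
 * the original source): a facility created at UU_DPRINTF_WARNING suppresses
 * messages whose severity value is numerically higher (i.e. less urgent).
 *
 *	uu_dprintf_t *dp;
 *
 *	dp = uu_dprintf_create("com.example,mytool", UU_DPRINTF_WARNING, 0);
 *	if (dp != NULL) {
 *		uu_dprintf(dp, UU_DPRINTF_DEBUG, "not printed: %d\n", 42);
 *		uu_dprintf(dp, UU_DPRINTF_FATAL, "printed: %s\n", "boom");
 *		uu_dprintf_destroy(dp);
 *	}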
+ */ +typedef struct uu_dprintf uu_dprintf_t; + +typedef enum { + UU_DPRINTF_SILENT, + UU_DPRINTF_FATAL, + UU_DPRINTF_WARNING, + UU_DPRINTF_NOTICE, + UU_DPRINTF_INFO, + UU_DPRINTF_DEBUG +} uu_dprintf_severity_t; + +extern uu_dprintf_t *uu_dprintf_create(const char *, uu_dprintf_severity_t, + uint_t); +/*PRINTFLIKE3*/ +extern void uu_dprintf(uu_dprintf_t *, uu_dprintf_severity_t, + const char *, ...); +extern void uu_dprintf_destroy(uu_dprintf_t *); +extern const char *uu_dprintf_getname(uu_dprintf_t *); + +/* + * Identifier test flags and function. + */ +#define UU_NAME_DOMAIN 0x1 /* allow SUNW, or com.sun, prefix */ +#define UU_NAME_PATH 0x2 /* allow '/'-delimited paths */ + +int uu_check_name(const char *, uint_t); + +/* + * File creation functions. + */ +extern int uu_open_tmp(const char *dir, uint_t uflags); + +/* + * Convenience functions. + */ +/*PRINTFLIKE1*/ +extern char *uu_msprintf(const char *format, ...); +extern void *uu_zalloc(size_t); +extern char *uu_strdup(const char *); +extern void uu_free(void *); + +/* + * Comparison function type definition. + * Developers should be careful in their use of the _private argument. If you + * break interface guarantees, you get undefined behavior. + */ +typedef int uu_compare_fn_t(const void *__left, const void *__right, + void *__private); + +/* + * Walk variant flags. + * A data structure need not provide support for all variants and + * combinations. Refer to the appropriate documentation. + */ +#define UU_WALK_ROBUST 0x00000001 /* walk can survive removes */ +#define UU_WALK_REVERSE 0x00000002 /* reverse walk order */ + +#define UU_WALK_PREORDER 0x00000010 /* walk tree in pre-order */ +#define UU_WALK_POSTORDER 0x00000020 /* walk tree in post-order */ + +/* + * Walk callback function return codes. + */ +#define UU_WALK_ERROR -1 +#define UU_WALK_NEXT 0 +#define UU_WALK_DONE 1 + +/* + * Walk callback function type definition. + */ +typedef int uu_walk_fn_t(void *_elem, void *_private); + +/* + * lists: opaque structures + */ +typedef struct uu_list_pool uu_list_pool_t; +typedef struct uu_list uu_list_t; + +typedef struct uu_list_node { + uintptr_t uln_opaque[2]; +} uu_list_node_t; + +typedef struct uu_list_walk uu_list_walk_t; + +typedef uintptr_t uu_list_index_t; + +/* + * lists: interface + * + * basic usage: + * typedef struct foo { + * ... + * uu_list_node_t foo_node; + * ... + * } foo_t; + * + * static int + * foo_compare(void *l_arg, void *r_arg, void *private) + * { + * foo_t *l = l_arg; + * foo_t *r = r_arg; + * + * if (... l greater than r ...) + * return (1); + * if (... l less than r ...) + * return (-1); + * return (0); + * } + * + * ... + * // at initialization time + * foo_pool = uu_list_pool_create("foo_pool", + * sizeof (foo_t), offsetof(foo_t, foo_node), foo_compare, + * debugging? 0 : UU_AVL_POOL_DEBUG); + * ... + */ +uu_list_pool_t *uu_list_pool_create(const char *, size_t, size_t, + uu_compare_fn_t *, uint32_t); +#define UU_LIST_POOL_DEBUG 0x00000001 + +void uu_list_pool_destroy(uu_list_pool_t *); + +/* + * usage: + * + * foo_t *a; + * a = malloc(sizeof(*a)); + * uu_list_node_init(a, &a->foo_list, pool); + * ... 
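 *	(illustrative continuation, not part of the original comment; error
 *	handling omitted, "list" is a uu_list_t * and "idx" a uu_list_index_t)
 *	list = uu_list_create(pool, NULL, UU_LIST_SORTED);
 *	(void) uu_list_find(list, a, NULL, &idx);
 *	uu_list_insert(list, a, idx);
 *	...
 *	uu_list_remove(list, a);
 *	uu_list_destroy(list);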
+ * uu_list_node_fini(a, &a->foo_list, pool); + * free(a); + */ +void uu_list_node_init(void *, uu_list_node_t *, uu_list_pool_t *); +void uu_list_node_fini(void *, uu_list_node_t *, uu_list_pool_t *); + +uu_list_t *uu_list_create(uu_list_pool_t *, void *_parent, uint32_t); +#define UU_LIST_DEBUG 0x00000001 +#define UU_LIST_SORTED 0x00000002 /* list is sorted */ + +void uu_list_destroy(uu_list_t *); /* list must be empty */ + +size_t uu_list_numnodes(uu_list_t *); + +void *uu_list_first(uu_list_t *); +void *uu_list_last(uu_list_t *); + +void *uu_list_next(uu_list_t *, void *); +void *uu_list_prev(uu_list_t *, void *); + +int uu_list_walk(uu_list_t *, uu_walk_fn_t *, void *, uint32_t); + +uu_list_walk_t *uu_list_walk_start(uu_list_t *, uint32_t); +void *uu_list_walk_next(uu_list_walk_t *); +void uu_list_walk_end(uu_list_walk_t *); + +void *uu_list_find(uu_list_t *, void *, void *, uu_list_index_t *); +void uu_list_insert(uu_list_t *, void *, uu_list_index_t); + +void *uu_list_nearest_next(uu_list_t *, uu_list_index_t); +void *uu_list_nearest_prev(uu_list_t *, uu_list_index_t); + +void *uu_list_teardown(uu_list_t *, void **); + +void uu_list_remove(uu_list_t *, void *); + +/* + * lists: interfaces for non-sorted lists only + */ +int uu_list_insert_before(uu_list_t *, void *_target, void *_elem); +int uu_list_insert_after(uu_list_t *, void *_target, void *_elem); + +/* + * avl trees: opaque structures + */ +typedef struct uu_avl_pool uu_avl_pool_t; +typedef struct uu_avl uu_avl_t; + +typedef struct uu_avl_node { +#ifdef _LP64 + uintptr_t uan_opaque[3]; +#else + uintptr_t uan_opaque[4]; +#endif +} uu_avl_node_t; + +typedef struct uu_avl_walk uu_avl_walk_t; + +typedef uintptr_t uu_avl_index_t; + +/* + * avl trees: interface + * + * basic usage: + * typedef struct foo { + * ... + * uu_avl_node_t foo_node; + * ... + * } foo_t; + * + * static int + * foo_compare(void *l_arg, void *r_arg, void *private) + * { + * foo_t *l = l_arg; + * foo_t *r = r_arg; + * + * if (... l greater than r ...) + * return (1); + * if (... l less than r ...) + * return (-1); + * return (0); + * } + * + * ... + * // at initialization time + * foo_pool = uu_avl_pool_create("foo_pool", + * sizeof (foo_t), offsetof(foo_t, foo_node), foo_compare, + * debugging? 0 : UU_AVL_POOL_DEBUG); + * ... + */ +uu_avl_pool_t *uu_avl_pool_create(const char *, size_t, size_t, + uu_compare_fn_t *, uint32_t); +#define UU_AVL_POOL_DEBUG 0x00000001 + +void uu_avl_pool_destroy(uu_avl_pool_t *); + +/* + * usage: + * + * foo_t *a; + * a = malloc(sizeof(*a)); + * uu_avl_node_init(a, &a->foo_avl, pool); + * ... 
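 *	(illustrative continuation, not part of the original comment; insert
 *	only when no equal element exists, using the find/insert index
 *	protocol; "avl" is a uu_avl_t * and "idx" a uu_avl_index_t)
 *	avl = uu_avl_create(foo_pool, NULL, UU_DEFAULT);
 *	if (uu_avl_find(avl, a, NULL, &idx) == NULL)
 *		uu_avl_insert(avl, a, idx);
 *	...
 *	uu_avl_remove(avl, a);
 *	uu_avl_destroy(avl);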
+ * uu_avl_node_fini(a, &a->foo_avl, pool); + * free(a); + */ +void uu_avl_node_init(void *, uu_avl_node_t *, uu_avl_pool_t *); +void uu_avl_node_fini(void *, uu_avl_node_t *, uu_avl_pool_t *); + +uu_avl_t *uu_avl_create(uu_avl_pool_t *, void *_parent, uint32_t); +#define UU_AVL_DEBUG 0x00000001 + +void uu_avl_destroy(uu_avl_t *); /* list must be empty */ + +size_t uu_avl_numnodes(uu_avl_t *); + +void *uu_avl_first(uu_avl_t *); +void *uu_avl_last(uu_avl_t *); + +void *uu_avl_next(uu_avl_t *, void *); +void *uu_avl_prev(uu_avl_t *, void *); + +int uu_avl_walk(uu_avl_t *, uu_walk_fn_t *, void *, uint32_t); + +uu_avl_walk_t *uu_avl_walk_start(uu_avl_t *, uint32_t); +void *uu_avl_walk_next(uu_avl_walk_t *); +void uu_avl_walk_end(uu_avl_walk_t *); + +void *uu_avl_find(uu_avl_t *, void *, void *, uu_avl_index_t *); +void uu_avl_insert(uu_avl_t *, void *, uu_avl_index_t); + +void *uu_avl_nearest_next(uu_avl_t *, uu_avl_index_t); +void *uu_avl_nearest_prev(uu_avl_t *, uu_avl_index_t); + +void *uu_avl_teardown(uu_avl_t *, void **); + +void uu_avl_remove(uu_avl_t *, void *); + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBUUTIL_H */ diff --git a/external/cddl/osnet/dist/lib/libuutil/common/libuutil_common.h b/external/cddl/osnet/dist/lib/libuutil/common/libuutil_common.h new file mode 100644 index 000000000000..9ebaaedfd237 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/libuutil_common.h @@ -0,0 +1,35 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _LIBUUTIL_COMMON_H +#define _LIBUUTIL_COMMON_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +#endif /* _LIBUUTIL_COMMON_H */ diff --git a/external/cddl/osnet/dist/lib/libuutil/common/libuutil_impl.h b/external/cddl/osnet/dist/lib/libuutil/common/libuutil_impl.h new file mode 100644 index 000000000000..9466e5974581 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/libuutil_impl.h @@ -0,0 +1,181 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2005 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _LIBUUTIL_IMPL_H +#define _LIBUUTIL_IMPL_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +void uu_set_error(uint_t); +#pragma rarely_called(uu_set_error) + +/*PRINTFLIKE1*/ +void uu_panic(const char *format, ...); +#pragma rarely_called(uu_panic) + +struct uu_dprintf { + char *uud_name; + uu_dprintf_severity_t uud_severity; + uint_t uud_flags; +}; + +/* + * For debugging purposes, libuutil keeps around linked lists of all uu_lists + * and uu_avls, along with pointers to their parents. These can cause false + * negatives when looking for memory leaks, so we encode the pointers by + * storing them with swapped endianness; this is not perfect, but it's about + * the best we can do without wasting a lot of space. + */ +#ifdef _LP64 +#define UU_PTR_ENCODE(ptr) BSWAP_64((uintptr_t)(void *)(ptr)) +#else +#define UU_PTR_ENCODE(ptr) BSWAP_32((uintptr_t)(void *)(ptr)) +#endif + +#define UU_PTR_DECODE(ptr) ((void *)UU_PTR_ENCODE(ptr)) + +/* + * uu_list structures + */ +typedef struct uu_list_node_impl { + struct uu_list_node_impl *uln_next; + struct uu_list_node_impl *uln_prev; +} uu_list_node_impl_t; + +struct uu_list_walk { + uu_list_walk_t *ulw_next; + uu_list_walk_t *ulw_prev; + + uu_list_t *ulw_list; + int8_t ulw_dir; + uint8_t ulw_robust; + uu_list_node_impl_t *ulw_next_result; +}; + +struct uu_list { + uintptr_t ul_next_enc; + uintptr_t ul_prev_enc; + + uu_list_pool_t *ul_pool; + uintptr_t ul_parent_enc; /* encoded parent pointer */ + size_t ul_offset; + size_t ul_numnodes; + uint8_t ul_debug; + uint8_t ul_sorted; + uint8_t ul_index; /* mark for uu_list_index_ts */ + + uu_list_node_impl_t ul_null_node; + uu_list_walk_t ul_null_walk; /* for robust walkers */ +}; + +#define UU_LIST_PTR(ptr) ((uu_list_t *)UU_PTR_DECODE(ptr)) + +#define UU_LIST_POOL_MAXNAME 64 + +struct uu_list_pool { + uu_list_pool_t *ulp_next; + uu_list_pool_t *ulp_prev; + + char ulp_name[UU_LIST_POOL_MAXNAME]; + size_t ulp_nodeoffset; + size_t ulp_objsize; + uu_compare_fn_t *ulp_cmp; + uint8_t ulp_debug; + uint8_t ulp_last_index; + pthread_mutex_t ulp_lock; /* protects null_list */ + uu_list_t ulp_null_list; +}; + +/* + * uu_avl structures + */ +typedef struct avl_node uu_avl_node_impl_t; + +struct uu_avl_walk { + uu_avl_walk_t *uaw_next; + uu_avl_walk_t *uaw_prev; + + uu_avl_t *uaw_avl; + void *uaw_next_result; + int8_t uaw_dir; + uint8_t uaw_robust; +}; + +struct uu_avl { + uintptr_t ua_next_enc; + uintptr_t ua_prev_enc; + + uu_avl_pool_t *ua_pool; + uintptr_t ua_parent_enc; + uint8_t ua_debug; + uint8_t ua_index; /* mark for uu_avl_index_ts */ + + struct avl_tree ua_tree; + uu_avl_walk_t ua_null_walk; +}; + +#define UU_AVL_PTR(x) ((uu_avl_t *)UU_PTR_DECODE(x)) + +#define UU_AVL_POOL_MAXNAME 64 + +struct uu_avl_pool { + uu_avl_pool_t *uap_next; + uu_avl_pool_t *uap_prev; + + char uap_name[UU_AVL_POOL_MAXNAME]; + size_t uap_nodeoffset; + size_t uap_objsize; + uu_compare_fn_t *uap_cmp; + uint8_t uap_debug; + uint8_t uap_last_index; + pthread_mutex_t uap_lock; /* protects null_avl */ + uu_avl_t uap_null_avl; +}; + +/* + * atfork() handlers + */ +void uu_avl_lockup(void); +void 
uu_avl_release(void); + +void uu_list_lockup(void); +void uu_list_release(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBUUTIL_IMPL_H */ diff --git a/external/cddl/osnet/dist/lib/libuutil/common/uu_alloc.c b/external/cddl/osnet/dist/lib/libuutil/common/uu_alloc.c new file mode 100644 index 000000000000..05d8622871fa --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/uu_alloc.c @@ -0,0 +1,98 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include "libuutil_common.h" + +#include +#include +#include +#include + +void * +uu_zalloc(size_t n) +{ + void *p = malloc(n); + + if (p == NULL) { + uu_set_error(UU_ERROR_SYSTEM); + return (NULL); + } + + (void) memset(p, 0, n); + + return (p); +} + +void +uu_free(void *p) +{ + free(p); +} + +char * +uu_strdup(const char *str) +{ + char *buf = NULL; + + if (str != NULL) { + size_t sz; + + sz = strlen(str) + 1; + buf = uu_zalloc(sz); + if (buf != NULL) + (void) memcpy(buf, str, sz); + } + return (buf); +} + +char * +uu_msprintf(const char *format, ...) +{ + va_list args; + char attic[1]; + uint_t M, m; + char *b; + + va_start(args, format); + M = vsnprintf(attic, 1, format, args); + va_end(args); + + for (;;) { + m = M; + if ((b = uu_zalloc(m + 1)) == NULL) + return (NULL); + + va_start(args, format); + M = vsnprintf(b, m + 1, format, args); + va_end(args); + + if (M == m) + break; /* sizes match */ + + uu_free(b); + } + + return (b); +} diff --git a/external/cddl/osnet/dist/lib/libuutil/common/uu_avl.c b/external/cddl/osnet/dist/lib/libuutil/common/uu_avl.c new file mode 100644 index 000000000000..308e9208fea4 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/uu_avl.c @@ -0,0 +1,569 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "libuutil_common.h" + +#include +#include +#include +#include + +static uu_avl_pool_t uu_null_apool = { &uu_null_apool, &uu_null_apool }; +static pthread_mutex_t uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER; + +/* + * The index mark change on every insert and delete, to catch stale + * references. + * + * We leave the low bit alone, since the avl code uses it. + */ +#define INDEX_MAX (sizeof (uintptr_t) - 2) +#define INDEX_NEXT(m) (((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX) + +#define INDEX_DECODE(i) ((i) & ~INDEX_MAX) +#define INDEX_ENCODE(p, n) (((n) & ~INDEX_MAX) | (p)->ua_index) +#define INDEX_VALID(p, i) (((i) & INDEX_MAX) == (p)->ua_index) +#define INDEX_CHECK(i) (((i) & INDEX_MAX) != 0) + +/* + * When an element is inactive (not in a tree), we keep a marked pointer to + * its containing pool in its first word, and a NULL pointer in its second. + * + * On insert, we use these to verify that it comes from the correct pool. + */ +#define NODE_ARRAY(p, n) ((uintptr_t *)((uintptr_t)(n) + \ + (pp)->uap_nodeoffset)) + +#define POOL_TO_MARKER(pp) (((uintptr_t)(pp) | 1)) + +#define DEAD_MARKER 0xc4 + +uu_avl_pool_t * +uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset, + uu_compare_fn_t *compare_func, uint32_t flags) +{ + uu_avl_pool_t *pp, *next, *prev; + + if (name == NULL || + uu_check_name(name, UU_NAME_DOMAIN) == -1 || + nodeoffset + sizeof (uu_avl_node_t) > objsize || + compare_func == NULL) { + uu_set_error(UU_ERROR_INVALID_ARGUMENT); + return (NULL); + } + + if (flags & ~UU_AVL_POOL_DEBUG) { + uu_set_error(UU_ERROR_UNKNOWN_FLAG); + return (NULL); + } + + pp = uu_zalloc(sizeof (uu_avl_pool_t)); + if (pp == NULL) { + uu_set_error(UU_ERROR_NO_MEMORY); + return (NULL); + } + + (void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name)); + pp->uap_nodeoffset = nodeoffset; + pp->uap_objsize = objsize; + pp->uap_cmp = compare_func; + if (flags & UU_AVL_POOL_DEBUG) + pp->uap_debug = 1; + pp->uap_last_index = 0; + + (void) pthread_mutex_init(&pp->uap_lock, NULL); + + pp->uap_null_avl.ua_next_enc = UU_PTR_ENCODE(&pp->uap_null_avl); + pp->uap_null_avl.ua_prev_enc = UU_PTR_ENCODE(&pp->uap_null_avl); + + (void) pthread_mutex_lock(&uu_apool_list_lock); + pp->uap_next = next = &uu_null_apool; + pp->uap_prev = prev = next->uap_prev; + next->uap_prev = pp; + prev->uap_next = pp; + (void) pthread_mutex_unlock(&uu_apool_list_lock); + + return (pp); +} + +void +uu_avl_pool_destroy(uu_avl_pool_t *pp) +{ + if (pp->uap_debug) { + if (pp->uap_null_avl.ua_next_enc != + UU_PTR_ENCODE(&pp->uap_null_avl) || + pp->uap_null_avl.ua_prev_enc != + UU_PTR_ENCODE(&pp->uap_null_avl)) { + uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has " + "outstanding avls, or is corrupt.\n", + (int)sizeof (pp->uap_name), pp->uap_name, + (void *)pp); + } + } + (void) pthread_mutex_lock(&uu_apool_list_lock); + pp->uap_next->uap_prev = pp->uap_prev; + pp->uap_prev->uap_next = pp->uap_next; + (void) pthread_mutex_unlock(&uu_apool_list_lock); + pp->uap_prev = NULL; + pp->uap_next = NULL; + uu_free(pp); +} + +void +uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp) +{ + uintptr_t *na = (uintptr_t *)np; + + if (pp->uap_debug) { + uintptr_t offset = (uintptr_t)np - (uintptr_t)base; + if (offset + sizeof (*np) > pp->uap_objsize) { + uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): " + "offset %ld doesn't fit in object (size %ld)\n", + base, (void *)np, (void *)pp, pp->uap_name, + (long)offset, (long)pp->uap_objsize); + } + if (offset != 
pp->uap_nodeoffset) { + uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): " + "offset %ld doesn't match pool's offset (%ld)\n", + base, (void *)np, (void *)pp, pp->uap_name, + (long)offset, (long)pp->uap_objsize); + } + } + + na[0] = POOL_TO_MARKER(pp); + na[1] = 0; +} + +void +uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp) +{ + uintptr_t *na = (uintptr_t *)np; + + if (pp->uap_debug) { + if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) { + uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): " + "node already finied\n", + base, (void *)np, (void *)pp, pp->uap_name); + } + if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) { + uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): " + "node corrupt, in tree, or in different pool\n", + base, (void *)np, (void *)pp, pp->uap_name); + } + } + + na[0] = DEAD_MARKER; + na[1] = DEAD_MARKER; + na[2] = DEAD_MARKER; +} + +struct uu_avl_node_compare_info { + uu_compare_fn_t *ac_compare; + void *ac_private; + void *ac_right; + void *ac_found; +}; + +static int +uu_avl_node_compare(const void *l, const void *r) +{ + struct uu_avl_node_compare_info *info = + (struct uu_avl_node_compare_info *)l; + + int res = info->ac_compare(r, info->ac_right, info->ac_private); + + if (res == 0) { + if (info->ac_found == NULL) + info->ac_found = (void *)r; + return (-1); + } + if (res < 0) + return (1); + return (-1); +} + +uu_avl_t * +uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags) +{ + uu_avl_t *ap, *next, *prev; + + if (flags & ~UU_AVL_DEBUG) { + uu_set_error(UU_ERROR_UNKNOWN_FLAG); + return (NULL); + } + + ap = uu_zalloc(sizeof (*ap)); + if (ap == NULL) { + uu_set_error(UU_ERROR_NO_MEMORY); + return (NULL); + } + + ap->ua_pool = pp; + ap->ua_parent_enc = UU_PTR_ENCODE(parent); + ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG); + ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index)); + + avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize, + pp->uap_nodeoffset); + + ap->ua_null_walk.uaw_next = &ap->ua_null_walk; + ap->ua_null_walk.uaw_prev = &ap->ua_null_walk; + + (void) pthread_mutex_lock(&pp->uap_lock); + next = &pp->uap_null_avl; + prev = UU_PTR_DECODE(next->ua_prev_enc); + ap->ua_next_enc = UU_PTR_ENCODE(next); + ap->ua_prev_enc = UU_PTR_ENCODE(prev); + next->ua_prev_enc = UU_PTR_ENCODE(ap); + prev->ua_next_enc = UU_PTR_ENCODE(ap); + (void) pthread_mutex_unlock(&pp->uap_lock); + + return (ap); +} + +void +uu_avl_destroy(uu_avl_t *ap) +{ + uu_avl_pool_t *pp = ap->ua_pool; + + if (ap->ua_debug) { + if (avl_numnodes(&ap->ua_tree) != 0) { + uu_panic("uu_avl_destroy(%p): tree not empty\n", + (void *)ap); + } + if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk || + ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) { + uu_panic("uu_avl_destroy(%p): outstanding walkers\n", + (void *)ap); + } + } + (void) pthread_mutex_lock(&pp->uap_lock); + UU_AVL_PTR(ap->ua_next_enc)->ua_prev_enc = ap->ua_prev_enc; + UU_AVL_PTR(ap->ua_prev_enc)->ua_next_enc = ap->ua_next_enc; + (void) pthread_mutex_unlock(&pp->uap_lock); + ap->ua_prev_enc = UU_PTR_ENCODE(NULL); + ap->ua_next_enc = UU_PTR_ENCODE(NULL); + + ap->ua_pool = NULL; + avl_destroy(&ap->ua_tree); + + uu_free(ap); +} + +size_t +uu_avl_numnodes(uu_avl_t *ap) +{ + return (avl_numnodes(&ap->ua_tree)); +} + +void * +uu_avl_first(uu_avl_t *ap) +{ + return (avl_first(&ap->ua_tree)); +} + +void * +uu_avl_last(uu_avl_t *ap) +{ + return (avl_last(&ap->ua_tree)); +} + +void * +uu_avl_next(uu_avl_t *ap, void *node) +{ + return (AVL_NEXT(&ap->ua_tree, node)); +} + +void * 
+uu_avl_prev(uu_avl_t *ap, void *node) +{ + return (AVL_PREV(&ap->ua_tree, node)); +} + +static void +_avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags) +{ + uu_avl_walk_t *next, *prev; + + int robust = (flags & UU_WALK_ROBUST); + int direction = (flags & UU_WALK_REVERSE)? -1 : 1; + + (void) memset(wp, 0, sizeof (*wp)); + wp->uaw_avl = ap; + wp->uaw_robust = robust; + wp->uaw_dir = direction; + + if (direction > 0) + wp->uaw_next_result = avl_first(&ap->ua_tree); + else + wp->uaw_next_result = avl_last(&ap->ua_tree); + + if (ap->ua_debug || robust) { + wp->uaw_next = next = &ap->ua_null_walk; + wp->uaw_prev = prev = next->uaw_prev; + next->uaw_prev = wp; + prev->uaw_next = wp; + } +} + +static void * +_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap) +{ + void *np = wp->uaw_next_result; + + avl_tree_t *t = &ap->ua_tree; + + if (np == NULL) + return (NULL); + + wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) : + AVL_PREV(t, np); + + return (np); +} + +static void +_avl_walk_fini(uu_avl_walk_t *wp) +{ + if (wp->uaw_next != NULL) { + wp->uaw_next->uaw_prev = wp->uaw_prev; + wp->uaw_prev->uaw_next = wp->uaw_next; + wp->uaw_next = NULL; + wp->uaw_prev = NULL; + } + wp->uaw_avl = NULL; + wp->uaw_next_result = NULL; +} + +uu_avl_walk_t * +uu_avl_walk_start(uu_avl_t *ap, uint32_t flags) +{ + uu_avl_walk_t *wp; + + if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) { + uu_set_error(UU_ERROR_UNKNOWN_FLAG); + return (NULL); + } + + wp = uu_zalloc(sizeof (*wp)); + if (wp == NULL) { + uu_set_error(UU_ERROR_NO_MEMORY); + return (NULL); + } + + _avl_walk_init(wp, ap, flags); + return (wp); +} + +void * +uu_avl_walk_next(uu_avl_walk_t *wp) +{ + return (_avl_walk_advance(wp, wp->uaw_avl)); +} + +void +uu_avl_walk_end(uu_avl_walk_t *wp) +{ + _avl_walk_fini(wp); + uu_free(wp); +} + +int +uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags) +{ + void *e; + uu_avl_walk_t my_walk; + + int status = UU_WALK_NEXT; + + if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) { + uu_set_error(UU_ERROR_UNKNOWN_FLAG); + return (-1); + } + + _avl_walk_init(&my_walk, ap, flags); + while (status == UU_WALK_NEXT && + (e = _avl_walk_advance(&my_walk, ap)) != NULL) + status = (*func)(e, private); + _avl_walk_fini(&my_walk); + + if (status >= 0) + return (0); + uu_set_error(UU_ERROR_CALLBACK_FAILED); + return (-1); +} + +void +uu_avl_remove(uu_avl_t *ap, void *elem) +{ + uu_avl_walk_t *wp; + uu_avl_pool_t *pp = ap->ua_pool; + uintptr_t *na = NODE_ARRAY(pp, elem); + + if (ap->ua_debug) { + /* + * invalidate outstanding uu_avl_index_ts. + */ + ap->ua_index = INDEX_NEXT(ap->ua_index); + } + + /* + * Robust walkers most be advanced, if we are removing the node + * they are currently using. In debug mode, non-robust walkers + * are also on the walker list. 
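	 * A caller-side sketch of the pattern this supports (illustrative
	 * only; "wk" is a uu_avl_walk_t *, "e" a void *, and should_delete()
	 * a hypothetical predicate):
	 *
	 *	wk = uu_avl_walk_start(ap, UU_WALK_ROBUST);
	 *	while ((e = uu_avl_walk_next(wk)) != NULL)
	 *		if (should_delete(e))
	 *			uu_avl_remove(ap, e);
	 *	uu_avl_walk_end(wk);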
+ */ + for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk; + wp = wp->uaw_next) { + if (wp->uaw_robust) { + if (elem == wp->uaw_next_result) + (void) _avl_walk_advance(wp, ap); + } else if (wp->uaw_next_result != NULL) { + uu_panic("uu_avl_remove(%p, %p): active non-robust " + "walker\n", (void *)ap, elem); + } + } + + avl_remove(&ap->ua_tree, elem); + + na[0] = POOL_TO_MARKER(pp); + na[1] = 0; +} + +void * +uu_avl_teardown(uu_avl_t *ap, void **cookie) +{ + void *elem = avl_destroy_nodes(&ap->ua_tree, cookie); + + if (elem != NULL) { + uu_avl_pool_t *pp = ap->ua_pool; + uintptr_t *na = NODE_ARRAY(pp, elem); + + na[0] = POOL_TO_MARKER(pp); + na[1] = 0; + } + return (elem); +} + +void * +uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out) +{ + struct uu_avl_node_compare_info info; + void *result; + + info.ac_compare = ap->ua_pool->uap_cmp; + info.ac_private = private; + info.ac_right = elem; + info.ac_found = NULL; + + result = avl_find(&ap->ua_tree, &info, out); + if (out != NULL) + *out = INDEX_ENCODE(ap, *out); + + if (ap->ua_debug && result != NULL) + uu_panic("uu_avl_find: internal error: avl_find succeeded\n"); + + return (info.ac_found); +} + +void +uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx) +{ + if (ap->ua_debug) { + uu_avl_pool_t *pp = ap->ua_pool; + uintptr_t *na = NODE_ARRAY(pp, elem); + + if (na[1] != 0) + uu_panic("uu_avl_insert(%p, %p, %p): node already " + "in tree, or corrupt\n", + (void *)ap, elem, (void *)idx); + if (na[0] == 0) + uu_panic("uu_avl_insert(%p, %p, %p): node not " + "initialized\n", + (void *)ap, elem, (void *)idx); + if (na[0] != POOL_TO_MARKER(pp)) + uu_panic("uu_avl_insert(%p, %p, %p): node from " + "other pool, or corrupt\n", + (void *)ap, elem, (void *)idx); + + if (!INDEX_VALID(ap, idx)) + uu_panic("uu_avl_insert(%p, %p, %p): %s\n", + (void *)ap, elem, (void *)idx, + INDEX_CHECK(idx)? "outdated index" : + "invalid index"); + + /* + * invalidate outstanding uu_avl_index_ts. + */ + ap->ua_index = INDEX_NEXT(ap->ua_index); + } + avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx)); +} + +void * +uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx) +{ + if (ap->ua_debug && !INDEX_VALID(ap, idx)) + uu_panic("uu_avl_nearest_next(%p, %p): %s\n", + (void *)ap, (void *)idx, INDEX_CHECK(idx)? + "outdated index" : "invalid index"); + return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER)); +} + +void * +uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx) +{ + if (ap->ua_debug && !INDEX_VALID(ap, idx)) + uu_panic("uu_avl_nearest_prev(%p, %p): %s\n", + (void *)ap, (void *)idx, INDEX_CHECK(idx)? + "outdated index" : "invalid index"); + return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE)); +} + +/* + * called from uu_lockup() and uu_release(), as part of our fork1()-safety. 
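 * A sketch of the expected registration (an assumption for illustration;
 * the actual pthread_atfork(3) call lives elsewhere in libuutil, not in
 * this file):
 *
 *	(void) pthread_atfork(uu_lockup, uu_release, uu_release);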
+ */ +void +uu_avl_lockup(void) +{ + uu_avl_pool_t *pp; + + (void) pthread_mutex_lock(&uu_apool_list_lock); + for (pp = uu_null_apool.uap_next; pp != &uu_null_apool; + pp = pp->uap_next) + (void) pthread_mutex_lock(&pp->uap_lock); +} + +void +uu_avl_release(void) +{ + uu_avl_pool_t *pp; + + for (pp = uu_null_apool.uap_next; pp != &uu_null_apool; + pp = pp->uap_next) + (void) pthread_mutex_unlock(&pp->uap_lock); + (void) pthread_mutex_unlock(&uu_apool_list_lock); +} diff --git a/external/cddl/osnet/dist/lib/libuutil/common/uu_dprintf.c b/external/cddl/osnet/dist/lib/libuutil/common/uu_dprintf.c new file mode 100644 index 000000000000..5b990a52b561 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/uu_dprintf.c @@ -0,0 +1,128 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2004 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "libuutil_common.h" + +#include +#include +#include +#include +#include +#include + +#define FACILITY_FMT "%s (%s): " + +#if !defined(TEXT_DOMAIN) +#define TEXT_DOMAIN "SYS_TEST" +#endif + +static const char * +strseverity(uu_dprintf_severity_t severity) +{ + switch (severity) { + case UU_DPRINTF_SILENT: + return (dgettext(TEXT_DOMAIN, "silent")); + case UU_DPRINTF_FATAL: + return (dgettext(TEXT_DOMAIN, "FATAL")); + case UU_DPRINTF_WARNING: + return (dgettext(TEXT_DOMAIN, "WARNING")); + case UU_DPRINTF_NOTICE: + return (dgettext(TEXT_DOMAIN, "note")); + case UU_DPRINTF_INFO: + return (dgettext(TEXT_DOMAIN, "info")); + case UU_DPRINTF_DEBUG: + return (dgettext(TEXT_DOMAIN, "debug")); + default: + return (dgettext(TEXT_DOMAIN, "unspecified")); + } +} + +uu_dprintf_t * +uu_dprintf_create(const char *name, uu_dprintf_severity_t severity, + uint_t flags) +{ + uu_dprintf_t *D; + + if (uu_check_name(name, UU_NAME_DOMAIN) == -1) { + uu_set_error(UU_ERROR_INVALID_ARGUMENT); + return (NULL); + } + + if ((D = uu_zalloc(sizeof (uu_dprintf_t))) == NULL) + return (NULL); + + if (name != NULL) { + D->uud_name = strdup(name); + if (D->uud_name == NULL) { + uu_free(D); + return (NULL); + } + } else { + D->uud_name = NULL; + } + + D->uud_severity = severity; + D->uud_flags = flags; + + return (D); +} + +/*PRINTFLIKE3*/ +void +uu_dprintf(uu_dprintf_t *D, uu_dprintf_severity_t severity, + const char *format, ...) +{ + va_list alist; + + /* XXX Assert that severity is not UU_DPRINTF_SILENT. 
*/ + + if (severity > D->uud_severity) + return; + + (void) fprintf(stderr, FACILITY_FMT, D->uud_name, + strseverity(severity)); + + va_start(alist, format); + (void) vfprintf(stderr, format, alist); + va_end(alist); +} + +void +uu_dprintf_destroy(uu_dprintf_t *D) +{ + if (D->uud_name) + free(D->uud_name); + + uu_free(D); +} + +const char * +uu_dprintf_getname(uu_dprintf_t *D) +{ + return (D->uud_name); +} diff --git a/external/cddl/osnet/dist/lib/libuutil/common/uu_ident.c b/external/cddl/osnet/dist/lib/libuutil/common/uu_ident.c new file mode 100644 index 000000000000..9a643845f8c2 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/uu_ident.c @@ -0,0 +1,122 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2004 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "libuutil_common.h" + +#include + +/* + * We require names of the form: + * [provider,]identifier[/[provider,]identifier]... + * + * Where provider is either a stock symbol (SUNW) or a java-style reversed + * domain name (com.sun). + * + * Both providers and identifiers must start with a letter, and may + * only contain alphanumerics, dashes, and underlines. Providers + * may also contain periods. + * + * Note that we do _not_ use the macros in , since they are affected + * by the current locale settings. 
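 * Examples (illustrative, not from the original):
 *	"SUNW,mydaemon"			valid with UU_NAME_DOMAIN
 *	"com.example,svc/worker-1"	valid with UU_NAME_DOMAIN | UU_NAME_PATH
 *	"1badname"			invalid: must start with a letter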
+ */ + +#define IS_ALPHA(c) \ + (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) + +#define IS_DIGIT(c) \ + ((c) >= '0' && (c) <= '9') + +static int +is_valid_ident(const char *s, const char *e, int allowdot) +{ + char c; + + if (s >= e) + return (0); /* name is empty */ + + c = *s++; + if (!IS_ALPHA(c)) + return (0); /* does not start with letter */ + + while (s < e && (c = *s++) != 0) { + if (IS_ALPHA(c) || IS_DIGIT(c) || c == '-' || c == '_' || + (allowdot && c == '.')) + continue; + return (0); /* invalid character */ + } + return (1); +} + +static int +is_valid_component(const char *b, const char *e, uint_t flags) +{ + char *sp; + + if (flags & UU_NAME_DOMAIN) { + sp = strchr(b, ','); + if (sp != NULL && sp < e) { + if (!is_valid_ident(b, sp, 1)) + return (0); + b = sp + 1; + } + } + + return (is_valid_ident(b, e, 0)); +} + +int +uu_check_name(const char *name, uint_t flags) +{ + const char *end = name + strlen(name); + const char *p; + + if (flags & ~(UU_NAME_DOMAIN | UU_NAME_PATH)) { + uu_set_error(UU_ERROR_UNKNOWN_FLAG); + return (-1); + } + + if (!(flags & UU_NAME_PATH)) { + if (!is_valid_component(name, end, flags)) + goto bad; + return (0); + } + + while ((p = strchr(name, '/')) != NULL) { + if (!is_valid_component(name, p - 1, flags)) + goto bad; + name = p + 1; + } + if (!is_valid_component(name, end, flags)) + goto bad; + + return (0); + +bad: + uu_set_error(UU_ERROR_INVALID_ARGUMENT); + return (-1); +} diff --git a/external/cddl/osnet/dist/lib/libuutil/common/uu_list.c b/external/cddl/osnet/dist/lib/libuutil/common/uu_list.c new file mode 100644 index 000000000000..35c7ba800103 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/uu_list.c @@ -0,0 +1,718 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "libuutil_common.h" + +#include +#include +#include +#include + +#define ELEM_TO_NODE(lp, e) \ + ((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset)) + +#define NODE_TO_ELEM(lp, n) \ + ((void *)((uintptr_t)(n) - (lp)->ul_offset)) + +/* + * uu_list_index_ts define a location for insertion. They are simply a + * pointer to the object after the insertion point. We store a mark + * in the low-bits of the index, to help prevent mistakes. + * + * When debugging, the index mark changes on every insert and delete, to + * catch stale references. + */ +#define INDEX_MAX (sizeof (uintptr_t) - 1) +#define INDEX_NEXT(m) (((m) == INDEX_MAX)? 
1 : ((m) + 1) & INDEX_MAX) + +#define INDEX_TO_NODE(i) ((uu_list_node_impl_t *)((i) & ~INDEX_MAX)) +#define NODE_TO_INDEX(p, n) (((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index) +#define INDEX_VALID(p, i) (((i) & INDEX_MAX) == (p)->ul_index) +#define INDEX_CHECK(i) (((i) & INDEX_MAX) != 0) + +#define POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1)) + +static uu_list_pool_t uu_null_lpool = { &uu_null_lpool, &uu_null_lpool }; +static pthread_mutex_t uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER; + +uu_list_pool_t * +uu_list_pool_create(const char *name, size_t objsize, + size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags) +{ + uu_list_pool_t *pp, *next, *prev; + + if (name == NULL || + uu_check_name(name, UU_NAME_DOMAIN) == -1 || + nodeoffset + sizeof (uu_list_node_t) > objsize) { + uu_set_error(UU_ERROR_INVALID_ARGUMENT); + return (NULL); + } + + if (flags & ~UU_LIST_POOL_DEBUG) { + uu_set_error(UU_ERROR_UNKNOWN_FLAG); + return (NULL); + } + + pp = uu_zalloc(sizeof (uu_list_pool_t)); + if (pp == NULL) { + uu_set_error(UU_ERROR_NO_MEMORY); + return (NULL); + } + + (void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name)); + pp->ulp_nodeoffset = nodeoffset; + pp->ulp_objsize = objsize; + pp->ulp_cmp = compare_func; + if (flags & UU_LIST_POOL_DEBUG) + pp->ulp_debug = 1; + pp->ulp_last_index = 0; + + (void) pthread_mutex_init(&pp->ulp_lock, NULL); + + pp->ulp_null_list.ul_next_enc = UU_PTR_ENCODE(&pp->ulp_null_list); + pp->ulp_null_list.ul_prev_enc = UU_PTR_ENCODE(&pp->ulp_null_list); + + (void) pthread_mutex_lock(&uu_lpool_list_lock); + pp->ulp_next = next = &uu_null_lpool; + pp->ulp_prev = prev = next->ulp_prev; + next->ulp_prev = pp; + prev->ulp_next = pp; + (void) pthread_mutex_unlock(&uu_lpool_list_lock); + + return (pp); +} + +void +uu_list_pool_destroy(uu_list_pool_t *pp) +{ + if (pp->ulp_debug) { + if (pp->ulp_null_list.ul_next_enc != + UU_PTR_ENCODE(&pp->ulp_null_list) || + pp->ulp_null_list.ul_prev_enc != + UU_PTR_ENCODE(&pp->ulp_null_list)) { + uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has " + "outstanding lists, or is corrupt.\n", + (int)sizeof (pp->ulp_name), pp->ulp_name, + (void *)pp); + } + } + (void) pthread_mutex_lock(&uu_lpool_list_lock); + pp->ulp_next->ulp_prev = pp->ulp_prev; + pp->ulp_prev->ulp_next = pp->ulp_next; + (void) pthread_mutex_unlock(&uu_lpool_list_lock); + pp->ulp_prev = NULL; + pp->ulp_next = NULL; + uu_free(pp); +} + +void +uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp) +{ + uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg; + + if (pp->ulp_debug) { + uintptr_t offset = (uintptr_t)np - (uintptr_t)base; + if (offset + sizeof (*np) > pp->ulp_objsize) { + uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): " + "offset %ld doesn't fit in object (size %ld)\n", + base, (void *)np, (void *)pp, pp->ulp_name, + (long)offset, (long)pp->ulp_objsize); + } + if (offset != pp->ulp_nodeoffset) { + uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): " + "offset %ld doesn't match pool's offset (%ld)\n", + base, (void *)np, (void *)pp, pp->ulp_name, + (long)offset, (long)pp->ulp_objsize); + } + } + np->uln_next = POOL_TO_MARKER(pp); + np->uln_prev = NULL; +} + +void +uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp) +{ + uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg; + + if (pp->ulp_debug) { + if (np->uln_next == NULL && + np->uln_prev == NULL) { + uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): " + "node already finied\n", + base, (void *)np_arg, (void *)pp, pp->ulp_name); + } + if 
(np->uln_next != POOL_TO_MARKER(pp) || + np->uln_prev != NULL) { + uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): " + "node corrupt or on list\n", + base, (void *)np_arg, (void *)pp, pp->ulp_name); + } + } + np->uln_next = NULL; + np->uln_prev = NULL; +} + +uu_list_t * +uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags) +{ + uu_list_t *lp, *next, *prev; + + if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) { + uu_set_error(UU_ERROR_UNKNOWN_FLAG); + return (NULL); + } + + if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) { + if (pp->ulp_debug) + uu_panic("uu_list_create(%p, ...): requested " + "UU_LIST_SORTED, but pool has no comparison func\n", + (void *)pp); + uu_set_error(UU_ERROR_NOT_SUPPORTED); + return (NULL); + } + + lp = uu_zalloc(sizeof (*lp)); + if (lp == NULL) { + uu_set_error(UU_ERROR_NO_MEMORY); + return (NULL); + } + + lp->ul_pool = pp; + lp->ul_parent_enc = UU_PTR_ENCODE(parent); + lp->ul_offset = pp->ulp_nodeoffset; + lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG); + lp->ul_sorted = (flags & UU_LIST_SORTED); + lp->ul_numnodes = 0; + lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index)); + + lp->ul_null_node.uln_next = &lp->ul_null_node; + lp->ul_null_node.uln_prev = &lp->ul_null_node; + + lp->ul_null_walk.ulw_next = &lp->ul_null_walk; + lp->ul_null_walk.ulw_prev = &lp->ul_null_walk; + + (void) pthread_mutex_lock(&pp->ulp_lock); + next = &pp->ulp_null_list; + prev = UU_PTR_DECODE(next->ul_prev_enc); + lp->ul_next_enc = UU_PTR_ENCODE(next); + lp->ul_prev_enc = UU_PTR_ENCODE(prev); + next->ul_prev_enc = UU_PTR_ENCODE(lp); + prev->ul_next_enc = UU_PTR_ENCODE(lp); + (void) pthread_mutex_unlock(&pp->ulp_lock); + + return (lp); +} + +void +uu_list_destroy(uu_list_t *lp) +{ + uu_list_pool_t *pp = lp->ul_pool; + + if (lp->ul_debug) { + if (lp->ul_null_node.uln_next != &lp->ul_null_node || + lp->ul_null_node.uln_prev != &lp->ul_null_node) { + uu_panic("uu_list_destroy(%p): list not empty\n", + (void *)lp); + } + if (lp->ul_numnodes != 0) { + uu_panic("uu_list_destroy(%p): numnodes is nonzero, " + "but list is empty\n", (void *)lp); + } + if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk || + lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) { + uu_panic("uu_list_destroy(%p): outstanding walkers\n", + (void *)lp); + } + } + + (void) pthread_mutex_lock(&pp->ulp_lock); + UU_LIST_PTR(lp->ul_next_enc)->ul_prev_enc = lp->ul_prev_enc; + UU_LIST_PTR(lp->ul_prev_enc)->ul_next_enc = lp->ul_next_enc; + (void) pthread_mutex_unlock(&pp->ulp_lock); + lp->ul_prev_enc = UU_PTR_ENCODE(NULL); + lp->ul_next_enc = UU_PTR_ENCODE(NULL); + lp->ul_pool = NULL; + uu_free(lp); +} + +static void +list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev, + uu_list_node_impl_t *next) +{ + if (lp->ul_debug) { + if (next->uln_prev != prev || prev->uln_next != next) + uu_panic("insert(%p): internal error: %p and %p not " + "neighbors\n", (void *)lp, (void *)next, + (void *)prev); + + if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) || + np->uln_prev != NULL) { + uu_panic("insert(%p): elem %p node %p corrupt, " + "not initialized, or already in a list.\n", + (void *)lp, NODE_TO_ELEM(lp, np), (void *)np); + } + /* + * invalidate outstanding uu_list_index_ts. 
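+ *
+ * Editorial sketch, not part of the original source: the indexes being
+ * invalidated here are the ones a caller obtains from uu_list_find()
+ * and passes straight back to uu_list_insert(), along the lines of
+ *
+ *	uu_list_index_t idx;
+ *
+ *	if (uu_list_find(list, new_elem, NULL, &idx) == NULL)
+ *		uu_list_insert(list, new_elem, idx);
+ *
+ * where "list" and "new_elem" are assumed to come from a pool that has
+ * a comparison function and whose objects had uu_list_node_init() run
+ * on their embedded node.  Such an index is only good until the next
+ * insert or remove, which is what this bump enforces in debug mode.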
+ */ + lp->ul_index = INDEX_NEXT(lp->ul_index); + } + np->uln_next = next; + np->uln_prev = prev; + next->uln_prev = np; + prev->uln_next = np; + + lp->ul_numnodes++; +} + +void +uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx) +{ + uu_list_node_impl_t *np; + + np = INDEX_TO_NODE(idx); + if (np == NULL) + np = &lp->ul_null_node; + + if (lp->ul_debug) { + if (!INDEX_VALID(lp, idx)) + uu_panic("uu_list_insert(%p, %p, %p): %s\n", + (void *)lp, elem, (void *)idx, + INDEX_CHECK(idx)? "outdated index" : + "invalid index"); + if (np->uln_prev == NULL) + uu_panic("uu_list_insert(%p, %p, %p): out-of-date " + "index\n", (void *)lp, elem, (void *)idx); + } + + list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np); +} + +void * +uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out) +{ + int sorted = lp->ul_sorted; + uu_compare_fn_t *func = lp->ul_pool->ulp_cmp; + uu_list_node_impl_t *np; + + if (func == NULL) { + if (out != NULL) + *out = 0; + uu_set_error(UU_ERROR_NOT_SUPPORTED); + return (NULL); + } + for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node; + np = np->uln_next) { + void *ep = NODE_TO_ELEM(lp, np); + int cmp = func(ep, elem, private); + if (cmp == 0) { + if (out != NULL) + *out = NODE_TO_INDEX(lp, np); + return (ep); + } + if (sorted && cmp > 0) { + if (out != NULL) + *out = NODE_TO_INDEX(lp, np); + return (NULL); + } + } + if (out != NULL) + *out = NODE_TO_INDEX(lp, 0); + return (NULL); +} + +void * +uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx) +{ + uu_list_node_impl_t *np = INDEX_TO_NODE(idx); + + if (np == NULL) + np = &lp->ul_null_node; + + if (lp->ul_debug) { + if (!INDEX_VALID(lp, idx)) + uu_panic("uu_list_nearest_next(%p, %p): %s\n", + (void *)lp, (void *)idx, + INDEX_CHECK(idx)? "outdated index" : + "invalid index"); + if (np->uln_prev == NULL) + uu_panic("uu_list_nearest_next(%p, %p): out-of-date " + "index\n", (void *)lp, (void *)idx); + } + + if (np == &lp->ul_null_node) + return (NULL); + else + return (NODE_TO_ELEM(lp, np)); +} + +void * +uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx) +{ + uu_list_node_impl_t *np = INDEX_TO_NODE(idx); + + if (np == NULL) + np = &lp->ul_null_node; + + if (lp->ul_debug) { + if (!INDEX_VALID(lp, idx)) + uu_panic("uu_list_nearest_prev(%p, %p): %s\n", + (void *)lp, (void *)idx, INDEX_CHECK(idx)? + "outdated index" : "invalid index"); + if (np->uln_prev == NULL) + uu_panic("uu_list_nearest_prev(%p, %p): out-of-date " + "index\n", (void *)lp, (void *)idx); + } + + if ((np = np->uln_prev) == &lp->ul_null_node) + return (NULL); + else + return (NODE_TO_ELEM(lp, np)); +} + +static void +list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags) +{ + uu_list_walk_t *next, *prev; + + int robust = (flags & UU_WALK_ROBUST); + int direction = (flags & UU_WALK_REVERSE)? -1 : 1; + + (void) memset(wp, 0, sizeof (*wp)); + wp->ulw_list = lp; + wp->ulw_robust = robust; + wp->ulw_dir = direction; + if (direction > 0) + wp->ulw_next_result = lp->ul_null_node.uln_next; + else + wp->ulw_next_result = lp->ul_null_node.uln_prev; + + if (lp->ul_debug || robust) { + /* + * Add this walker to the list's list of walkers so + * uu_list_remove() can advance us if somebody tries to + * remove ulw_next_result. 
+ */ + wp->ulw_next = next = &lp->ul_null_walk; + wp->ulw_prev = prev = next->ulw_prev; + next->ulw_prev = wp; + prev->ulw_next = wp; + } +} + +static uu_list_node_impl_t * +list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp) +{ + uu_list_node_impl_t *np = wp->ulw_next_result; + uu_list_node_impl_t *next; + + if (np == &lp->ul_null_node) + return (NULL); + + next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev; + + wp->ulw_next_result = next; + return (np); +} + +static void +list_walk_fini(uu_list_walk_t *wp) +{ + /* GLXXX debugging? */ + if (wp->ulw_next != NULL) { + wp->ulw_next->ulw_prev = wp->ulw_prev; + wp->ulw_prev->ulw_next = wp->ulw_next; + wp->ulw_next = NULL; + wp->ulw_prev = NULL; + } + wp->ulw_list = NULL; + wp->ulw_next_result = NULL; +} + +uu_list_walk_t * +uu_list_walk_start(uu_list_t *lp, uint32_t flags) +{ + uu_list_walk_t *wp; + + if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) { + uu_set_error(UU_ERROR_UNKNOWN_FLAG); + return (NULL); + } + + wp = uu_zalloc(sizeof (*wp)); + if (wp == NULL) { + uu_set_error(UU_ERROR_NO_MEMORY); + return (NULL); + } + + list_walk_init(wp, lp, flags); + return (wp); +} + +void * +uu_list_walk_next(uu_list_walk_t *wp) +{ + uu_list_t *lp = wp->ulw_list; + uu_list_node_impl_t *np = list_walk_advance(wp, lp); + + if (np == NULL) + return (NULL); + + return (NODE_TO_ELEM(lp, np)); +} + +void +uu_list_walk_end(uu_list_walk_t *wp) +{ + list_walk_fini(wp); + uu_free(wp); +} + +int +uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags) +{ + uu_list_node_impl_t *np; + + int status = UU_WALK_NEXT; + + int robust = (flags & UU_WALK_ROBUST); + int reverse = (flags & UU_WALK_REVERSE); + + if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) { + uu_set_error(UU_ERROR_UNKNOWN_FLAG); + return (-1); + } + + if (lp->ul_debug || robust) { + uu_list_walk_t my_walk; + void *e; + + list_walk_init(&my_walk, lp, flags); + while (status == UU_WALK_NEXT && + (e = uu_list_walk_next(&my_walk)) != NULL) + status = (*func)(e, private); + list_walk_fini(&my_walk); + } else { + if (!reverse) { + for (np = lp->ul_null_node.uln_next; + status == UU_WALK_NEXT && np != &lp->ul_null_node; + np = np->uln_next) { + status = (*func)(NODE_TO_ELEM(lp, np), private); + } + } else { + for (np = lp->ul_null_node.uln_prev; + status == UU_WALK_NEXT && np != &lp->ul_null_node; + np = np->uln_prev) { + status = (*func)(NODE_TO_ELEM(lp, np), private); + } + } + } + if (status >= 0) + return (0); + uu_set_error(UU_ERROR_CALLBACK_FAILED); + return (-1); +} + +void +uu_list_remove(uu_list_t *lp, void *elem) +{ + uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem); + uu_list_walk_t *wp; + + if (lp->ul_debug) { + if (np->uln_prev == NULL) + uu_panic("uu_list_remove(%p, %p): elem not on list\n", + (void *)lp, elem); + /* + * invalidate outstanding uu_list_index_ts. + */ + lp->ul_index = INDEX_NEXT(lp->ul_index); + } + + /* + * robust walkers must be advanced. In debug mode, non-robust + * walkers are also on the list. If there are any, it's an error. 
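+ *
+ * Editorial sketch, not part of the original source: this advancement
+ * is what lets a caller delete elements while iterating, provided the
+ * walk was started with UU_WALK_ROBUST:
+ *
+ *	uu_list_walk_t *wp;
+ *	void *elem;
+ *
+ *	if ((wp = uu_list_walk_start(list, UU_WALK_ROBUST)) != NULL) {
+ *		while ((elem = uu_list_walk_next(wp)) != NULL) {
+ *			if (no_longer_wanted(elem))
+ *				uu_list_remove(list, elem);
+ *		}
+ *		uu_list_walk_end(wp);
+ *	}
+ *
+ * no_longer_wanted() stands for a hypothetical caller predicate.  A
+ * walk started without UU_WALK_ROBUST must not remove elements; in
+ * debug builds that mistake is caught by the panic below.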
+ */ + for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk; + wp = wp->ulw_next) { + if (wp->ulw_robust) { + if (np == wp->ulw_next_result) + (void) list_walk_advance(wp, lp); + } else if (wp->ulw_next_result != NULL) { + uu_panic("uu_list_remove(%p, %p): active non-robust " + "walker\n", (void *)lp, elem); + } + } + + np->uln_next->uln_prev = np->uln_prev; + np->uln_prev->uln_next = np->uln_next; + + lp->ul_numnodes--; + + np->uln_next = POOL_TO_MARKER(lp->ul_pool); + np->uln_prev = NULL; +} + +void * +uu_list_teardown(uu_list_t *lp, void **cookie) +{ + void *ep; + + /* + * XXX: disable list modification until list is empty + */ + if (lp->ul_debug && *cookie != NULL) + uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n", + (void *)lp, (void *)cookie); + + ep = uu_list_first(lp); + if (ep) + uu_list_remove(lp, ep); + return (ep); +} + +int +uu_list_insert_before(uu_list_t *lp, void *target, void *elem) +{ + uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target); + + if (target == NULL) + np = &lp->ul_null_node; + + if (lp->ul_debug) { + if (np->uln_prev == NULL) + uu_panic("uu_list_insert_before(%p, %p, %p): %p is " + "not currently on a list\n", + (void *)lp, target, elem, target); + } + if (lp->ul_sorted) { + if (lp->ul_debug) + uu_panic("uu_list_insert_before(%p, ...): list is " + "UU_LIST_SORTED\n", (void *)lp); + uu_set_error(UU_ERROR_NOT_SUPPORTED); + return (-1); + } + + list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np); + return (0); +} + +int +uu_list_insert_after(uu_list_t *lp, void *target, void *elem) +{ + uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target); + + if (target == NULL) + np = &lp->ul_null_node; + + if (lp->ul_debug) { + if (np->uln_prev == NULL) + uu_panic("uu_list_insert_after(%p, %p, %p): %p is " + "not currently on a list\n", + (void *)lp, target, elem, target); + } + if (lp->ul_sorted) { + if (lp->ul_debug) + uu_panic("uu_list_insert_after(%p, ...): list is " + "UU_LIST_SORTED\n", (void *)lp); + uu_set_error(UU_ERROR_NOT_SUPPORTED); + return (-1); + } + + list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next); + return (0); +} + +size_t +uu_list_numnodes(uu_list_t *lp) +{ + return (lp->ul_numnodes); +} + +void * +uu_list_first(uu_list_t *lp) +{ + uu_list_node_impl_t *n = lp->ul_null_node.uln_next; + if (n == &lp->ul_null_node) + return (NULL); + return (NODE_TO_ELEM(lp, n)); +} + +void * +uu_list_last(uu_list_t *lp) +{ + uu_list_node_impl_t *n = lp->ul_null_node.uln_prev; + if (n == &lp->ul_null_node) + return (NULL); + return (NODE_TO_ELEM(lp, n)); +} + +void * +uu_list_next(uu_list_t *lp, void *elem) +{ + uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem); + + n = n->uln_next; + if (n == &lp->ul_null_node) + return (NULL); + return (NODE_TO_ELEM(lp, n)); +} + +void * +uu_list_prev(uu_list_t *lp, void *elem) +{ + uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem); + + n = n->uln_prev; + if (n == &lp->ul_null_node) + return (NULL); + return (NODE_TO_ELEM(lp, n)); +} + +/* + * called from uu_lockup() and uu_release(), as part of our fork1()-safety. 
+ */ +void +uu_list_lockup(void) +{ + uu_list_pool_t *pp; + + (void) pthread_mutex_lock(&uu_lpool_list_lock); + for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool; + pp = pp->ulp_next) + (void) pthread_mutex_lock(&pp->ulp_lock); +} + +void +uu_list_release(void) +{ + uu_list_pool_t *pp; + + for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool; + pp = pp->ulp_next) + (void) pthread_mutex_unlock(&pp->ulp_lock); + (void) pthread_mutex_unlock(&uu_lpool_list_lock); +} diff --git a/external/cddl/osnet/dist/lib/libuutil/common/uu_misc.c b/external/cddl/osnet/dist/lib/libuutil/common/uu_misc.c new file mode 100644 index 000000000000..74ec177c11b7 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/uu_misc.c @@ -0,0 +1,255 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "libuutil_common.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(TEXT_DOMAIN) +#define TEXT_DOMAIN "SYS_TEST" +#endif + +/* + * All of the old code under !defined(PTHREAD_ONCE_KEY_NP) + * is here to enable the building of a native version of + * libuutil.so when the build machine has not yet been upgraded + * to a version of libc that provides pthread_key_create_once_np(). + * It should all be deleted when solaris_nevada ships. + * The code is not MT-safe in a relaxed memory model. 
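+ *
+ * Editorial note, not part of the original source: whichever branch is
+ * compiled, the key only backs the public error interface, which a
+ * consumer would use roughly as
+ *
+ *	if (uu_list_pool_create("example_pool", sizeof (struct obj),
+ *	    offsetof(struct obj, node), NULL, 0) == NULL)
+ *		(void) fprintf(stderr, "uu error: %s\n",
+ *		    uu_strerror(uu_error()));
+ *
+ * where "struct obj", with an embedded uu_list_node_t member "node",
+ * is a hypothetical caller-side type.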
+ */ + +#if defined(PTHREAD_ONCE_KEY_NP) +static pthread_key_t uu_error_key = PTHREAD_ONCE_KEY_NP; +#else /* PTHREAD_ONCE_KEY_NP */ +static pthread_key_t uu_error_key = 0; +static pthread_mutex_t uu_key_lock = PTHREAD_MUTEX_INITIALIZER; +#endif /* PTHREAD_ONCE_KEY_NP */ + +static int uu_error_key_setup = 0; + +static pthread_mutex_t uu_panic_lock = PTHREAD_MUTEX_INITIALIZER; +/* LINTED static unused */ +static const char *uu_panic_format; +/* LINTED static unused */ +static va_list uu_panic_args; +static pthread_t uu_panic_thread; + +static uint32_t _uu_main_error; + +void +uu_set_error(uint_t code) +{ + if (thr_main() != 0) { + _uu_main_error = code; + return; + } +#if defined(PTHREAD_ONCE_KEY_NP) + if (pthread_key_create_once_np(&uu_error_key, NULL) != 0) + uu_error_key_setup = -1; + else + uu_error_key_setup = 1; +#else /* PTHREAD_ONCE_KEY_NP */ + if (uu_error_key_setup == 0) { + (void) pthread_mutex_lock(&uu_key_lock); + if (uu_error_key_setup == 0) { + if (pthread_key_create(&uu_error_key, NULL) != 0) + uu_error_key_setup = -1; + else + uu_error_key_setup = 1; + } + (void) pthread_mutex_unlock(&uu_key_lock); + } +#endif /* PTHREAD_ONCE_KEY_NP */ + if (uu_error_key_setup > 0) + (void) pthread_setspecific(uu_error_key, + (void *)(uintptr_t)code); +} + +uint32_t +uu_error(void) +{ + if (thr_main() != 0) + return (_uu_main_error); + + if (uu_error_key_setup < 0) /* can't happen? */ + return (UU_ERROR_UNKNOWN); + + /* + * Because UU_ERROR_NONE == 0, if uu_set_error() was + * never called, then this will return UU_ERROR_NONE: + */ + return ((uint32_t)(uintptr_t)pthread_getspecific(uu_error_key)); +} + +const char * +uu_strerror(uint32_t code) +{ + const char *str; + + switch (code) { + case UU_ERROR_NONE: + str = dgettext(TEXT_DOMAIN, "No error"); + break; + + case UU_ERROR_INVALID_ARGUMENT: + str = dgettext(TEXT_DOMAIN, "Invalid argument"); + break; + + case UU_ERROR_UNKNOWN_FLAG: + str = dgettext(TEXT_DOMAIN, "Unknown flag passed"); + break; + + case UU_ERROR_NO_MEMORY: + str = dgettext(TEXT_DOMAIN, "Out of memory"); + break; + + case UU_ERROR_CALLBACK_FAILED: + str = dgettext(TEXT_DOMAIN, "Callback-initiated failure"); + break; + + case UU_ERROR_NOT_SUPPORTED: + str = dgettext(TEXT_DOMAIN, "Operation not supported"); + break; + + case UU_ERROR_EMPTY: + str = dgettext(TEXT_DOMAIN, "No value provided"); + break; + + case UU_ERROR_UNDERFLOW: + str = dgettext(TEXT_DOMAIN, "Value too small"); + break; + + case UU_ERROR_OVERFLOW: + str = dgettext(TEXT_DOMAIN, "Value too large"); + break; + + case UU_ERROR_INVALID_CHAR: + str = dgettext(TEXT_DOMAIN, + "Value contains unexpected character"); + break; + + case UU_ERROR_INVALID_DIGIT: + str = dgettext(TEXT_DOMAIN, + "Value contains digit not in base"); + break; + + case UU_ERROR_SYSTEM: + str = dgettext(TEXT_DOMAIN, "Underlying system error"); + break; + + case UU_ERROR_UNKNOWN: + str = dgettext(TEXT_DOMAIN, "Error status not known"); + break; + + default: + errno = ESRCH; + str = NULL; + break; + } + return (str); +} + +void +uu_panic(const char *format, ...) 
+{ + va_list args; + + va_start(args, format); + + (void) pthread_mutex_lock(&uu_panic_lock); + if (uu_panic_thread == 0) { + uu_panic_thread = pthread_self(); + uu_panic_format = format; + va_copy(uu_panic_args, args); + } + (void) pthread_mutex_unlock(&uu_panic_lock); + + (void) vfprintf(stderr, format, args); + + if (uu_panic_thread == pthread_self()) + abort(); + else + for (;;) + (void) pause(); +} + +int +assfail(const char *astring, const char *file, int line) +{ + __assert(astring, file, line); + /*NOTREACHED*/ + return (0); +} + +static void +uu_lockup(void) +{ + (void) pthread_mutex_lock(&uu_panic_lock); +#if !defined(PTHREAD_ONCE_KEY_NP) + (void) pthread_mutex_lock(&uu_key_lock); +#endif + uu_avl_lockup(); + uu_list_lockup(); +} + +static void +uu_release(void) +{ + (void) pthread_mutex_unlock(&uu_panic_lock); +#if !defined(PTHREAD_ONCE_KEY_NP) + (void) pthread_mutex_unlock(&uu_key_lock); +#endif + uu_avl_release(); + uu_list_release(); +} + +static void +uu_release_child(void) +{ + uu_panic_format = NULL; + uu_panic_thread = 0; + + uu_release(); +} + +#pragma init(uu_init) +static void +uu_init(void) +{ + (void) pthread_atfork(uu_lockup, uu_release, uu_release_child); +} diff --git a/external/cddl/osnet/dist/lib/libuutil/common/uu_open.c b/external/cddl/osnet/dist/lib/libuutil/common/uu_open.c new file mode 100644 index 000000000000..7256662e38f6 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/uu_open.c @@ -0,0 +1,70 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2004 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "libuutil_common.h" + +#include + +#include +#include +#include +#include +#include + +#ifdef _LP64 +#define TMPPATHFMT "%s/uu%ld" +#else /* _LP64 */ +#define TMPPATHFMT "%s/uu%lld" +#endif /* _LP64 */ + +/*ARGSUSED*/ +int +uu_open_tmp(const char *dir, uint_t uflags) +{ + int f; + char *fname = uu_zalloc(PATH_MAX); + + if (fname == NULL) + return (-1); + + for (;;) { + (void) snprintf(fname, PATH_MAX, "%s/uu%lld", dir, gethrtime()); + + f = open(fname, O_CREAT | O_EXCL | O_RDWR, 0600); + + if (f >= 0 || errno != EEXIST) + break; + } + + if (f >= 0) + (void) unlink(fname); + + uu_free(fname); + + return (f); +} diff --git a/external/cddl/osnet/dist/lib/libuutil/common/uu_pname.c b/external/cddl/osnet/dist/lib/libuutil/common/uu_pname.c new file mode 100644 index 000000000000..3307a26dc40d --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/uu_pname.c @@ -0,0 +1,207 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2005 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "libuutil_common.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const char PNAME_FMT[] = "%s: "; +static const char ERRNO_FMT[] = ": %s\n"; + +static const char *pname; + +static void +uu_die_internal(int status, const char *format, va_list alist) __NORETURN; + +int uu_exit_ok_value = EXIT_SUCCESS; +int uu_exit_fatal_value = EXIT_FAILURE; +int uu_exit_usage_value = 2; + +int * +uu_exit_ok(void) +{ + return (&uu_exit_ok_value); +} + +int * +uu_exit_fatal(void) +{ + return (&uu_exit_fatal_value); +} + +int * +uu_exit_usage(void) +{ + return (&uu_exit_usage_value); +} + +void +uu_alt_exit(int profile) +{ + switch (profile) { + case UU_PROFILE_DEFAULT: + uu_exit_ok_value = EXIT_SUCCESS; + uu_exit_fatal_value = EXIT_FAILURE; + uu_exit_usage_value = 2; + break; + case UU_PROFILE_LAUNCHER: + uu_exit_ok_value = EXIT_SUCCESS; + uu_exit_fatal_value = 124; + uu_exit_usage_value = 125; + break; + } +} + +static void +uu_warn_internal(int err, const char *format, va_list alist) +{ + if (pname != NULL) + (void) fprintf(stderr, PNAME_FMT, pname); + + (void) vfprintf(stderr, format, alist); + + if (strrchr(format, '\n') == NULL) + (void) fprintf(stderr, ERRNO_FMT, strerror(err)); +} + +void +uu_vwarn(const char *format, va_list alist) +{ + uu_warn_internal(errno, format, alist); +} + +/*PRINTFLIKE1*/ +void +uu_warn(const char *format, ...) 
+{ + va_list alist; + va_start(alist, format); + uu_warn_internal(errno, format, alist); + va_end(alist); +} + +static void +uu_die_internal(int status, const char *format, va_list alist) +{ + uu_warn_internal(errno, format, alist); +#ifdef DEBUG + { + char *cp; + + if (!issetugid()) { + cp = getenv("UU_DIE_ABORTS"); + if (cp != NULL && *cp != '\0') + abort(); + } + } +#endif + exit(status); +} + +void +uu_vdie(const char *format, va_list alist) +{ + uu_die_internal(UU_EXIT_FATAL, format, alist); +} + +/*PRINTFLIKE1*/ +void +uu_die(const char *format, ...) +{ + va_list alist; + va_start(alist, format); + uu_die_internal(UU_EXIT_FATAL, format, alist); + va_end(alist); +} + +void +uu_vxdie(int status, const char *format, va_list alist) +{ + uu_die_internal(status, format, alist); +} + +/*PRINTFLIKE2*/ +void +uu_xdie(int status, const char *format, ...) +{ + va_list alist; + va_start(alist, format); + uu_die_internal(status, format, alist); + va_end(alist); +} + +const char * +uu_setpname(char *arg0) +{ + /* + * Having a NULL argv[0], while uncommon, is possible. It + * makes more sense to handle this event in uu_setpname rather + * than in each of its consumers. + */ + if (arg0 == NULL) { + pname = getexecname(); + if (pname == NULL) + pname = "unknown_command"; + return (pname); + } + + /* + * Guard against '/' at end of command invocation. + */ + for (;;) { + char *p = strrchr(arg0, '/'); + if (p == NULL) { + pname = arg0; + break; + } else { + if (*(p + 1) == '\0') { + *p = '\0'; + continue; + } + + pname = p + 1; + break; + } + } + + return (pname); +} + +const char * +uu_getpname(void) +{ + return (pname); +} diff --git a/external/cddl/osnet/dist/lib/libuutil/common/uu_strtoint.c b/external/cddl/osnet/dist/lib/libuutil/common/uu_strtoint.c new file mode 100644 index 000000000000..8fd1148365cb --- /dev/null +++ b/external/cddl/osnet/dist/lib/libuutil/common/uu_strtoint.c @@ -0,0 +1,300 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2004 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include "libuutil_common.h" + +#include +#include + +#define MAX_BASE 36 + +#define IS_DIGIT(x) ((x) >= '0' && (x) <= '9') + +#define CTOI(x) (((x) >= '0' && (x) <= '9') ? (x) - '0' : \ + ((x) >= 'a' && (x) <= 'z') ? 
(x) + 10 - 'a' : (x) + 10 - 'A') + +static int +strtoint(const char *s_arg, uint64_t *out, uint32_t base, int sign) +{ + const unsigned char *s = (const unsigned char *)s_arg; + + uint64_t val = 0; + uint64_t multmax; + + unsigned c, i; + + int neg = 0; + + int bad_digit = 0; + int bad_char = 0; + int overflow = 0; + + if (s == NULL || base == 1 || base > MAX_BASE) { + uu_set_error(UU_ERROR_INVALID_ARGUMENT); + return (-1); + } + + while ((c = *s) != 0 && isspace(c)) + s++; + + switch (c) { + case '-': + if (!sign) + overflow = 1; /* becomes underflow below */ + neg = 1; + /*FALLTHRU*/ + case '+': + c = *++s; + break; + default: + break; + } + + if (c == '\0') { + uu_set_error(UU_ERROR_EMPTY); + return (-1); + } + + if (base == 0) { + if (c != '0') + base = 10; + else if (s[1] == 'x' || s[1] == 'X') + base = 16; + else + base = 8; + } + + if (base == 16 && c == '0' && (s[1] == 'x' || s[1] == 'X')) + c = *(s += 2); + + if ((val = CTOI(c)) >= base) { + if (IS_DIGIT(c)) + bad_digit = 1; + else + bad_char = 1; + val = 0; + } + + multmax = (uint64_t)UINT64_MAX / (uint64_t)base; + + for (c = *++s; c != '\0'; c = *++s) { + if ((i = CTOI(c)) >= base) { + if (isspace(c)) + break; + if (IS_DIGIT(c)) + bad_digit = 1; + else + bad_char = 1; + i = 0; + } + + if (val > multmax) + overflow = 1; + + val *= base; + if ((uint64_t)UINT64_MAX - val < (uint64_t)i) + overflow = 1; + + val += i; + } + + while ((c = *s) != 0) { + if (!isspace(c)) + bad_char = 1; + s++; + } + + if (sign) { + if (neg) { + if (val > -(uint64_t)INT64_MIN) + overflow = 1; + } else { + if (val > INT64_MAX) + overflow = 1; + } + } + + if (neg) + val = -val; + + if (bad_char | bad_digit | overflow) { + if (bad_char) + uu_set_error(UU_ERROR_INVALID_CHAR); + else if (bad_digit) + uu_set_error(UU_ERROR_INVALID_DIGIT); + else if (overflow) { + if (neg) + uu_set_error(UU_ERROR_UNDERFLOW); + else + uu_set_error(UU_ERROR_OVERFLOW); + } + return (-1); + } + + *out = val; + return (0); +} + +int +uu_strtoint(const char *s, void *v, size_t sz, int base, + int64_t min, int64_t max) +{ + uint64_t val_u; + int64_t val; + + if (min > max) + goto bad_argument; + + switch (sz) { + case 1: + if (max > INT8_MAX || min < INT8_MIN) + goto bad_argument; + break; + case 2: + if (max > INT16_MAX || min < INT16_MIN) + goto bad_argument; + break; + case 4: + if (max > INT32_MAX || min < INT32_MIN) + goto bad_argument; + break; + case 8: + if (max > INT64_MAX || min < INT64_MIN) + goto bad_argument; + break; + default: + goto bad_argument; + } + + if (min == 0 && max == 0) { + min = -(1ULL << (8 * sz - 1)); + max = (1ULL << (8 * sz - 1)) - 1; + } + + if (strtoint(s, &val_u, base, 1) == -1) + return (-1); + + val = (int64_t)val_u; + + if (val < min) { + uu_set_error(UU_ERROR_UNDERFLOW); + return (-1); + } else if (val > max) { + uu_set_error(UU_ERROR_OVERFLOW); + return (-1); + } + + switch (sz) { + case 1: + *(int8_t *)v = val; + return (0); + case 2: + *(int16_t *)v = val; + return (0); + case 4: + *(int32_t *)v = val; + return (0); + case 8: + *(int64_t *)v = val; + return (0); + default: + break; /* fall through to bad_argument */ + } + +bad_argument: + uu_set_error(UU_ERROR_INVALID_ARGUMENT); + return (-1); +} + +int +uu_strtouint(const char *s, void *v, size_t sz, int base, + uint64_t min, uint64_t max) +{ + uint64_t val; + + if (min > max) + goto bad_argument; + + switch (sz) { + case 1: + if (max > UINT8_MAX) + goto bad_argument; + break; + case 2: + if (max > UINT16_MAX) + goto bad_argument; + break; + case 4: + if (max > UINT32_MAX) + goto bad_argument; 
+ break; + case 8: + if (max > UINT64_MAX) + goto bad_argument; + break; + default: + goto bad_argument; + } + + if (min == 0 && max == 0) { + /* we have to be careful, since << can overflow */ + max = (1ULL << (8 * sz - 1)) * 2 - 1; + } + + if (strtoint(s, &val, base, 0) == -1) + return (-1); + + if (val < min) { + uu_set_error(UU_ERROR_UNDERFLOW); + return (-1); + } else if (val > max) { + uu_set_error(UU_ERROR_OVERFLOW); + return (-1); + } + + switch (sz) { + case 1: + *(uint8_t *)v = val; + return (0); + case 2: + *(uint16_t *)v = val; + return (0); + case 4: + *(uint32_t *)v = val; + return (0); + case 8: + *(uint64_t *)v = val; + return (0); + default: + break; /* shouldn't happen, fall through */ + } + +bad_argument: + uu_set_error(UU_ERROR_INVALID_ARGUMENT); + return (-1); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs.h b/external/cddl/osnet/dist/lib/libzfs/common/libzfs.h new file mode 100644 index 000000000000..c650865f30ad --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs.h @@ -0,0 +1,570 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _LIBZFS_H +#define _LIBZFS_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Miscellaneous ZFS constants + */ +#define ZFS_MAXNAMELEN MAXNAMELEN +#define ZPOOL_MAXNAMELEN MAXNAMELEN +#define ZFS_MAXPROPLEN MAXPATHLEN +#define ZPOOL_MAXPROPLEN MAXPATHLEN + +/* + * libzfs errors + */ +enum { + EZFS_NOMEM = 2000, /* out of memory */ + EZFS_BADPROP, /* invalid property value */ + EZFS_PROPREADONLY, /* cannot set readonly property */ + EZFS_PROPTYPE, /* property does not apply to dataset type */ + EZFS_PROPNONINHERIT, /* property is not inheritable */ + EZFS_PROPSPACE, /* bad quota or reservation */ + EZFS_BADTYPE, /* dataset is not of appropriate type */ + EZFS_BUSY, /* pool or dataset is busy */ + EZFS_EXISTS, /* pool or dataset already exists */ + EZFS_NOENT, /* no such pool or dataset */ + EZFS_BADSTREAM, /* bad backup stream */ + EZFS_DSREADONLY, /* dataset is readonly */ + EZFS_VOLTOOBIG, /* volume is too large for 32-bit system */ + EZFS_VOLHASDATA, /* volume already contains data */ + EZFS_INVALIDNAME, /* invalid dataset name */ + EZFS_BADRESTORE, /* unable to restore to destination */ + EZFS_BADBACKUP, /* backup failed */ + EZFS_BADTARGET, /* bad attach/detach/replace target */ + EZFS_NODEVICE, /* no such device in pool */ + EZFS_BADDEV, /* invalid device to add */ + EZFS_NOREPLICAS, /* no valid replicas */ + EZFS_RESILVERING, /* currently resilvering */ + EZFS_BADVERSION, /* unsupported version */ + EZFS_POOLUNAVAIL, /* pool is currently unavailable */ + EZFS_DEVOVERFLOW, /* too many devices in one vdev */ + EZFS_BADPATH, /* must be an absolute path */ + EZFS_CROSSTARGET, /* rename or clone across pool or dataset */ + EZFS_ZONED, /* used improperly in local zone */ + EZFS_MOUNTFAILED, /* failed to mount dataset */ + EZFS_UMOUNTFAILED, /* failed to unmount dataset */ + EZFS_UNSHARENFSFAILED, /* unshare(1M) failed */ + EZFS_SHARENFSFAILED, /* share(1M) failed */ + EZFS_DEVLINKS, /* failed to create zvol links */ + EZFS_PERM, /* permission denied */ + EZFS_NOSPC, /* out of space */ + EZFS_IO, /* I/O error */ + EZFS_INTR, /* signal received */ + EZFS_ISSPARE, /* device is a hot spare */ + EZFS_INVALCONFIG, /* invalid vdev configuration */ + EZFS_RECURSIVE, /* recursive dependency */ + EZFS_NOHISTORY, /* no history object */ + EZFS_UNSHAREISCSIFAILED, /* iscsitgtd failed request to unshare */ + EZFS_SHAREISCSIFAILED, /* iscsitgtd failed request to share */ + EZFS_POOLPROPS, /* couldn't retrieve pool props */ + EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */ + EZFS_POOL_INVALARG, /* invalid argument for this pool operation */ + EZFS_NAMETOOLONG, /* dataset name is too long */ + EZFS_OPENFAILED, /* open of device failed */ + EZFS_NOCAP, /* couldn't get capacity */ + EZFS_LABELFAILED, /* write of label failed */ + EZFS_ISCSISVCUNAVAIL, /* iscsi service unavailable */ + EZFS_BADWHO, /* invalid permission who */ + EZFS_BADPERM, /* invalid permission */ + EZFS_BADPERMSET, /* invalid permission set name */ + EZFS_NODELEGATION, /* delegated administration is disabled */ + EZFS_PERMRDONLY, /* pemissions are readonly */ + EZFS_UNSHARESMBFAILED, /* failed to unshare over smb */ + EZFS_SHARESMBFAILED, /* failed to share over smb */ + EZFS_BADCACHE, /* bad cache file */ + EZFS_ISL2CACHE, /* device is for the level 2 ARC */ + EZFS_VDEVNOTSUP, /* unsupported vdev type */ + EZFS_NOTSUP, /* ops not supported on this dataset */ + EZFS_ACTIVE_SPARE, /* pool has active shared spare devices 
*/ + EZFS_UNKNOWN +}; + +/* + * The following data structures are all part + * of the zfs_allow_t data structure which is + * used for printing 'allow' permissions. + * It is a linked list of zfs_allow_t's which + * then contain avl tree's for user/group/sets/... + * and each one of the entries in those trees have + * avl tree's for the permissions they belong to and + * whether they are local,descendent or local+descendent + * permissions. The AVL trees are used primarily for + * sorting purposes, but also so that we can quickly find + * a given user and or permission. + */ +typedef struct zfs_perm_node { + avl_node_t z_node; + char z_pname[MAXPATHLEN]; +} zfs_perm_node_t; + +typedef struct zfs_allow_node { + avl_node_t z_node; + char z_key[MAXPATHLEN]; /* name, such as joe */ + avl_tree_t z_localdescend; /* local+descendent perms */ + avl_tree_t z_local; /* local permissions */ + avl_tree_t z_descend; /* descendent permissions */ +} zfs_allow_node_t; + +typedef struct zfs_allow { + struct zfs_allow *z_next; + char z_setpoint[MAXPATHLEN]; + avl_tree_t z_sets; + avl_tree_t z_crperms; + avl_tree_t z_user; + avl_tree_t z_group; + avl_tree_t z_everyone; +} zfs_allow_t; + +/* + * Basic handle types + */ +typedef struct zfs_handle zfs_handle_t; +typedef struct zpool_handle zpool_handle_t; +typedef struct libzfs_handle libzfs_handle_t; + +/* + * Library initialization + */ +extern libzfs_handle_t *libzfs_init(void); +extern void libzfs_fini(libzfs_handle_t *); + +extern libzfs_handle_t *zpool_get_handle(zpool_handle_t *); +extern libzfs_handle_t *zfs_get_handle(zfs_handle_t *); + +extern void libzfs_print_on_error(libzfs_handle_t *, boolean_t); + +extern int libzfs_errno(libzfs_handle_t *); +extern const char *libzfs_error_action(libzfs_handle_t *); +extern const char *libzfs_error_description(libzfs_handle_t *); + +/* + * Basic handle functions + */ +extern zpool_handle_t *zpool_open(libzfs_handle_t *, const char *); +extern zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *); +extern void zpool_close(zpool_handle_t *); +extern const char *zpool_get_name(zpool_handle_t *); +extern int zpool_get_state(zpool_handle_t *); +extern char *zpool_state_to_name(vdev_state_t, vdev_aux_t); +extern void zpool_free_handles(libzfs_handle_t *); + +/* + * Iterate over all active pools in the system. 
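+ *
+ * Editorial sketch, not part of the original source: a minimal
+ * consumer of the iterator declared below might look like
+ *
+ *	static int
+ *	print_pool(zpool_handle_t *zhp, void *data)
+ *	{
+ *		(void) printf("%s\n", zpool_get_name(zhp));
+ *		zpool_close(zhp);
+ *		return (0);
+ *	}
+ *
+ *	libzfs_handle_t *g_zfs = libzfs_init();
+ *	if (g_zfs != NULL) {
+ *		(void) zpool_iter(g_zfs, print_pool, NULL);
+ *		libzfs_fini(g_zfs);
+ *	}
+ *
+ * assuming the callback is responsible for closing each handle it is
+ * handed and returns nonzero only to stop the iteration early.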
+ */ +typedef int (*zpool_iter_f)(zpool_handle_t *, void *); +extern int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *); + +/* + * Functions to create and destroy pools + */ +extern int zpool_create(libzfs_handle_t *, const char *, nvlist_t *, + nvlist_t *, nvlist_t *); +extern int zpool_destroy(zpool_handle_t *); +extern int zpool_add(zpool_handle_t *, nvlist_t *); + +/* + * Functions to manipulate pool and vdev state + */ +extern int zpool_scrub(zpool_handle_t *, pool_scrub_type_t); +extern int zpool_clear(zpool_handle_t *, const char *); + +extern int zpool_vdev_online(zpool_handle_t *, const char *, int, + vdev_state_t *); +extern int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t); +extern int zpool_vdev_attach(zpool_handle_t *, const char *, + const char *, nvlist_t *, int); +extern int zpool_vdev_detach(zpool_handle_t *, const char *); +extern int zpool_vdev_remove(zpool_handle_t *, const char *); + +extern int zpool_vdev_fault(zpool_handle_t *, uint64_t); +extern int zpool_vdev_degrade(zpool_handle_t *, uint64_t); +extern int zpool_vdev_clear(zpool_handle_t *, uint64_t); + +extern nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *, + boolean_t *, boolean_t *); +extern int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *, char *); + +/* + * Functions to manage pool properties + */ +extern int zpool_set_prop(zpool_handle_t *, const char *, const char *); +extern int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *, + size_t proplen, zprop_source_t *); +extern uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t, + zprop_source_t *); + +extern const char *zpool_prop_to_name(zpool_prop_t); +extern const char *zpool_prop_values(zpool_prop_t); + +/* + * Pool health statistics. + */ +typedef enum { + /* + * The following correspond to faults as defined in the (fault.fs.zfs.*) + * event namespace. Each is associated with a corresponding message ID. + */ + ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */ + ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */ + ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */ + ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */ + ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */ + ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */ + ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */ + ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */ + ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */ + ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */ + ZPOOL_STATUS_HOSTID_MISMATCH, /* last accessed by another system */ + ZPOOL_STATUS_IO_FAILURE_WAIT, /* failed I/O, failmode 'wait' */ + ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */ + ZPOOL_STATUS_FAULTED_DEV_R, /* faulted device with replicas */ + ZPOOL_STATUS_FAULTED_DEV_NR, /* faulted device with no replicas */ + ZPOOL_STATUS_BAD_LOG, /* cannot read log chain(s) */ + + /* + * The following are not faults per se, but still an error possibly + * requiring administrative attention. There is no corresponding + * message ID. + */ + ZPOOL_STATUS_VERSION_OLDER, /* older on-disk version */ + ZPOOL_STATUS_RESILVERING, /* device being resilvered */ + ZPOOL_STATUS_OFFLINE_DEV, /* device online */ + + /* + * Finally, the following indicates a healthy pool. 
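+ *
+ * Editorial sketch, not part of the original source: callers normally
+ * consume these values through zpool_get_status(), declared below:
+ *
+ *	char *msgid;
+ *
+ *	if (zpool_get_status(zhp, &msgid) == ZPOOL_STATUS_OK)
+ *		(void) printf("%s: healthy\n", zpool_get_name(zhp));
+ *
+ * where "zhp" is an open zpool_handle_t and, for the fault cases
+ * above, "msgid" is expected to name the corresponding message ID.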
+ */ + ZPOOL_STATUS_OK +} zpool_status_t; + +extern zpool_status_t zpool_get_status(zpool_handle_t *, char **); +extern zpool_status_t zpool_import_status(nvlist_t *, char **); + +/* + * Statistics and configuration functions. + */ +extern nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **); +extern int zpool_refresh_stats(zpool_handle_t *, boolean_t *); +extern int zpool_get_errlog(zpool_handle_t *, nvlist_t **); + +/* + * Import and export functions + */ +extern int zpool_export(zpool_handle_t *, boolean_t); +extern int zpool_import(libzfs_handle_t *, nvlist_t *, const char *, + char *altroot); +extern int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *, + nvlist_t *, boolean_t); + +/* + * Search for pools to import + */ +extern nvlist_t *zpool_find_import(libzfs_handle_t *, int, char **); +extern nvlist_t *zpool_find_import_cached(libzfs_handle_t *, const char *, + char *, uint64_t); +extern nvlist_t *zpool_find_import_byname(libzfs_handle_t *, int, char **, + char *); +extern nvlist_t *zpool_find_import_byguid(libzfs_handle_t *, int, char **, + uint64_t); +extern nvlist_t *zpool_find_import_activeok(libzfs_handle_t *, int, char **); + +/* + * Miscellaneous pool functions + */ +struct zfs_cmd; + +extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *); +extern int zpool_upgrade(zpool_handle_t *, uint64_t); +extern int zpool_get_history(zpool_handle_t *, nvlist_t **); +extern void zpool_set_history_str(const char *subcommand, int argc, + char **argv, char *history_str); +extern int zpool_stage_history(libzfs_handle_t *, const char *); +extern void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *, + size_t len); +extern int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *); +extern int zpool_get_physpath(zpool_handle_t *, char *); +/* + * Basic handle manipulations. These functions do not create or destroy the + * underlying datasets, only the references to them. + */ +extern zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int); +extern void zfs_close(zfs_handle_t *); +extern zfs_type_t zfs_get_type(const zfs_handle_t *); +extern const char *zfs_get_name(const zfs_handle_t *); +extern zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *); + +/* + * Property management functions. Some functions are shared with the kernel, + * and are found in sys/fs/zfs.h. 
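+ *
+ * Editorial sketch, not part of the original source: the usual pattern
+ * for reading a property as a string, also used later in
+ * libzfs_changelist.c, is
+ *
+ *	char buf[ZFS_MAXPROPLEN];
+ *
+ *	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, sizeof (buf),
+ *	    NULL, NULL, 0, B_FALSE) == 0)
+ *		(void) printf("mountpoint: %s\n", buf);
+ *
+ * with NULL/0 for the source arguments when the caller does not need
+ * to know where the value was inherited from.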
+ */ + +/* + * zfs dataset property management + */ +extern const char *zfs_prop_default_string(zfs_prop_t); +extern uint64_t zfs_prop_default_numeric(zfs_prop_t); +extern const char *zfs_prop_column_name(zfs_prop_t); +extern boolean_t zfs_prop_align_right(zfs_prop_t); + +extern nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t, + nvlist_t *, uint64_t, zfs_handle_t *, const char *); + +extern const char *zfs_prop_to_name(zfs_prop_t); +extern int zfs_prop_set(zfs_handle_t *, const char *, const char *); +extern int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t, + zprop_source_t *, char *, size_t, boolean_t); +extern int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *, + zprop_source_t *, char *, size_t); +extern uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t); +extern int zfs_prop_inherit(zfs_handle_t *, const char *); +extern const char *zfs_prop_values(zfs_prop_t); +extern int zfs_prop_is_string(zfs_prop_t prop); +extern nvlist_t *zfs_get_user_props(zfs_handle_t *); + +typedef struct zprop_list { + int pl_prop; + char *pl_user_prop; + struct zprop_list *pl_next; + boolean_t pl_all; + size_t pl_width; + boolean_t pl_fixed; +} zprop_list_t; + +extern int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **); + +#define ZFS_MOUNTPOINT_NONE "none" +#define ZFS_MOUNTPOINT_LEGACY "legacy" + +/* + * zpool property management + */ +extern int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **); +extern const char *zpool_prop_default_string(zpool_prop_t); +extern uint64_t zpool_prop_default_numeric(zpool_prop_t); +extern const char *zpool_prop_column_name(zpool_prop_t); +extern boolean_t zpool_prop_align_right(zpool_prop_t); + +/* + * Functions shared by zfs and zpool property management. + */ +extern int zprop_iter(zprop_func func, void *cb, boolean_t show_all, + boolean_t ordered, zfs_type_t type); +extern int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **, + zfs_type_t); +extern void zprop_free_list(zprop_list_t *); + +/* + * Functions for printing zfs or zpool properties + */ +typedef struct zprop_get_cbdata { + int cb_sources; + int cb_columns[4]; + int cb_colwidths[5]; + boolean_t cb_scripted; + boolean_t cb_literal; + boolean_t cb_first; + zprop_list_t *cb_proplist; + zfs_type_t cb_type; +} zprop_get_cbdata_t; + +void zprop_print_one_property(const char *, zprop_get_cbdata_t *, + const char *, const char *, zprop_source_t, const char *); + +#define GET_COL_NAME 1 +#define GET_COL_PROPERTY 2 +#define GET_COL_VALUE 3 +#define GET_COL_SOURCE 4 + +/* + * Iterator functions. + */ +typedef int (*zfs_iter_f)(zfs_handle_t *, void *); +extern int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *); +extern int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *); +extern int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f, void *); +extern int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *); +extern int zfs_iter_snapshots(zfs_handle_t *, zfs_iter_f, void *); + +/* + * Functions to create and destroy datasets. 
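+ *
+ * Editorial sketch, not part of the original source, using a
+ * hypothetical dataset name:
+ *
+ *	if (zfs_create(hdl, "tank/home/user", ZFS_TYPE_FILESYSTEM,
+ *	    NULL) == 0) {
+ *		zfs_handle_t *zhp = zfs_open(hdl, "tank/home/user",
+ *		    ZFS_TYPE_FILESYSTEM);
+ *		if (zhp != NULL) {
+ *			(void) zfs_mount(zhp, NULL, 0);
+ *			zfs_close(zhp);
+ *		}
+ *	}
+ *
+ * where "hdl" is the libzfs_handle_t from libzfs_init() and a NULL
+ * property nvlist asks for default property values.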
+ */ +extern int zfs_create(libzfs_handle_t *, const char *, zfs_type_t, + nvlist_t *); +extern int zfs_create_ancestors(libzfs_handle_t *, const char *); +extern int zfs_destroy(zfs_handle_t *); +extern int zfs_destroy_snaps(zfs_handle_t *, char *); +extern int zfs_clone(zfs_handle_t *, const char *, nvlist_t *); +extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t, nvlist_t *); +extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t); +extern int zfs_rename(zfs_handle_t *, const char *, boolean_t); +extern int zfs_send(zfs_handle_t *, const char *, const char *, + boolean_t, boolean_t, boolean_t, boolean_t, int); +extern int zfs_promote(zfs_handle_t *); + +typedef struct recvflags { + /* print informational messages (ie, -v was specified) */ + int verbose : 1; + + /* the destination is a prefix, not the exact fs (ie, -d) */ + int isprefix : 1; + + /* do not actually do the recv, just check if it would work (ie, -n) */ + int dryrun : 1; + + /* rollback/destroy filesystems as necessary (eg, -F) */ + int force : 1; + + /* set "canmount=off" on all modified filesystems */ + int canmountoff : 1; + + /* byteswap flag is used internally; callers need not specify */ + int byteswap : 1; +} recvflags_t; + +extern int zfs_receive(libzfs_handle_t *, const char *, recvflags_t, + int, avl_tree_t *); + +/* + * Miscellaneous functions. + */ +extern const char *zfs_type_to_name(zfs_type_t); +extern void zfs_refresh_properties(zfs_handle_t *); +extern int zfs_name_valid(const char *, zfs_type_t); +extern zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, char *, zfs_type_t); +extern boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *, + zfs_type_t); +extern int zfs_spa_version(zfs_handle_t *, int *); + +/* + * dataset permission functions. + */ +extern int zfs_perm_set(zfs_handle_t *, nvlist_t *); +extern int zfs_perm_remove(zfs_handle_t *, nvlist_t *); +extern int zfs_build_perms(zfs_handle_t *, char *, char *, + zfs_deleg_who_type_t, zfs_deleg_inherit_t, nvlist_t **nvlist_t); +extern int zfs_perm_get(zfs_handle_t *, zfs_allow_t **); +extern void zfs_free_allows(zfs_allow_t *); +extern void zfs_deleg_permissions(void); + +/* + * Mount support functions. + */ +extern boolean_t is_mounted(libzfs_handle_t *, const char *special, char **); +extern boolean_t zfs_is_mounted(zfs_handle_t *, char **); +extern int zfs_mount(zfs_handle_t *, const char *, int); +extern int zfs_unmount(zfs_handle_t *, const char *, int); +extern int zfs_unmountall(zfs_handle_t *, int); + +/* + * Share support functions. + */ +extern boolean_t zfs_is_shared(zfs_handle_t *); +extern int zfs_share(zfs_handle_t *); +extern int zfs_unshare(zfs_handle_t *); + +/* + * Protocol-specific share support functions. 
+ */ +extern boolean_t zfs_is_shared_nfs(zfs_handle_t *, char **); +extern boolean_t zfs_is_shared_smb(zfs_handle_t *, char **); +extern int zfs_share_nfs(zfs_handle_t *); +extern int zfs_share_smb(zfs_handle_t *); +extern int zfs_shareall(zfs_handle_t *); +extern int zfs_unshare_nfs(zfs_handle_t *, const char *); +extern int zfs_unshare_smb(zfs_handle_t *, const char *); +extern int zfs_unshareall_nfs(zfs_handle_t *); +extern int zfs_unshareall_smb(zfs_handle_t *); +extern int zfs_unshareall_bypath(zfs_handle_t *, const char *); +extern int zfs_unshareall(zfs_handle_t *); +extern boolean_t zfs_is_shared_iscsi(zfs_handle_t *); +extern int zfs_share_iscsi(zfs_handle_t *); +extern int zfs_unshare_iscsi(zfs_handle_t *); +extern int zfs_iscsi_perm_check(libzfs_handle_t *, char *, ucred_t *); +extern int zfs_deleg_share_nfs(libzfs_handle_t *, char *, char *, + void *, void *, int, zfs_share_op_t); + +/* + * When dealing with nvlists, verify() is extremely useful + */ +#ifdef NDEBUG +#define verify(EX) ((void)(EX)) +#else +#define verify(EX) assert(EX) +#endif + +/* + * Utility function to convert a number to a human-readable form. + */ +extern void zfs_nicenum(uint64_t, char *, size_t); +extern int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *); + +/* + * Given a device or file, determine if it is part of a pool. + */ +extern int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **, + boolean_t *); + +/* + * ftyp special. Read the label from a given device. + */ +extern int zpool_read_label(int, nvlist_t **); + +/* + * Create and remove zvol /dev links. + */ +extern int zpool_create_zvol_links(zpool_handle_t *); +extern int zpool_remove_zvol_links(zpool_handle_t *); + +/* is this zvol valid for use as a dump device? */ +extern int zvol_check_dump_config(char *); + +/* + * Enable and disable datasets within a pool by mounting/unmounting and + * sharing/unsharing them. + */ +extern int zpool_enable_datasets(zpool_handle_t *, const char *, int); +extern int zpool_disable_datasets(zpool_handle_t *, boolean_t); + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBZFS_H */ diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_changelist.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_changelist.c new file mode 100644 index 000000000000..b905bc6cb6af --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_changelist.c @@ -0,0 +1,713 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ * + * Portions Copyright 2007 Ramprakash Jelari + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "libzfs_impl.h" + +/* + * Structure to keep track of dataset state. Before changing the 'sharenfs' or + * 'mountpoint' property, we record whether the filesystem was previously + * mounted/shared. This prior state dictates whether we remount/reshare the + * dataset after the property has been changed. + * + * The interface consists of the following sequence of functions: + * + * changelist_gather() + * changelist_prefix() + * < change property > + * changelist_postfix() + * changelist_free() + * + * Other interfaces: + * + * changelist_remove() - remove a node from a gathered list + * changelist_rename() - renames all datasets appropriately when doing a rename + * changelist_unshare() - unshares all the nodes in a given changelist + * changelist_haszonedchild() - check if there is any child exported to + * a local zone + */ +typedef struct prop_changenode { + zfs_handle_t *cn_handle; + int cn_shared; + int cn_mounted; + int cn_zoned; + boolean_t cn_needpost; /* is postfix() needed? */ + uu_list_node_t cn_listnode; +} prop_changenode_t; + +struct prop_changelist { + zfs_prop_t cl_prop; + zfs_prop_t cl_realprop; + zfs_prop_t cl_shareprop; /* used with sharenfs/sharesmb */ + uu_list_pool_t *cl_pool; + uu_list_t *cl_list; + boolean_t cl_waslegacy; + boolean_t cl_allchildren; + boolean_t cl_alldependents; + int cl_mflags; /* Mount flags */ + int cl_gflags; /* Gather request flags */ + boolean_t cl_haszonedchild; + boolean_t cl_sorted; +}; + +/* + * If the property is 'mountpoint', go through and unmount filesystems as + * necessary. We don't do the same for 'sharenfs', because we can just re-share + * with different options without interrupting service. We do handle 'sharesmb' + * since there may be old resource names that need to be removed. + */ +int +changelist_prefix(prop_changelist_t *clp) +{ + prop_changenode_t *cn; + int ret = 0; + + if (clp->cl_prop != ZFS_PROP_MOUNTPOINT && + clp->cl_prop != ZFS_PROP_SHARESMB) + return (0); + + for (cn = uu_list_first(clp->cl_list); cn != NULL; + cn = uu_list_next(clp->cl_list, cn)) { + + /* if a previous loop failed, set the remaining to false */ + if (ret == -1) { + cn->cn_needpost = B_FALSE; + continue; + } + + /* + * If we are in the global zone, but this dataset is exported + * to a local zone, do nothing. + */ + if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned) + continue; + + if (ZFS_IS_VOLUME(cn->cn_handle)) { + switch (clp->cl_realprop) { + case ZFS_PROP_NAME: + /* + * If this was a rename, unshare the zvol, and + * remove the /dev/zvol links. + */ + (void) zfs_unshare_iscsi(cn->cn_handle); + + if (zvol_remove_link(cn->cn_handle->zfs_hdl, + cn->cn_handle->zfs_name) != 0) { + ret = -1; + cn->cn_needpost = B_FALSE; + (void) zfs_share_iscsi(cn->cn_handle); + } + break; + + case ZFS_PROP_VOLSIZE: + /* + * If this was a change to the volume size, we + * need to unshare and reshare the volume. + */ + (void) zfs_unshare_iscsi(cn->cn_handle); + break; + } + } else { + /* + * Do the property specific processing. 
+ */ + switch (clp->cl_prop) { + case ZFS_PROP_MOUNTPOINT: + if (zfs_unmount(cn->cn_handle, NULL, + clp->cl_mflags) != 0) { + ret = -1; + cn->cn_needpost = B_FALSE; + } + break; + case ZFS_PROP_SHARESMB: + (void) zfs_unshare_smb(cn->cn_handle, NULL); + break; + } + } + } + + if (ret == -1) + (void) changelist_postfix(clp); + + return (ret); +} + +/* + * If the property is 'mountpoint' or 'sharenfs', go through and remount and/or + * reshare the filesystems as necessary. In changelist_gather() we recorded + * whether the filesystem was previously shared or mounted. The action we take + * depends on the previous state, and whether the value was previously 'legacy'. + * For non-legacy properties, we only remount/reshare the filesystem if it was + * previously mounted/shared. Otherwise, we always remount/reshare the + * filesystem. + */ +int +changelist_postfix(prop_changelist_t *clp) +{ + prop_changenode_t *cn; + char shareopts[ZFS_MAXPROPLEN]; + int errors = 0; + libzfs_handle_t *hdl; + + /* + * If we're changing the mountpoint, attempt to destroy the underlying + * mountpoint. All other datasets will have inherited from this dataset + * (in which case their mountpoints exist in the filesystem in the new + * location), or have explicit mountpoints set (in which case they won't + * be in the changelist). + */ + if ((cn = uu_list_last(clp->cl_list)) == NULL) + return (0); + + if (clp->cl_prop == ZFS_PROP_MOUNTPOINT) + remove_mountpoint(cn->cn_handle); + + /* + * It is possible that the changelist_prefix() used libshare + * to unshare some entries. Since libshare caches data, an + * attempt to reshare during postfix can fail unless libshare + * is uninitialized here so that it will reinitialize later. + */ + if (cn->cn_handle != NULL) { + hdl = cn->cn_handle->zfs_hdl; + assert(hdl != NULL); + zfs_uninit_libshare(hdl); + } + + /* + * We walk the datasets in reverse, because we want to mount any parent + * datasets before mounting the children. We walk all datasets even if + * there are errors. + */ + for (cn = uu_list_last(clp->cl_list); cn != NULL; + cn = uu_list_prev(clp->cl_list, cn)) { + + boolean_t sharenfs; + boolean_t sharesmb; + + /* + * If we are in the global zone, but this dataset is exported + * to a local zone, do nothing. + */ + if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned) + continue; + + /* Only do post-processing if it's required */ + if (!cn->cn_needpost) + continue; + cn->cn_needpost = B_FALSE; + + zfs_refresh_properties(cn->cn_handle); + + if (ZFS_IS_VOLUME(cn->cn_handle)) { + /* + * If we're doing a rename, recreate the /dev/zvol + * links. + */ + if (clp->cl_realprop == ZFS_PROP_NAME && + zvol_create_link(cn->cn_handle->zfs_hdl, + cn->cn_handle->zfs_name) != 0) { + errors++; + } else if (cn->cn_shared || + clp->cl_prop == ZFS_PROP_SHAREISCSI) { + if (zfs_prop_get(cn->cn_handle, + ZFS_PROP_SHAREISCSI, shareopts, + sizeof (shareopts), NULL, NULL, 0, + B_FALSE) == 0 && + strcmp(shareopts, "off") == 0) { + errors += + zfs_unshare_iscsi(cn->cn_handle); + } else { + errors += + zfs_share_iscsi(cn->cn_handle); + } + } + + continue; + } + + /* + * Remount if previously mounted or mountpoint was legacy, + * or sharenfs or sharesmb property is set. 
+ */ + sharenfs = ((zfs_prop_get(cn->cn_handle, ZFS_PROP_SHARENFS, + shareopts, sizeof (shareopts), NULL, NULL, 0, + B_FALSE) == 0) && (strcmp(shareopts, "off") != 0)); + + sharesmb = ((zfs_prop_get(cn->cn_handle, ZFS_PROP_SHARESMB, + shareopts, sizeof (shareopts), NULL, NULL, 0, + B_FALSE) == 0) && (strcmp(shareopts, "off") != 0)); + + if ((cn->cn_mounted || clp->cl_waslegacy || sharenfs || + sharesmb) && !zfs_is_mounted(cn->cn_handle, NULL) && + zfs_mount(cn->cn_handle, NULL, 0) != 0) + errors++; + + /* + * We always re-share even if the filesystem is currently + * shared, so that we can adopt any new options. + */ + if (sharenfs) + errors += zfs_share_nfs(cn->cn_handle); + else if (cn->cn_shared || clp->cl_waslegacy) + errors += zfs_unshare_nfs(cn->cn_handle, NULL); + if (sharesmb) + errors += zfs_share_smb(cn->cn_handle); + else if (cn->cn_shared || clp->cl_waslegacy) + errors += zfs_unshare_smb(cn->cn_handle, NULL); + } + + return (errors ? -1 : 0); +} + +/* + * Is this "dataset" a child of "parent"? + */ +boolean_t +isa_child_of(const char *dataset, const char *parent) +{ + int len; + + len = strlen(parent); + + if (strncmp(dataset, parent, len) == 0 && + (dataset[len] == '@' || dataset[len] == '/' || + dataset[len] == '\0')) + return (B_TRUE); + else + return (B_FALSE); + +} + +/* + * If we rename a filesystem, child filesystem handles are no longer valid + * since we identify each dataset by its name in the ZFS namespace. As a + * result, we have to go through and fix up all the names appropriately. We + * could do this automatically if libzfs kept track of all open handles, but + * this is a lot less work. + */ +void +changelist_rename(prop_changelist_t *clp, const char *src, const char *dst) +{ + prop_changenode_t *cn; + char newname[ZFS_MAXNAMELEN]; + + for (cn = uu_list_first(clp->cl_list); cn != NULL; + cn = uu_list_next(clp->cl_list, cn)) { + /* + * Do not rename a clone that's not in the source hierarchy. + */ + if (!isa_child_of(cn->cn_handle->zfs_name, src)) + continue; + + /* + * Destroy the previous mountpoint if needed. + */ + remove_mountpoint(cn->cn_handle); + + (void) strlcpy(newname, dst, sizeof (newname)); + (void) strcat(newname, cn->cn_handle->zfs_name + strlen(src)); + + (void) strlcpy(cn->cn_handle->zfs_name, newname, + sizeof (cn->cn_handle->zfs_name)); + } +} + +/* + * Given a gathered changelist for the 'sharenfs' or 'sharesmb' property, + * unshare all the datasets in the list. + */ +int +changelist_unshare(prop_changelist_t *clp, zfs_share_proto_t *proto) +{ + prop_changenode_t *cn; + int ret = 0; + + if (clp->cl_prop != ZFS_PROP_SHARENFS && + clp->cl_prop != ZFS_PROP_SHARESMB) + return (0); + + for (cn = uu_list_first(clp->cl_list); cn != NULL; + cn = uu_list_next(clp->cl_list, cn)) { + if (zfs_unshare_proto(cn->cn_handle, NULL, proto) != 0) + ret = -1; + } + + return (ret); +} + +/* + * Check if there is any child exported to a local zone in a given changelist. + * This information has already been recorded while gathering the changelist + * via changelist_gather(). + */ +int +changelist_haszonedchild(prop_changelist_t *clp) +{ + return (clp->cl_haszonedchild); +} + +/* + * Remove a node from a gathered list. 
+ */ +void +changelist_remove(prop_changelist_t *clp, const char *name) +{ + prop_changenode_t *cn; + + for (cn = uu_list_first(clp->cl_list); cn != NULL; + cn = uu_list_next(clp->cl_list, cn)) { + + if (strcmp(cn->cn_handle->zfs_name, name) == 0) { + uu_list_remove(clp->cl_list, cn); + zfs_close(cn->cn_handle); + free(cn); + return; + } + } +} + +/* + * Release any memory associated with a changelist. + */ +void +changelist_free(prop_changelist_t *clp) +{ + prop_changenode_t *cn; + void *cookie; + + if (clp->cl_list) { + cookie = NULL; + while ((cn = uu_list_teardown(clp->cl_list, &cookie)) != NULL) { + zfs_close(cn->cn_handle); + free(cn); + } + + uu_list_destroy(clp->cl_list); + } + if (clp->cl_pool) + uu_list_pool_destroy(clp->cl_pool); + + free(clp); +} + +static int +change_one(zfs_handle_t *zhp, void *data) +{ + prop_changelist_t *clp = data; + char property[ZFS_MAXPROPLEN]; + char where[64]; + prop_changenode_t *cn; + zprop_source_t sourcetype; + zprop_source_t share_sourcetype; + + /* + * We only want to unmount/unshare those filesystems that may inherit + * from the target filesystem. If we find any filesystem with a + * locally set mountpoint, we ignore any children since changing the + * property will not affect them. If this is a rename, we iterate + * over all children regardless, since we need them unmounted in + * order to do the rename. Also, if this is a volume and we're doing + * a rename, then always add it to the changelist. + */ + + if (!(ZFS_IS_VOLUME(zhp) && clp->cl_realprop == ZFS_PROP_NAME) && + zfs_prop_get(zhp, clp->cl_prop, property, + sizeof (property), &sourcetype, where, sizeof (where), + B_FALSE) != 0) { + zfs_close(zhp); + return (0); + } + + /* + * If we are "watching" sharenfs or sharesmb + * then check out the companion property which is tracked + * in cl_shareprop + */ + if (clp->cl_shareprop != ZPROP_INVAL && + zfs_prop_get(zhp, clp->cl_shareprop, property, + sizeof (property), &share_sourcetype, where, sizeof (where), + B_FALSE) != 0) { + zfs_close(zhp); + return (0); + } + + if (clp->cl_alldependents || clp->cl_allchildren || + sourcetype == ZPROP_SRC_DEFAULT || + sourcetype == ZPROP_SRC_INHERITED || + (clp->cl_shareprop != ZPROP_INVAL && + (share_sourcetype == ZPROP_SRC_DEFAULT || + share_sourcetype == ZPROP_SRC_INHERITED))) { + if ((cn = zfs_alloc(zfs_get_handle(zhp), + sizeof (prop_changenode_t))) == NULL) { + zfs_close(zhp); + return (-1); + } + + cn->cn_handle = zhp; + cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) || + zfs_is_mounted(zhp, NULL); + cn->cn_shared = zfs_is_shared(zhp); + cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED); + cn->cn_needpost = B_TRUE; + + /* Indicate if any child is exported to a local zone. 
*/ + if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned) + clp->cl_haszonedchild = B_TRUE; + + uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool); + + if (clp->cl_sorted) { + uu_list_index_t idx; + + (void) uu_list_find(clp->cl_list, cn, NULL, + &idx); + uu_list_insert(clp->cl_list, cn, idx); + } else { + ASSERT(!clp->cl_alldependents); + verify(uu_list_insert_before(clp->cl_list, + uu_list_first(clp->cl_list), cn) == 0); + } + + if (!clp->cl_alldependents) + return (zfs_iter_children(zhp, change_one, data)); + } else { + zfs_close(zhp); + } + + return (0); +} + +/*ARGSUSED*/ +static int +compare_mountpoints(const void *a, const void *b, void *unused) +{ + const prop_changenode_t *ca = a; + const prop_changenode_t *cb = b; + + char mounta[MAXPATHLEN]; + char mountb[MAXPATHLEN]; + + boolean_t hasmounta, hasmountb; + + /* + * When unsharing or unmounting filesystems, we need to do it in + * mountpoint order. This allows the user to have a mountpoint + * hierarchy that is different from the dataset hierarchy, and still + * allow it to be changed. However, if either dataset doesn't have a + * mountpoint (because it is a volume or a snapshot), we place it at the + * end of the list, because it doesn't affect our change at all. + */ + hasmounta = (zfs_prop_get(ca->cn_handle, ZFS_PROP_MOUNTPOINT, mounta, + sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0); + hasmountb = (zfs_prop_get(cb->cn_handle, ZFS_PROP_MOUNTPOINT, mountb, + sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0); + + if (!hasmounta && hasmountb) + return (-1); + else if (hasmounta && !hasmountb) + return (1); + else if (!hasmounta && !hasmountb) + return (0); + else + return (strcmp(mountb, mounta)); +} + +/* + * Given a ZFS handle and a property, construct a complete list of datasets + * that need to be modified as part of this process. For anything but the + * 'mountpoint' and 'sharenfs' properties, this just returns an empty list. + * Otherwise, we iterate over all children and look for any datasets that + * inherit the property. For each such dataset, we add it to the list and + * mark whether it was shared beforehand. + */ +prop_changelist_t * +changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags, + int mnt_flags) +{ + prop_changelist_t *clp; + prop_changenode_t *cn; + zfs_handle_t *temp; + char property[ZFS_MAXPROPLEN]; + uu_compare_fn_t *compare = NULL; + + if ((clp = zfs_alloc(zhp->zfs_hdl, sizeof (prop_changelist_t))) == NULL) + return (NULL); + + /* + * For mountpoint-related tasks, we want to sort everything by + * mountpoint, so that we mount and unmount them in the appropriate + * order, regardless of their position in the hierarchy. + */ + if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED || + prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS || + prop == ZFS_PROP_SHARESMB) { + compare = compare_mountpoints; + clp->cl_sorted = B_TRUE; + } + + clp->cl_pool = uu_list_pool_create("changelist_pool", + sizeof (prop_changenode_t), + offsetof(prop_changenode_t, cn_listnode), + compare, 0); + if (clp->cl_pool == NULL) { + assert(uu_error() == UU_ERROR_NO_MEMORY); + (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error"); + changelist_free(clp); + return (NULL); + } + + clp->cl_list = uu_list_create(clp->cl_pool, NULL, + clp->cl_sorted ? 
UU_LIST_SORTED : 0); + clp->cl_gflags = gather_flags; + clp->cl_mflags = mnt_flags; + + if (clp->cl_list == NULL) { + assert(uu_error() == UU_ERROR_NO_MEMORY); + (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error"); + changelist_free(clp); + return (NULL); + } + + /* + * If this is a rename or the 'zoned' property, we pretend we're + * changing the mountpoint and flag it so we can catch all children in + * change_one(). + * + * Flag cl_alldependents to catch all children plus the dependents + * (clones) that are not in the hierarchy. + */ + if (prop == ZFS_PROP_NAME) { + clp->cl_prop = ZFS_PROP_MOUNTPOINT; + clp->cl_alldependents = B_TRUE; + } else if (prop == ZFS_PROP_ZONED) { + clp->cl_prop = ZFS_PROP_MOUNTPOINT; + clp->cl_allchildren = B_TRUE; + } else if (prop == ZFS_PROP_CANMOUNT) { + clp->cl_prop = ZFS_PROP_MOUNTPOINT; + } else if (prop == ZFS_PROP_VOLSIZE) { + clp->cl_prop = ZFS_PROP_MOUNTPOINT; + } else if (prop == ZFS_PROP_VERSION) { + clp->cl_prop = ZFS_PROP_MOUNTPOINT; + } else { + clp->cl_prop = prop; + } + clp->cl_realprop = prop; + + if (clp->cl_prop != ZFS_PROP_MOUNTPOINT && + clp->cl_prop != ZFS_PROP_SHARENFS && + clp->cl_prop != ZFS_PROP_SHARESMB && + clp->cl_prop != ZFS_PROP_SHAREISCSI) + return (clp); + + /* + * If watching SHARENFS or SHARESMB then + * also watch its companion property. + */ + if (clp->cl_prop == ZFS_PROP_SHARENFS) + clp->cl_shareprop = ZFS_PROP_SHARESMB; + else if (clp->cl_prop == ZFS_PROP_SHARESMB) + clp->cl_shareprop = ZFS_PROP_SHARENFS; + + if (clp->cl_alldependents) { + if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) { + changelist_free(clp); + return (NULL); + } + } else if (zfs_iter_children(zhp, change_one, clp) != 0) { + changelist_free(clp); + return (NULL); + } + + /* + * We have to re-open ourselves because we auto-close all the handles + * and can't tell the difference. + */ + if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp), + ZFS_TYPE_DATASET)) == NULL) { + changelist_free(clp); + return (NULL); + } + + /* + * Always add ourself to the list. We add ourselves to the end so that + * we're the last to be unmounted. + */ + if ((cn = zfs_alloc(zhp->zfs_hdl, + sizeof (prop_changenode_t))) == NULL) { + zfs_close(temp); + changelist_free(clp); + return (NULL); + } + + cn->cn_handle = temp; + cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) || + zfs_is_mounted(temp, NULL); + cn->cn_shared = zfs_is_shared(temp); + cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED); + cn->cn_needpost = B_TRUE; + + uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool); + if (clp->cl_sorted) { + uu_list_index_t idx; + (void) uu_list_find(clp->cl_list, cn, NULL, &idx); + uu_list_insert(clp->cl_list, cn, idx); + } else { + verify(uu_list_insert_after(clp->cl_list, + uu_list_last(clp->cl_list), cn) == 0); + } + + /* + * If the mountpoint property was previously 'legacy', or 'none', + * record it as the behavior of changelist_postfix() will be different. 
+ */ + if ((clp->cl_prop == ZFS_PROP_MOUNTPOINT) && + (zfs_prop_get(zhp, prop, property, sizeof (property), + NULL, NULL, 0, B_FALSE) == 0 && + (strcmp(property, "legacy") == 0 || + strcmp(property, "none") == 0))) { + /* + * do not automatically mount ex-legacy datasets if + * we specifically set canmount to noauto + */ + if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) != + ZFS_CANMOUNT_NOAUTO) + clp->cl_waslegacy = B_TRUE; + } + + return (clp); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_config.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_config.c new file mode 100644 index 000000000000..94640d1b128c --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_config.c @@ -0,0 +1,360 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * The pool configuration repository is stored in /etc/zfs/zpool.cache as a + * single packed nvlist. While it would be nice to just read in this + * file from userland, this wouldn't work from a local zone. So we have to have + * a zpool ioctl to return the complete configuration for all pools. In the + * global zone, this will be identical to reading the file and unpacking it in + * userland. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libzfs_impl.h" + +typedef struct config_node { + char *cn_name; + nvlist_t *cn_config; + uu_avl_node_t cn_avl; +} config_node_t; + +/* ARGSUSED */ +static int +config_node_compare(const void *a, const void *b, void *unused) +{ + int ret; + + const config_node_t *ca = (config_node_t *)a; + const config_node_t *cb = (config_node_t *)b; + + ret = strcmp(ca->cn_name, cb->cn_name); + + if (ret < 0) + return (-1); + else if (ret > 0) + return (1); + else + return (0); +} + +void +namespace_clear(libzfs_handle_t *hdl) +{ + if (hdl->libzfs_ns_avl) { + config_node_t *cn; + void *cookie = NULL; + + while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl, + &cookie)) != NULL) { + nvlist_free(cn->cn_config); + free(cn->cn_name); + free(cn); + } + + uu_avl_destroy(hdl->libzfs_ns_avl); + hdl->libzfs_ns_avl = NULL; + } + + if (hdl->libzfs_ns_avlpool) { + uu_avl_pool_destroy(hdl->libzfs_ns_avlpool); + hdl->libzfs_ns_avlpool = NULL; + } +} + +/* + * Loads the pool namespace, or re-loads it if the cache has changed. + */ +static int +namespace_reload(libzfs_handle_t *hdl) +{ + nvlist_t *config; + config_node_t *cn; + nvpair_t *elem; + zfs_cmd_t zc = { 0 }; + void *cookie; + + if (hdl->libzfs_ns_gen == 0) { + /* + * This is the first time we've accessed the configuration + * cache. 
Initialize the AVL tree and then fall through to the + * common code. + */ + if ((hdl->libzfs_ns_avlpool = uu_avl_pool_create("config_pool", + sizeof (config_node_t), + offsetof(config_node_t, cn_avl), + config_node_compare, UU_DEFAULT)) == NULL) + return (no_memory(hdl)); + + if ((hdl->libzfs_ns_avl = uu_avl_create(hdl->libzfs_ns_avlpool, + NULL, UU_DEFAULT)) == NULL) + return (no_memory(hdl)); + } + + if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) + return (-1); + + for (;;) { + zc.zc_cookie = hdl->libzfs_ns_gen; + if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CONFIGS, &zc) != 0) { + switch (errno) { + case EEXIST: + /* + * The namespace hasn't changed. + */ + zcmd_free_nvlists(&zc); + return (0); + + case ENOMEM: + if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + break; + + default: + zcmd_free_nvlists(&zc); + return (zfs_standard_error(hdl, errno, + dgettext(TEXT_DOMAIN, "failed to read " + "pool configuration"))); + } + } else { + hdl->libzfs_ns_gen = zc.zc_cookie; + break; + } + } + + if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + + zcmd_free_nvlists(&zc); + + /* + * Clear out any existing configuration information. + */ + cookie = NULL; + while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl, &cookie)) != NULL) { + nvlist_free(cn->cn_config); + free(cn->cn_name); + free(cn); + } + + elem = NULL; + while ((elem = nvlist_next_nvpair(config, elem)) != NULL) { + nvlist_t *child; + uu_avl_index_t where; + + if ((cn = zfs_alloc(hdl, sizeof (config_node_t))) == NULL) { + nvlist_free(config); + return (-1); + } + + if ((cn->cn_name = zfs_strdup(hdl, + nvpair_name(elem))) == NULL) { + free(cn); + nvlist_free(config); + return (-1); + } + + verify(nvpair_value_nvlist(elem, &child) == 0); + if (nvlist_dup(child, &cn->cn_config, 0) != 0) { + free(cn->cn_name); + free(cn); + nvlist_free(config); + return (no_memory(hdl)); + } + verify(uu_avl_find(hdl->libzfs_ns_avl, cn, NULL, &where) + == NULL); + + uu_avl_insert(hdl->libzfs_ns_avl, cn, where); + } + + nvlist_free(config); + return (0); +} + +/* + * Retrieve the configuration for the given pool. The configuration is a nvlist + * describing the vdevs, as well as the statistics associated with each one. + */ +nvlist_t * +zpool_get_config(zpool_handle_t *zhp, nvlist_t **oldconfig) +{ + if (oldconfig) + *oldconfig = zhp->zpool_old_config; + return (zhp->zpool_config); +} + +/* + * Refresh the vdev statistics associated with the given pool. This is used in + * iostat to show configuration changes and determine the delta from the last + * time the function was called. This function can fail, in case the pool has + * been destroyed. + */ +int +zpool_refresh_stats(zpool_handle_t *zhp, boolean_t *missing) +{ + zfs_cmd_t zc = { 0 }; + int error; + nvlist_t *config; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + *missing = B_FALSE; + (void) strcpy(zc.zc_name, zhp->zpool_name); + + if (zhp->zpool_config_size == 0) + zhp->zpool_config_size = 1 << 16; + + if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size) != 0) + return (-1); + + for (;;) { + if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_STATS, + &zc) == 0) { + /* + * The real error is returned in the zc_cookie field. 
+ */ + error = zc.zc_cookie; + break; + } + + if (errno == ENOMEM) { + if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + } else { + zcmd_free_nvlists(&zc); + if (errno == ENOENT || errno == EINVAL) + *missing = B_TRUE; + zhp->zpool_state = POOL_STATE_UNAVAIL; + return (0); + } + } + + if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + + zcmd_free_nvlists(&zc); + + zhp->zpool_config_size = zc.zc_nvlist_dst_size; + + if (zhp->zpool_config != NULL) { + uint64_t oldtxg, newtxg; + + verify(nvlist_lookup_uint64(zhp->zpool_config, + ZPOOL_CONFIG_POOL_TXG, &oldtxg) == 0); + verify(nvlist_lookup_uint64(config, + ZPOOL_CONFIG_POOL_TXG, &newtxg) == 0); + + if (zhp->zpool_old_config != NULL) + nvlist_free(zhp->zpool_old_config); + + if (oldtxg != newtxg) { + nvlist_free(zhp->zpool_config); + zhp->zpool_old_config = NULL; + } else { + zhp->zpool_old_config = zhp->zpool_config; + } + } + + zhp->zpool_config = config; + if (error) + zhp->zpool_state = POOL_STATE_UNAVAIL; + else + zhp->zpool_state = POOL_STATE_ACTIVE; + + return (0); +} + +/* + * Iterate over all pools in the system. + */ +int +zpool_iter(libzfs_handle_t *hdl, zpool_iter_f func, void *data) +{ + config_node_t *cn; + zpool_handle_t *zhp; + int ret; + + if (namespace_reload(hdl) != 0) + return (-1); + + for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL; + cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) { + + if (zpool_open_silent(hdl, cn->cn_name, &zhp) != 0) + return (-1); + + if (zhp == NULL) + continue; + + if ((ret = func(zhp, data)) != 0) + return (ret); + } + + return (0); +} + +/* + * Iterate over root datasets, calling the given function for each. The zfs + * handle passed each time must be explicitly closed by the callback. + */ +int +zfs_iter_root(libzfs_handle_t *hdl, zfs_iter_f func, void *data) +{ + config_node_t *cn; + zfs_handle_t *zhp; + int ret; + + if (namespace_reload(hdl) != 0) + return (-1); + + for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL; + cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) { + + if ((zhp = make_dataset_handle(hdl, cn->cn_name)) == NULL) + continue; + + if ((ret = func(zhp, data)) != 0) + return (ret); + } + + return (0); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_dataset.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_dataset.c new file mode 100644 index 000000000000..a8005ffc0cf5 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_dataset.c @@ -0,0 +1,4248 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "zfs_namecheck.h" +#include "zfs_prop.h" +#include "libzfs_impl.h" +#include "zfs_deleg.h" + +static int zvol_create_link_common(libzfs_handle_t *, const char *, int); + +/* + * Given a single type (not a mask of types), return the type in a human + * readable form. + */ +const char * +zfs_type_to_name(zfs_type_t type) +{ + switch (type) { + case ZFS_TYPE_FILESYSTEM: + return (dgettext(TEXT_DOMAIN, "filesystem")); + case ZFS_TYPE_SNAPSHOT: + return (dgettext(TEXT_DOMAIN, "snapshot")); + case ZFS_TYPE_VOLUME: + return (dgettext(TEXT_DOMAIN, "volume")); + } + + return (NULL); +} + +/* + * Given a path and mask of ZFS types, return a string describing this dataset. + * This is used when we fail to open a dataset and we cannot get an exact type. + * We guess what the type would have been based on the path and the mask of + * acceptable types. + */ +static const char * +path_to_str(const char *path, int types) +{ + /* + * When given a single type, always report the exact type. + */ + if (types == ZFS_TYPE_SNAPSHOT) + return (dgettext(TEXT_DOMAIN, "snapshot")); + if (types == ZFS_TYPE_FILESYSTEM) + return (dgettext(TEXT_DOMAIN, "filesystem")); + if (types == ZFS_TYPE_VOLUME) + return (dgettext(TEXT_DOMAIN, "volume")); + + /* + * The user is requesting more than one type of dataset. If this is the + * case, consult the path itself. If we're looking for a snapshot, and + * a '@' is found, then report it as "snapshot". Otherwise, remove the + * snapshot attribute and try again. + */ + if (types & ZFS_TYPE_SNAPSHOT) { + if (strchr(path, '@') != NULL) + return (dgettext(TEXT_DOMAIN, "snapshot")); + return (path_to_str(path, types & ~ZFS_TYPE_SNAPSHOT)); + } + + + /* + * The user has requested either filesystems or volumes. + * We have no way of knowing a priori what type this would be, so always + * report it as "filesystem" or "volume", our two primitive types. + */ + if (types & ZFS_TYPE_FILESYSTEM) + return (dgettext(TEXT_DOMAIN, "filesystem")); + + assert(types & ZFS_TYPE_VOLUME); + return (dgettext(TEXT_DOMAIN, "volume")); +} + +/* + * Validate a ZFS path. This is used even before trying to open the dataset, to + * provide a more meaningful error message. We place a more useful message in + * 'buf' detailing exactly why the name was not valid. 
+ */ +static int +zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type, + boolean_t modifying) +{ + namecheck_err_t why; + char what; + + if (dataset_namecheck(path, &why, &what) != 0) { + if (hdl != NULL) { + switch (why) { + case NAME_ERR_TOOLONG: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "name is too long")); + break; + + case NAME_ERR_LEADING_SLASH: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "leading slash in name")); + break; + + case NAME_ERR_EMPTY_COMPONENT: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "empty component in name")); + break; + + case NAME_ERR_TRAILING_SLASH: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "trailing slash in name")); + break; + + case NAME_ERR_INVALCHAR: + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, "invalid character " + "'%c' in name"), what); + break; + + case NAME_ERR_MULTIPLE_AT: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "multiple '@' delimiters in name")); + break; + + case NAME_ERR_NOLETTER: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "pool doesn't begin with a letter")); + break; + + case NAME_ERR_RESERVED: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "name is reserved")); + break; + + case NAME_ERR_DISKLIKE: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "reserved disk name")); + break; + } + } + + return (0); + } + + if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) { + if (hdl != NULL) + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "snapshot delimiter '@' in filesystem name")); + return (0); + } + + if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) { + if (hdl != NULL) + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "missing '@' delimiter in snapshot name")); + return (0); + } + + if (modifying && strchr(path, '%') != NULL) { + if (hdl != NULL) + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "invalid character %c in name"), '%'); + return (0); + } + + return (-1); +} + +int +zfs_name_valid(const char *name, zfs_type_t type) +{ + if (type == ZFS_TYPE_POOL) + return (zpool_name_valid(NULL, B_FALSE, name)); + return (zfs_validate_name(NULL, name, type, B_FALSE)); +} + +/* + * This function takes the raw DSL properties, and filters out the user-defined + * properties into a separate nvlist. 
+ */ +static nvlist_t * +process_user_props(zfs_handle_t *zhp, nvlist_t *props) +{ + libzfs_handle_t *hdl = zhp->zfs_hdl; + nvpair_t *elem; + nvlist_t *propval; + nvlist_t *nvl; + + if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) { + (void) no_memory(hdl); + return (NULL); + } + + elem = NULL; + while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { + if (!zfs_prop_user(nvpair_name(elem))) + continue; + + verify(nvpair_value_nvlist(elem, &propval) == 0); + if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) { + nvlist_free(nvl); + (void) no_memory(hdl); + return (NULL); + } + } + + return (nvl); +} + +static zpool_handle_t * +zpool_add_handle(zfs_handle_t *zhp, const char *pool_name) +{ + libzfs_handle_t *hdl = zhp->zfs_hdl; + zpool_handle_t *zph; + + if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) { + if (hdl->libzfs_pool_handles != NULL) + zph->zpool_next = hdl->libzfs_pool_handles; + hdl->libzfs_pool_handles = zph; + } + return (zph); +} + +static zpool_handle_t * +zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len) +{ + libzfs_handle_t *hdl = zhp->zfs_hdl; + zpool_handle_t *zph = hdl->libzfs_pool_handles; + + while ((zph != NULL) && + (strncmp(pool_name, zpool_get_name(zph), len) != 0)) + zph = zph->zpool_next; + return (zph); +} + +/* + * Returns a handle to the pool that contains the provided dataset. + * If a handle to that pool already exists then that handle is returned. + * Otherwise, a new handle is created and added to the list of handles. + */ +static zpool_handle_t * +zpool_handle(zfs_handle_t *zhp) +{ + char *pool_name; + int len; + zpool_handle_t *zph; + + len = strcspn(zhp->zfs_name, "/@") + 1; + pool_name = zfs_alloc(zhp->zfs_hdl, len); + (void) strlcpy(pool_name, zhp->zfs_name, len); + + zph = zpool_find_handle(zhp, pool_name, len); + if (zph == NULL) + zph = zpool_add_handle(zhp, pool_name); + + free(pool_name); + return (zph); +} + +void +zpool_free_handles(libzfs_handle_t *hdl) +{ + zpool_handle_t *next, *zph = hdl->libzfs_pool_handles; + + while (zph != NULL) { + next = zph->zpool_next; + zpool_close(zph); + zph = next; + } + hdl->libzfs_pool_handles = NULL; +} + +/* + * Utility function to gather stats (objset and zpl) for the given object. + */ +static int +get_stats(zfs_handle_t *zhp) +{ + zfs_cmd_t zc = { 0 }; + libzfs_handle_t *hdl = zhp->zfs_hdl; + nvlist_t *allprops, *userprops; + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + + if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) + return (-1); + + while (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) { + if (errno == ENOMEM) { + if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + } else { + zcmd_free_nvlists(&zc); + return (-1); + } + } + + zhp->zfs_dmustats = zc.zc_objset_stats; /* structure assignment */ + + if (zcmd_read_dst_nvlist(hdl, &zc, &allprops) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + + zcmd_free_nvlists(&zc); + + if ((userprops = process_user_props(zhp, allprops)) == NULL) { + nvlist_free(allprops); + return (-1); + } + + nvlist_free(zhp->zfs_props); + nvlist_free(zhp->zfs_user_props); + + zhp->zfs_props = allprops; + zhp->zfs_user_props = userprops; + + return (0); +} + +/* + * Refresh the properties currently stored in the handle. + */ +void +zfs_refresh_properties(zfs_handle_t *zhp) +{ + (void) get_stats(zhp); +} + +/* + * Makes a handle from the given dataset name. Used by zfs_open() and + * zfs_iter_* to create child handles on the fly. 
+ */ +zfs_handle_t * +make_dataset_handle(libzfs_handle_t *hdl, const char *path) +{ + zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1); + char *logstr; + + if (zhp == NULL) + return (NULL); + + zhp->zfs_hdl = hdl; + + /* + * Preserve history log string. + * any changes performed here will be + * logged as an internal event. + */ + logstr = zhp->zfs_hdl->libzfs_log_str; + zhp->zfs_hdl->libzfs_log_str = NULL; +top: + (void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name)); + + if (get_stats(zhp) != 0) { + zhp->zfs_hdl->libzfs_log_str = logstr; + free(zhp); + return (NULL); + } + + if (zhp->zfs_dmustats.dds_inconsistent) { + zfs_cmd_t zc = { 0 }; + + /* + * If it is dds_inconsistent, then we've caught it in + * the middle of a 'zfs receive' or 'zfs destroy', and + * it is inconsistent from the ZPL's point of view, so + * can't be mounted. However, it could also be that we + * have crashed in the middle of one of those + * operations, in which case we need to get rid of the + * inconsistent state. We do that by either rolling + * back to the previous snapshot (which will fail if + * there is none), or destroying the filesystem. Note + * that if we are still in the middle of an active + * 'receive' or 'destroy', then the rollback and destroy + * will fail with EBUSY and we will drive on as usual. + */ + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + + if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) { + (void) zvol_remove_link(hdl, zhp->zfs_name); + zc.zc_objset_type = DMU_OST_ZVOL; + } else { + zc.zc_objset_type = DMU_OST_ZFS; + } + + /* + * If we can successfully destroy it, pretend that it + * never existed. + */ + if (ioctl(hdl->libzfs_fd, ZFS_IOC_DESTROY, &zc) == 0) { + zhp->zfs_hdl->libzfs_log_str = logstr; + free(zhp); + errno = ENOENT; + return (NULL); + } + /* If we can successfully roll it back, reget the stats */ + if (ioctl(hdl->libzfs_fd, ZFS_IOC_ROLLBACK, &zc) == 0) + goto top; + } + + /* + * We've managed to open the dataset and gather statistics. Determine + * the high-level type. + */ + if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) + zhp->zfs_head_type = ZFS_TYPE_VOLUME; + else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS) + zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM; + else + abort(); + + if (zhp->zfs_dmustats.dds_is_snapshot) + zhp->zfs_type = ZFS_TYPE_SNAPSHOT; + else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) + zhp->zfs_type = ZFS_TYPE_VOLUME; + else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS) + zhp->zfs_type = ZFS_TYPE_FILESYSTEM; + else + abort(); /* we should never see any other types */ + + zhp->zfs_hdl->libzfs_log_str = logstr; + zhp->zpool_hdl = zpool_handle(zhp); + return (zhp); +} + +/* + * Opens the given snapshot, filesystem, or volume. The 'types' + * argument is a mask of acceptable types. The function will print an + * appropriate error message and return NULL if it can't be opened. + */ +zfs_handle_t * +zfs_open(libzfs_handle_t *hdl, const char *path, int types) +{ + zfs_handle_t *zhp; + char errbuf[1024]; + + (void) snprintf(errbuf, sizeof (errbuf), + dgettext(TEXT_DOMAIN, "cannot open '%s'"), path); + + /* + * Validate the name before we even try to open it. + */ + if (!zfs_validate_name(hdl, path, ZFS_TYPE_DATASET, B_FALSE)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "invalid dataset name")); + (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); + return (NULL); + } + + /* + * Try to get stats for the dataset, which will tell us if it exists. 
+ */ + errno = 0; + if ((zhp = make_dataset_handle(hdl, path)) == NULL) { + (void) zfs_standard_error(hdl, errno, errbuf); + return (NULL); + } + + if (!(types & zhp->zfs_type)) { + (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); + zfs_close(zhp); + return (NULL); + } + + return (zhp); +} + +/* + * Release a ZFS handle. Nothing to do but free the associated memory. + */ +void +zfs_close(zfs_handle_t *zhp) +{ + if (zhp->zfs_mntopts) + free(zhp->zfs_mntopts); + nvlist_free(zhp->zfs_props); + nvlist_free(zhp->zfs_user_props); + free(zhp); +} + +int +zfs_spa_version(zfs_handle_t *zhp, int *spa_version) +{ + zpool_handle_t *zpool_handle = zhp->zpool_hdl; + + if (zpool_handle == NULL) + return (-1); + + *spa_version = zpool_get_prop_int(zpool_handle, + ZPOOL_PROP_VERSION, NULL); + return (0); +} + +/* + * The choice of reservation property depends on the SPA version. + */ +static int +zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop) +{ + int spa_version; + + if (zfs_spa_version(zhp, &spa_version) < 0) + return (-1); + + if (spa_version >= SPA_VERSION_REFRESERVATION) + *resv_prop = ZFS_PROP_REFRESERVATION; + else + *resv_prop = ZFS_PROP_RESERVATION; + + return (0); +} + +/* + * Given an nvlist of properties to set, validates that they are correct, and + * parses any numeric properties (index, boolean, etc) if they are specified as + * strings. + */ +nvlist_t * +zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl, + uint64_t zoned, zfs_handle_t *zhp, const char *errbuf) +{ + nvpair_t *elem; + uint64_t intval; + char *strval; + zfs_prop_t prop; + nvlist_t *ret; + int chosen_normal = -1; + int chosen_utf = -1; + + if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) { + (void) no_memory(hdl); + return (NULL); + } + + elem = NULL; + while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) { + const char *propname = nvpair_name(elem); + + /* + * Make sure this property is valid and applies to this type. + */ + if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) { + if (!zfs_prop_user(propname)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "invalid property '%s'"), propname); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + + /* + * If this is a user property, make sure it's a + * string, and that it's less than ZAP_MAXNAMELEN. 
+ */ + if (nvpair_type(elem) != DATA_TYPE_STRING) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' must be a string"), propname); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + + if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "property name '%s' is too long"), + propname); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + + (void) nvpair_value_string(elem, &strval); + if (nvlist_add_string(ret, propname, strval) != 0) { + (void) no_memory(hdl); + goto error; + } + continue; + } + + if (type == ZFS_TYPE_SNAPSHOT) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "this property can not be modified for snapshots")); + (void) zfs_error(hdl, EZFS_PROPTYPE, errbuf); + goto error; + } + + if (!zfs_prop_valid_for_type(prop, type)) { + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, "'%s' does not " + "apply to datasets of this type"), propname); + (void) zfs_error(hdl, EZFS_PROPTYPE, errbuf); + goto error; + } + + if (zfs_prop_readonly(prop) && + (!zfs_prop_setonce(prop) || zhp != NULL)) { + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, "'%s' is readonly"), + propname); + (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); + goto error; + } + + if (zprop_parse_value(hdl, elem, prop, type, ret, + &strval, &intval, errbuf) != 0) + goto error; + + /* + * Perform some additional checks for specific properties. + */ + switch (prop) { + case ZFS_PROP_VERSION: + { + int version; + + if (zhp == NULL) + break; + version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION); + if (intval < version) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "Can not downgrade; already at version %u"), + version); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + break; + } + + case ZFS_PROP_RECORDSIZE: + case ZFS_PROP_VOLBLOCKSIZE: + /* must be power of two within SPA_{MIN,MAX}BLOCKSIZE */ + if (intval < SPA_MINBLOCKSIZE || + intval > SPA_MAXBLOCKSIZE || !ISP2(intval)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' must be power of 2 from %u " + "to %uk"), propname, + (uint_t)SPA_MINBLOCKSIZE, + (uint_t)SPA_MAXBLOCKSIZE >> 10); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + break; + + case ZFS_PROP_SHAREISCSI: + if (strcmp(strval, "off") != 0 && + strcmp(strval, "on") != 0 && + strcmp(strval, "type=disk") != 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' must be 'on', 'off', or 'type=disk'"), + propname); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + + break; + + case ZFS_PROP_MOUNTPOINT: + { + namecheck_err_t why; + + if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 || + strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0) + break; + + if (mountpoint_namecheck(strval, &why)) { + switch (why) { + case NAME_ERR_LEADING_SLASH: + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, + "'%s' must be an absolute path, " + "'none', or 'legacy'"), propname); + break; + case NAME_ERR_TOOLONG: + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, + "component of '%s' is too long"), + propname); + break; + } + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + } + + /*FALLTHRU*/ + + case ZFS_PROP_SHARESMB: + case ZFS_PROP_SHARENFS: + /* + * For the mountpoint and sharenfs or sharesmb + * properties, check if it can be set in a + * global/non-global zone based on + * the zoned property value: + * + * global zone non-global zone + * -------------------------------------------------- + * zoned=on mountpoint (no) mountpoint (yes) + * sharenfs (no) sharenfs (no) + * sharesmb (no) sharesmb (no) + * + * zoned=off 
mountpoint (yes) N/A + * sharenfs (yes) + * sharesmb (yes) + */ + if (zoned) { + if (getzoneid() == GLOBAL_ZONEID) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' cannot be set on " + "dataset in a non-global zone"), + propname); + (void) zfs_error(hdl, EZFS_ZONED, + errbuf); + goto error; + } else if (prop == ZFS_PROP_SHARENFS || + prop == ZFS_PROP_SHARESMB) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' cannot be set in " + "a non-global zone"), propname); + (void) zfs_error(hdl, EZFS_ZONED, + errbuf); + goto error; + } + } else if (getzoneid() != GLOBAL_ZONEID) { + /* + * If zoned property is 'off', this must be in + * a globle zone. If not, something is wrong. + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' cannot be set while dataset " + "'zoned' property is set"), propname); + (void) zfs_error(hdl, EZFS_ZONED, errbuf); + goto error; + } + + /* + * At this point, it is legitimate to set the + * property. Now we want to make sure that the + * property value is valid if it is sharenfs. + */ + if ((prop == ZFS_PROP_SHARENFS || + prop == ZFS_PROP_SHARESMB) && + strcmp(strval, "on") != 0 && + strcmp(strval, "off") != 0) { + zfs_share_proto_t proto; + + if (prop == ZFS_PROP_SHARESMB) + proto = PROTO_SMB; + else + proto = PROTO_NFS; + + /* + * Must be an valid sharing protocol + * option string so init the libshare + * in order to enable the parser and + * then parse the options. We use the + * control API since we don't care about + * the current configuration and don't + * want the overhead of loading it + * until we actually do something. + */ + + if (zfs_init_libshare(hdl, + SA_INIT_CONTROL_API) != SA_OK) { + /* + * An error occurred so we can't do + * anything + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' cannot be set: problem " + "in share initialization"), + propname); + (void) zfs_error(hdl, EZFS_BADPROP, + errbuf); + goto error; + } + + if (zfs_parse_options(strval, proto) != SA_OK) { + /* + * There was an error in parsing so + * deal with it by issuing an error + * message and leaving after + * uninitializing the the libshare + * interface. + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' cannot be set to invalid " + "options"), propname); + (void) zfs_error(hdl, EZFS_BADPROP, + errbuf); + zfs_uninit_libshare(hdl); + goto error; + } + zfs_uninit_libshare(hdl); + } + + break; + case ZFS_PROP_UTF8ONLY: + chosen_utf = (int)intval; + break; + case ZFS_PROP_NORMALIZE: + chosen_normal = (int)intval; + break; + } + + /* + * For changes to existing volumes, we have some additional + * checks to enforce. 
+ */ + if (type == ZFS_TYPE_VOLUME && zhp != NULL) { + uint64_t volsize = zfs_prop_get_int(zhp, + ZFS_PROP_VOLSIZE); + uint64_t blocksize = zfs_prop_get_int(zhp, + ZFS_PROP_VOLBLOCKSIZE); + char buf[64]; + + switch (prop) { + case ZFS_PROP_RESERVATION: + case ZFS_PROP_REFRESERVATION: + if (intval > volsize) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' is greater than current " + "volume size"), propname); + (void) zfs_error(hdl, EZFS_BADPROP, + errbuf); + goto error; + } + break; + + case ZFS_PROP_VOLSIZE: + if (intval % blocksize != 0) { + zfs_nicenum(blocksize, buf, + sizeof (buf)); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' must be a multiple of " + "volume block size (%s)"), + propname, buf); + (void) zfs_error(hdl, EZFS_BADPROP, + errbuf); + goto error; + } + + if (intval == 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' cannot be zero"), + propname); + (void) zfs_error(hdl, EZFS_BADPROP, + errbuf); + goto error; + } + break; + } + } + } + + /* + * If normalization was chosen, but no UTF8 choice was made, + * enforce rejection of non-UTF8 names. + * + * If normalization was chosen, but rejecting non-UTF8 names + * was explicitly not chosen, it is an error. + */ + if (chosen_normal > 0 && chosen_utf < 0) { + if (nvlist_add_uint64(ret, + zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) { + (void) no_memory(hdl); + goto error; + } + } else if (chosen_normal > 0 && chosen_utf == 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' must be set 'on' if normalization chosen"), + zfs_prop_to_name(ZFS_PROP_UTF8ONLY)); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + + /* + * If this is an existing volume, and someone is setting the volsize, + * make sure that it matches the reservation, or add it if necessary. 
+ */ + if (zhp != NULL && type == ZFS_TYPE_VOLUME && + nvlist_lookup_uint64(ret, zfs_prop_to_name(ZFS_PROP_VOLSIZE), + &intval) == 0) { + uint64_t old_volsize = zfs_prop_get_int(zhp, + ZFS_PROP_VOLSIZE); + uint64_t old_reservation; + uint64_t new_reservation; + zfs_prop_t resv_prop; + + if (zfs_which_resv_prop(zhp, &resv_prop) < 0) + goto error; + old_reservation = zfs_prop_get_int(zhp, resv_prop); + + if (old_volsize == old_reservation && + nvlist_lookup_uint64(ret, zfs_prop_to_name(resv_prop), + &new_reservation) != 0) { + if (nvlist_add_uint64(ret, + zfs_prop_to_name(resv_prop), intval) != 0) { + (void) no_memory(hdl); + goto error; + } + } + } + return (ret); + +error: + nvlist_free(ret); + return (NULL); +} + +static int +zfs_get_perm_who(const char *who, zfs_deleg_who_type_t *who_type, + uint64_t *ret_who) +{ + struct passwd *pwd; + struct group *grp; + uid_t id; + + if (*who_type == ZFS_DELEG_EVERYONE || *who_type == ZFS_DELEG_CREATE || + *who_type == ZFS_DELEG_NAMED_SET) { + *ret_who = -1; + return (0); + } + if (who == NULL && !(*who_type == ZFS_DELEG_EVERYONE)) + return (EZFS_BADWHO); + + if (*who_type == ZFS_DELEG_WHO_UNKNOWN && + strcmp(who, "everyone") == 0) { + *ret_who = -1; + *who_type = ZFS_DELEG_EVERYONE; + return (0); + } + + pwd = getpwnam(who); + grp = getgrnam(who); + + if ((*who_type == ZFS_DELEG_USER) && pwd) { + *ret_who = pwd->pw_uid; + } else if ((*who_type == ZFS_DELEG_GROUP) && grp) { + *ret_who = grp->gr_gid; + } else if (pwd) { + *ret_who = pwd->pw_uid; + *who_type = ZFS_DELEG_USER; + } else if (grp) { + *ret_who = grp->gr_gid; + *who_type = ZFS_DELEG_GROUP; + } else { + char *end; + + id = strtol(who, &end, 10); + if (errno != 0 || *end != '\0') { + return (EZFS_BADWHO); + } else { + *ret_who = id; + if (*who_type == ZFS_DELEG_WHO_UNKNOWN) + *who_type = ZFS_DELEG_USER; + } + } + + return (0); +} + +static void +zfs_perms_add_to_nvlist(nvlist_t *who_nvp, char *name, nvlist_t *perms_nvp) +{ + if (perms_nvp != NULL) { + verify(nvlist_add_nvlist(who_nvp, + name, perms_nvp) == 0); + } else { + verify(nvlist_add_boolean(who_nvp, name) == 0); + } +} + +static void +helper(zfs_deleg_who_type_t who_type, uint64_t whoid, char *whostr, + zfs_deleg_inherit_t inherit, nvlist_t *who_nvp, nvlist_t *perms_nvp, + nvlist_t *sets_nvp) +{ + boolean_t do_perms, do_sets; + char name[ZFS_MAX_DELEG_NAME]; + + do_perms = (nvlist_next_nvpair(perms_nvp, NULL) != NULL); + do_sets = (nvlist_next_nvpair(sets_nvp, NULL) != NULL); + + if (!do_perms && !do_sets) + do_perms = do_sets = B_TRUE; + + if (do_perms) { + zfs_deleg_whokey(name, who_type, inherit, + (who_type == ZFS_DELEG_NAMED_SET) ? + whostr : (void *)&whoid); + zfs_perms_add_to_nvlist(who_nvp, name, perms_nvp); + } + if (do_sets) { + zfs_deleg_whokey(name, toupper(who_type), inherit, + (who_type == ZFS_DELEG_NAMED_SET) ? 
+ whostr : (void *)&whoid); + zfs_perms_add_to_nvlist(who_nvp, name, sets_nvp); + } +} + +static void +zfs_perms_add_who_nvlist(nvlist_t *who_nvp, uint64_t whoid, void *whostr, + nvlist_t *perms_nvp, nvlist_t *sets_nvp, + zfs_deleg_who_type_t who_type, zfs_deleg_inherit_t inherit) +{ + if (who_type == ZFS_DELEG_NAMED_SET || who_type == ZFS_DELEG_CREATE) { + helper(who_type, whoid, whostr, 0, + who_nvp, perms_nvp, sets_nvp); + } else { + if (inherit & ZFS_DELEG_PERM_LOCAL) { + helper(who_type, whoid, whostr, ZFS_DELEG_LOCAL, + who_nvp, perms_nvp, sets_nvp); + } + if (inherit & ZFS_DELEG_PERM_DESCENDENT) { + helper(who_type, whoid, whostr, ZFS_DELEG_DESCENDENT, + who_nvp, perms_nvp, sets_nvp); + } + } +} + +/* + * Construct nvlist to pass down to kernel for setting/removing permissions. + * + * The nvlist is constructed as a series of nvpairs with an optional embedded + * nvlist of permissions to remove or set. The topmost nvpairs are the actual + * base attribute named stored in the dsl. + * Arguments: + * + * whostr: is a comma separated list of users, groups, or a single set name. + * whostr may be null for everyone or create perms. + * who_type: is the type of entry in whostr. Typically this will be + * ZFS_DELEG_WHO_UNKNOWN. + * perms: common separated list of permissions. May be null if user + * is requested to remove permissions by who. + * inherit: Specifies the inheritance of the permissions. Will be either + * ZFS_DELEG_PERM_LOCAL and/or ZFS_DELEG_PERM_DESCENDENT. + * nvp The constructed nvlist to pass to zfs_perm_set(). + * The output nvp will look something like this. + * ul$1234 -> {create ; destroy } + * Ul$1234 -> { @myset } + * s-$@myset - { snapshot; checksum; compression } + */ +int +zfs_build_perms(zfs_handle_t *zhp, char *whostr, char *perms, + zfs_deleg_who_type_t who_type, zfs_deleg_inherit_t inherit, nvlist_t **nvp) +{ + nvlist_t *who_nvp; + nvlist_t *perms_nvp = NULL; + nvlist_t *sets_nvp = NULL; + char errbuf[1024]; + char *who_tok, *perm; + int error; + + *nvp = NULL; + + if (perms) { + if ((error = nvlist_alloc(&perms_nvp, + NV_UNIQUE_NAME, 0)) != 0) { + return (1); + } + if ((error = nvlist_alloc(&sets_nvp, + NV_UNIQUE_NAME, 0)) != 0) { + nvlist_free(perms_nvp); + return (1); + } + } + + if ((error = nvlist_alloc(&who_nvp, NV_UNIQUE_NAME, 0)) != 0) { + if (perms_nvp) + nvlist_free(perms_nvp); + if (sets_nvp) + nvlist_free(sets_nvp); + return (1); + } + + if (who_type == ZFS_DELEG_NAMED_SET) { + namecheck_err_t why; + char what; + + if ((error = permset_namecheck(whostr, &why, &what)) != 0) { + nvlist_free(who_nvp); + if (perms_nvp) + nvlist_free(perms_nvp); + if (sets_nvp) + nvlist_free(sets_nvp); + + switch (why) { + case NAME_ERR_NO_AT: + zfs_error_aux(zhp->zfs_hdl, + dgettext(TEXT_DOMAIN, + "set definition must begin with an '@' " + "character")); + } + return (zfs_error(zhp->zfs_hdl, + EZFS_BADPERMSET, whostr)); + } + } + + /* + * Build up nvlist(s) of permissions. Two nvlists are maintained. + * The first nvlist perms_nvp will have normal permissions and the + * other sets_nvp will have only permssion set names in it. 
+ */ + for (perm = strtok(perms, ","); perm; perm = strtok(NULL, ",")) { + const char *perm_canonical = zfs_deleg_canonicalize_perm(perm); + + if (perm_canonical) { + verify(nvlist_add_boolean(perms_nvp, + perm_canonical) == 0); + } else if (perm[0] == '@') { + verify(nvlist_add_boolean(sets_nvp, perm) == 0); + } else { + nvlist_free(who_nvp); + nvlist_free(perms_nvp); + nvlist_free(sets_nvp); + return (zfs_error(zhp->zfs_hdl, EZFS_BADPERM, perm)); + } + } + + if (whostr && who_type != ZFS_DELEG_CREATE) { + who_tok = strtok(whostr, ","); + if (who_tok == NULL) { + nvlist_free(who_nvp); + if (perms_nvp) + nvlist_free(perms_nvp); + if (sets_nvp) + nvlist_free(sets_nvp); + (void) snprintf(errbuf, sizeof (errbuf), + dgettext(TEXT_DOMAIN, "Who string is NULL"), + whostr); + return (zfs_error(zhp->zfs_hdl, EZFS_BADWHO, errbuf)); + } + } + + /* + * Now create the nvlist(s) + */ + do { + uint64_t who_id; + + error = zfs_get_perm_who(who_tok, &who_type, + &who_id); + if (error) { + nvlist_free(who_nvp); + if (perms_nvp) + nvlist_free(perms_nvp); + if (sets_nvp) + nvlist_free(sets_nvp); + (void) snprintf(errbuf, sizeof (errbuf), + dgettext(TEXT_DOMAIN, + "Unable to determine uid/gid for " + "%s "), who_tok); + return (zfs_error(zhp->zfs_hdl, EZFS_BADWHO, errbuf)); + } + + /* + * add entries for both local and descendent when required + */ + zfs_perms_add_who_nvlist(who_nvp, who_id, who_tok, + perms_nvp, sets_nvp, who_type, inherit); + + } while (who_tok = strtok(NULL, ",")); + *nvp = who_nvp; + return (0); +} + +static int +zfs_perm_set_common(zfs_handle_t *zhp, nvlist_t *nvp, boolean_t unset) +{ + zfs_cmd_t zc = { 0 }; + int error; + char errbuf[1024]; + + (void) snprintf(errbuf, sizeof (errbuf), + dgettext(TEXT_DOMAIN, "Cannot update 'allows' for '%s'"), + zhp->zfs_name); + + if (zcmd_write_src_nvlist(zhp->zfs_hdl, &zc, nvp)) + return (-1); + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + zc.zc_perm_action = unset; + + error = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SET_FSACL, &zc); + if (error && errno == ENOTSUP) { + (void) snprintf(errbuf, sizeof (errbuf), + gettext("Pool must be upgraded to use 'allow/unallow'")); + zcmd_free_nvlists(&zc); + return (zfs_error(zhp->zfs_hdl, EZFS_BADVERSION, errbuf)); + } else if (error) { + return (zfs_standard_error(zhp->zfs_hdl, errno, errbuf)); + } + zcmd_free_nvlists(&zc); + + return (error); +} + +int +zfs_perm_set(zfs_handle_t *zhp, nvlist_t *nvp) +{ + return (zfs_perm_set_common(zhp, nvp, B_FALSE)); +} + +int +zfs_perm_remove(zfs_handle_t *zhp, nvlist_t *perms) +{ + return (zfs_perm_set_common(zhp, perms, B_TRUE)); +} + +static int +perm_compare(const void *arg1, const void *arg2) +{ + const zfs_perm_node_t *node1 = arg1; + const zfs_perm_node_t *node2 = arg2; + int ret; + + ret = strcmp(node1->z_pname, node2->z_pname); + + if (ret > 0) + return (1); + if (ret < 0) + return (-1); + else + return (0); +} + +static void +zfs_destroy_perm_tree(avl_tree_t *tree) +{ + zfs_perm_node_t *permnode; + void *cookie = NULL; + + while ((permnode = avl_destroy_nodes(tree, &cookie)) != NULL) + free(permnode); + avl_destroy(tree); +} + +static void +zfs_destroy_tree(avl_tree_t *tree) +{ + zfs_allow_node_t *allownode; + void *cookie = NULL; + + while ((allownode = avl_destroy_nodes(tree, &cookie)) != NULL) { + zfs_destroy_perm_tree(&allownode->z_localdescend); + zfs_destroy_perm_tree(&allownode->z_local); + zfs_destroy_perm_tree(&allownode->z_descend); + free(allownode); + } + avl_destroy(tree); +} + +void +zfs_free_allows(zfs_allow_t *allow) +{ + 
zfs_allow_t *allownext; + zfs_allow_t *freeallow; + + allownext = allow; + while (allownext) { + zfs_destroy_tree(&allownext->z_sets); + zfs_destroy_tree(&allownext->z_crperms); + zfs_destroy_tree(&allownext->z_user); + zfs_destroy_tree(&allownext->z_group); + zfs_destroy_tree(&allownext->z_everyone); + freeallow = allownext; + allownext = allownext->z_next; + free(freeallow); + } +} + +static zfs_allow_t * +zfs_alloc_perm_tree(zfs_handle_t *zhp, zfs_allow_t *prev, char *setpoint) +{ + zfs_allow_t *ptree; + + if ((ptree = zfs_alloc(zhp->zfs_hdl, + sizeof (zfs_allow_t))) == NULL) { + return (NULL); + } + + (void) strlcpy(ptree->z_setpoint, setpoint, sizeof (ptree->z_setpoint)); + avl_create(&ptree->z_sets, + perm_compare, sizeof (zfs_allow_node_t), + offsetof(zfs_allow_node_t, z_node)); + avl_create(&ptree->z_crperms, + perm_compare, sizeof (zfs_allow_node_t), + offsetof(zfs_allow_node_t, z_node)); + avl_create(&ptree->z_user, + perm_compare, sizeof (zfs_allow_node_t), + offsetof(zfs_allow_node_t, z_node)); + avl_create(&ptree->z_group, + perm_compare, sizeof (zfs_allow_node_t), + offsetof(zfs_allow_node_t, z_node)); + avl_create(&ptree->z_everyone, + perm_compare, sizeof (zfs_allow_node_t), + offsetof(zfs_allow_node_t, z_node)); + + if (prev) + prev->z_next = ptree; + ptree->z_next = NULL; + return (ptree); +} + +/* + * Add permissions to the appropriate AVL permission tree. + * The appropriate tree may not be the requested tree. + * For example if ld indicates a local permission, but + * same permission also exists as a descendent permission + * then the permission will be removed from the descendent + * tree and add the the local+descendent tree. + */ +static int +zfs_coalesce_perm(zfs_handle_t *zhp, zfs_allow_node_t *allownode, + char *perm, char ld) +{ + zfs_perm_node_t pnode, *permnode, *permnode2; + zfs_perm_node_t *newnode; + avl_index_t where, where2; + avl_tree_t *tree, *altree; + + (void) strlcpy(pnode.z_pname, perm, sizeof (pnode.z_pname)); + + if (ld == ZFS_DELEG_NA) { + tree = &allownode->z_localdescend; + altree = &allownode->z_descend; + } else if (ld == ZFS_DELEG_LOCAL) { + tree = &allownode->z_local; + altree = &allownode->z_descend; + } else { + tree = &allownode->z_descend; + altree = &allownode->z_local; + } + permnode = avl_find(tree, &pnode, &where); + permnode2 = avl_find(altree, &pnode, &where2); + + if (permnode2) { + avl_remove(altree, permnode2); + free(permnode2); + if (permnode == NULL) { + tree = &allownode->z_localdescend; + } + } + + /* + * Now insert new permission in either requested location + * local/descendent or into ld when perm will exist in both. + */ + if (permnode == NULL) { + if ((newnode = zfs_alloc(zhp->zfs_hdl, + sizeof (zfs_perm_node_t))) == NULL) { + return (-1); + } + *newnode = pnode; + avl_add(tree, newnode); + } + return (0); +} + +/* + * Uggh, this is going to be a bit complicated. + * we have an nvlist coming out of the kernel that + * will indicate where the permission is set and then + * it will contain allow of the various "who's", and what + * their permissions are. To further complicate this + * we will then have to coalesce the local,descendent + * and local+descendent permissions where appropriate. + * The kernel only knows about a permission as being local + * or descendent, but not both. + * + * In order to make this easier for zfs_main to deal with + * a series of AVL trees will be used to maintain + * all of this, primarily for sorting purposes as well + * as the ability to quickly locate a specific entry. 
+ * + * What we end up with are tree's for sets, create perms, + * user, groups and everyone. With each of those trees + * we have subtrees for local, descendent and local+descendent + * permissions. + */ +int +zfs_perm_get(zfs_handle_t *zhp, zfs_allow_t **zfs_perms) +{ + zfs_cmd_t zc = { 0 }; + int error; + nvlist_t *nvlist; + nvlist_t *permnv, *sourcenv; + nvpair_t *who_pair, *source_pair; + nvpair_t *perm_pair; + char errbuf[1024]; + zfs_allow_t *zallowp, *newallowp; + char ld; + char *nvpname; + uid_t uid; + gid_t gid; + avl_tree_t *tree; + avl_index_t where; + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + + if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0) + return (-1); + + while (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_GET_FSACL, &zc) != 0) { + if (errno == ENOMEM) { + if (zcmd_expand_dst_nvlist(zhp->zfs_hdl, &zc) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + } else if (errno == ENOTSUP) { + zcmd_free_nvlists(&zc); + (void) snprintf(errbuf, sizeof (errbuf), + gettext("Pool must be upgraded to use 'allow'")); + return (zfs_error(zhp->zfs_hdl, + EZFS_BADVERSION, errbuf)); + } else { + zcmd_free_nvlists(&zc); + return (-1); + } + } + + if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &nvlist) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + + zcmd_free_nvlists(&zc); + + source_pair = nvlist_next_nvpair(nvlist, NULL); + + if (source_pair == NULL) { + *zfs_perms = NULL; + return (0); + } + + *zfs_perms = zfs_alloc_perm_tree(zhp, NULL, nvpair_name(source_pair)); + if (*zfs_perms == NULL) { + return (0); + } + + zallowp = *zfs_perms; + + for (;;) { + struct passwd *pwd; + struct group *grp; + zfs_allow_node_t *allownode; + zfs_allow_node_t findallownode; + zfs_allow_node_t *newallownode; + + (void) strlcpy(zallowp->z_setpoint, + nvpair_name(source_pair), + sizeof (zallowp->z_setpoint)); + + if ((error = nvpair_value_nvlist(source_pair, &sourcenv)) != 0) + goto abort; + + /* + * Make sure nvlist is composed correctly + */ + if (zfs_deleg_verify_nvlist(sourcenv)) { + goto abort; + } + + who_pair = nvlist_next_nvpair(sourcenv, NULL); + if (who_pair == NULL) { + goto abort; + } + + do { + error = nvpair_value_nvlist(who_pair, &permnv); + if (error) { + goto abort; + } + + /* + * First build up the key to use + * for looking up in the various + * who trees. + */ + ld = nvpair_name(who_pair)[1]; + nvpname = nvpair_name(who_pair); + switch (nvpair_name(who_pair)[0]) { + case ZFS_DELEG_USER: + case ZFS_DELEG_USER_SETS: + tree = &zallowp->z_user; + uid = atol(&nvpname[3]); + pwd = getpwuid(uid); + (void) snprintf(findallownode.z_key, + sizeof (findallownode.z_key), "user %s", + (pwd) ? pwd->pw_name : + &nvpair_name(who_pair)[3]); + break; + case ZFS_DELEG_GROUP: + case ZFS_DELEG_GROUP_SETS: + tree = &zallowp->z_group; + gid = atol(&nvpname[3]); + grp = getgrgid(gid); + (void) snprintf(findallownode.z_key, + sizeof (findallownode.z_key), "group %s", + (grp) ? 
grp->gr_name : + &nvpair_name(who_pair)[3]); + break; + case ZFS_DELEG_CREATE: + case ZFS_DELEG_CREATE_SETS: + tree = &zallowp->z_crperms; + (void) strlcpy(findallownode.z_key, "", + sizeof (findallownode.z_key)); + break; + case ZFS_DELEG_EVERYONE: + case ZFS_DELEG_EVERYONE_SETS: + (void) snprintf(findallownode.z_key, + sizeof (findallownode.z_key), "everyone"); + tree = &zallowp->z_everyone; + break; + case ZFS_DELEG_NAMED_SET: + case ZFS_DELEG_NAMED_SET_SETS: + (void) snprintf(findallownode.z_key, + sizeof (findallownode.z_key), "%s", + &nvpair_name(who_pair)[3]); + tree = &zallowp->z_sets; + break; + } + + /* + * Place who in tree + */ + allownode = avl_find(tree, &findallownode, &where); + if (allownode == NULL) { + if ((newallownode = zfs_alloc(zhp->zfs_hdl, + sizeof (zfs_allow_node_t))) == NULL) { + goto abort; + } + avl_create(&newallownode->z_localdescend, + perm_compare, + sizeof (zfs_perm_node_t), + offsetof(zfs_perm_node_t, z_node)); + avl_create(&newallownode->z_local, + perm_compare, + sizeof (zfs_perm_node_t), + offsetof(zfs_perm_node_t, z_node)); + avl_create(&newallownode->z_descend, + perm_compare, + sizeof (zfs_perm_node_t), + offsetof(zfs_perm_node_t, z_node)); + (void) strlcpy(newallownode->z_key, + findallownode.z_key, + sizeof (findallownode.z_key)); + avl_insert(tree, newallownode, where); + allownode = newallownode; + } + + /* + * Now iterate over the permissions and + * place them in the appropriate local, + * descendent or local+descendent tree. + * + * The permissions are added to the tree + * via zfs_coalesce_perm(). + */ + perm_pair = nvlist_next_nvpair(permnv, NULL); + if (perm_pair == NULL) + goto abort; + do { + if (zfs_coalesce_perm(zhp, allownode, + nvpair_name(perm_pair), ld) != 0) + goto abort; + } while (perm_pair = nvlist_next_nvpair(permnv, + perm_pair)); + } while (who_pair = nvlist_next_nvpair(sourcenv, who_pair)); + + source_pair = nvlist_next_nvpair(nvlist, source_pair); + if (source_pair == NULL) + break; + + /* + * allocate another node from the link list of + * zfs_allow_t structures + */ + newallowp = zfs_alloc_perm_tree(zhp, zallowp, + nvpair_name(source_pair)); + if (newallowp == NULL) { + goto abort; + } + zallowp = newallowp; + } + nvlist_free(nvlist); + return (0); +abort: + zfs_free_allows(*zfs_perms); + nvlist_free(nvlist); + return (-1); +} + +static char * +zfs_deleg_perm_note(zfs_deleg_note_t note) +{ + /* + * Don't put newlines on end of lines + */ + switch (note) { + case ZFS_DELEG_NOTE_CREATE: + return (dgettext(TEXT_DOMAIN, + "Must also have the 'mount' ability")); + case ZFS_DELEG_NOTE_DESTROY: + return (dgettext(TEXT_DOMAIN, + "Must also have the 'mount' ability")); + case ZFS_DELEG_NOTE_SNAPSHOT: + return (dgettext(TEXT_DOMAIN, + "Must also have the 'mount' ability")); + case ZFS_DELEG_NOTE_ROLLBACK: + return (dgettext(TEXT_DOMAIN, + "Must also have the 'mount' ability")); + case ZFS_DELEG_NOTE_CLONE: + return (dgettext(TEXT_DOMAIN, "Must also have the 'create' " + "ability and 'mount'\n" + "\t\t\t\tability in the origin file system")); + case ZFS_DELEG_NOTE_PROMOTE: + return (dgettext(TEXT_DOMAIN, "Must also have the 'mount'\n" + "\t\t\t\tand 'promote' ability in the origin file system")); + case ZFS_DELEG_NOTE_RENAME: + return (dgettext(TEXT_DOMAIN, "Must also have the 'mount' " + "and 'create' \n\t\t\t\tability in the new parent")); + case ZFS_DELEG_NOTE_RECEIVE: + return (dgettext(TEXT_DOMAIN, "Must also have the 'mount'" + " and 'create' ability")); + case ZFS_DELEG_NOTE_USERPROP: + return (dgettext(TEXT_DOMAIN, + 
"Allows changing any user property")); + case ZFS_DELEG_NOTE_ALLOW: + return (dgettext(TEXT_DOMAIN, + "Must also have the permission that is being\n" + "\t\t\t\tallowed")); + case ZFS_DELEG_NOTE_MOUNT: + return (dgettext(TEXT_DOMAIN, + "Allows mount/umount of ZFS datasets")); + case ZFS_DELEG_NOTE_SHARE: + return (dgettext(TEXT_DOMAIN, + "Allows sharing file systems over NFS or SMB\n" + "\t\t\t\tprotocols")); + case ZFS_DELEG_NOTE_NONE: + default: + return (dgettext(TEXT_DOMAIN, "")); + } +} + +typedef enum { + ZFS_DELEG_SUBCOMMAND, + ZFS_DELEG_PROP, + ZFS_DELEG_OTHER +} zfs_deleg_perm_type_t; + +/* + * is the permission a subcommand or other? + */ +zfs_deleg_perm_type_t +zfs_deleg_perm_type(const char *perm) +{ + if (strcmp(perm, "userprop") == 0) + return (ZFS_DELEG_OTHER); + else + return (ZFS_DELEG_SUBCOMMAND); +} + +static char * +zfs_deleg_perm_type_str(zfs_deleg_perm_type_t type) +{ + switch (type) { + case ZFS_DELEG_SUBCOMMAND: + return (dgettext(TEXT_DOMAIN, "subcommand")); + case ZFS_DELEG_PROP: + return (dgettext(TEXT_DOMAIN, "property")); + case ZFS_DELEG_OTHER: + return (dgettext(TEXT_DOMAIN, "other")); + } + return (""); +} + +/*ARGSUSED*/ +static int +zfs_deleg_prop_cb(int prop, void *cb) +{ + if (zfs_prop_delegatable(prop)) + (void) fprintf(stderr, "%-15s %-15s\n", zfs_prop_to_name(prop), + zfs_deleg_perm_type_str(ZFS_DELEG_PROP)); + + return (ZPROP_CONT); +} + +void +zfs_deleg_permissions(void) +{ + int i; + + (void) fprintf(stderr, "\n%-15s %-15s\t%s\n\n", "NAME", + "TYPE", "NOTES"); + + /* + * First print out the subcommands + */ + for (i = 0; zfs_deleg_perm_tab[i].z_perm != NULL; i++) { + (void) fprintf(stderr, "%-15s %-15s\t%s\n", + zfs_deleg_perm_tab[i].z_perm, + zfs_deleg_perm_type_str( + zfs_deleg_perm_type(zfs_deleg_perm_tab[i].z_perm)), + zfs_deleg_perm_note(zfs_deleg_perm_tab[i].z_note)); + } + + (void) zprop_iter(zfs_deleg_prop_cb, NULL, B_FALSE, B_TRUE, + ZFS_TYPE_DATASET|ZFS_TYPE_VOLUME); +} + +/* + * Given a property name and value, set the property for the given dataset. + */ +int +zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval) +{ + zfs_cmd_t zc = { 0 }; + int ret = -1; + prop_changelist_t *cl = NULL; + char errbuf[1024]; + libzfs_handle_t *hdl = zhp->zfs_hdl; + nvlist_t *nvl = NULL, *realprops; + zfs_prop_t prop; + boolean_t do_prefix; + uint64_t idx; + + (void) snprintf(errbuf, sizeof (errbuf), + dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), + zhp->zfs_name); + + if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 || + nvlist_add_string(nvl, propname, propval) != 0) { + (void) no_memory(hdl); + goto error; + } + + if ((realprops = zfs_valid_proplist(hdl, zhp->zfs_type, nvl, + zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, errbuf)) == NULL) + goto error; + + nvlist_free(nvl); + nvl = realprops; + + prop = zfs_name_to_prop(propname); + + if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL) + goto error; + + if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "child dataset with inherited mountpoint is used " + "in a non-global zone")); + ret = zfs_error(hdl, EZFS_ZONED, errbuf); + goto error; + } + + /* + * If the dataset's canmount property is being set to noauto, + * then we want to prevent unmounting & remounting it. 
+ */ + do_prefix = !((prop == ZFS_PROP_CANMOUNT) && + (zprop_string_to_index(prop, propval, &idx, + ZFS_TYPE_DATASET) == 0) && (idx == ZFS_CANMOUNT_NOAUTO)); + + if (do_prefix && (ret = changelist_prefix(cl)) != 0) + goto error; + + /* + * Execute the corresponding ioctl() to set this property. + */ + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + + if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0) + goto error; + + ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc); + if (ret != 0) { + switch (errno) { + + case ENOSPC: + /* + * For quotas and reservations, ENOSPC indicates + * something different; setting a quota or reservation + * doesn't use any disk space. + */ + switch (prop) { + case ZFS_PROP_QUOTA: + case ZFS_PROP_REFQUOTA: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "size is less than current used or " + "reserved space")); + (void) zfs_error(hdl, EZFS_PROPSPACE, errbuf); + break; + + case ZFS_PROP_RESERVATION: + case ZFS_PROP_REFRESERVATION: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "size is greater than available space")); + (void) zfs_error(hdl, EZFS_PROPSPACE, errbuf); + break; + + default: + (void) zfs_standard_error(hdl, errno, errbuf); + break; + } + break; + + case EBUSY: + if (prop == ZFS_PROP_VOLBLOCKSIZE) + (void) zfs_error(hdl, EZFS_VOLHASDATA, errbuf); + else + (void) zfs_standard_error(hdl, EBUSY, errbuf); + break; + + case EROFS: + (void) zfs_error(hdl, EZFS_DSREADONLY, errbuf); + break; + + case ENOTSUP: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "pool and or dataset must be upgraded to set this " + "property or value")); + (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); + break; + + case ERANGE: + if (prop == ZFS_PROP_COMPRESSION) { + (void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "property setting is not allowed on " + "bootable datasets")); + (void) zfs_error(hdl, EZFS_NOTSUP, errbuf); + } else { + (void) zfs_standard_error(hdl, errno, errbuf); + } + break; + + case EOVERFLOW: + /* + * This platform can't address a volume this big. + */ +#ifdef _ILP32 + if (prop == ZFS_PROP_VOLSIZE) { + (void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf); + break; + } +#endif + /* FALLTHROUGH */ + default: + (void) zfs_standard_error(hdl, errno, errbuf); + } + } else { + if (do_prefix) + ret = changelist_postfix(cl); + + /* + * Refresh the statistics so the new property value + * is reflected. + */ + if (ret == 0) + (void) get_stats(zhp); + } + +error: + nvlist_free(nvl); + zcmd_free_nvlists(&zc); + if (cl) + changelist_free(cl); + return (ret); +} + +/* + * Given a property, inherit the value from the parent dataset. + */ +int +zfs_prop_inherit(zfs_handle_t *zhp, const char *propname) +{ + zfs_cmd_t zc = { 0 }; + int ret; + prop_changelist_t *cl; + libzfs_handle_t *hdl = zhp->zfs_hdl; + char errbuf[1024]; + zfs_prop_t prop; + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot inherit %s for '%s'"), propname, zhp->zfs_name); + + if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) { + /* + * For user properties, the amount of work we have to do is very + * small, so just do it here. 
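+ * For example (property name is hypothetical), inheriting
+ * "com.example:backup" only requires copying the name into zc_value
+ * and issuing one ZFS_IOC_INHERIT_PROP ioctl; no changelist or
+ * unmount/remount pass is needed.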
+ */ + if (!zfs_prop_user(propname)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "invalid property")); + return (zfs_error(hdl, EZFS_BADPROP, errbuf)); + } + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + (void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value)); + + if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0) + return (zfs_standard_error(hdl, errno, errbuf)); + + return (0); + } + + /* + * Verify that this property is inheritable. + */ + if (zfs_prop_readonly(prop)) + return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf)); + + if (!zfs_prop_inheritable(prop)) + return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf)); + + /* + * Check to see if the value applies to this type + */ + if (!zfs_prop_valid_for_type(prop, zhp->zfs_type)) + return (zfs_error(hdl, EZFS_PROPTYPE, errbuf)); + + /* + * Normalize the name, to get rid of shorthand abbrevations. + */ + propname = zfs_prop_to_name(prop); + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + (void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value)); + + if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID && + zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "dataset is used in a non-global zone")); + return (zfs_error(hdl, EZFS_ZONED, errbuf)); + } + + /* + * Determine datasets which will be affected by this change, if any. + */ + if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL) + return (-1); + + if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "child dataset with inherited mountpoint is used " + "in a non-global zone")); + ret = zfs_error(hdl, EZFS_ZONED, errbuf); + goto error; + } + + if ((ret = changelist_prefix(cl)) != 0) + goto error; + + if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc)) != 0) { + return (zfs_standard_error(hdl, errno, errbuf)); + } else { + + if ((ret = changelist_postfix(cl)) != 0) + goto error; + + /* + * Refresh the statistics so the new property is reflected. + */ + (void) get_stats(zhp); + } + +error: + changelist_free(cl); + return (ret); +} + +/* + * True DSL properties are stored in an nvlist. The following two functions + * extract them appropriately. + */ +static uint64_t +getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source) +{ + nvlist_t *nv; + uint64_t value; + + *source = NULL; + if (nvlist_lookup_nvlist(zhp->zfs_props, + zfs_prop_to_name(prop), &nv) == 0) { + verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); + (void) nvlist_lookup_string(nv, ZPROP_SOURCE, source); + } else { + value = zfs_prop_default_numeric(prop); + *source = ""; + } + + return (value); +} + +static char * +getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source) +{ + nvlist_t *nv; + char *value; + + *source = NULL; + if (nvlist_lookup_nvlist(zhp->zfs_props, + zfs_prop_to_name(prop), &nv) == 0) { + verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); + (void) nvlist_lookup_string(nv, ZPROP_SOURCE, source); + } else { + if ((value = (char *)zfs_prop_default_string(prop)) == NULL) + value = ""; + *source = ""; + } + + return (value); +} + +/* + * Internal function for getting a numeric property. Both zfs_prop_get() and + * zfs_prop_get_int() are built using this interface. + * + * Certain properties can be overridden using 'mount -o'. In this case, scan + * the contents of the /etc/mnttab entry, searching for the appropriate options. 
+ * If they differ from the on-disk values, report the current values and mark + * the source "temporary". + */ +static int +get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src, + char **source, uint64_t *val) +{ + zfs_cmd_t zc = { 0 }; + nvlist_t *zplprops = NULL; + struct mnttab mnt; + char *mntopt_on = NULL; + char *mntopt_off = NULL; + + *source = NULL; + + switch (prop) { + case ZFS_PROP_ATIME: + mntopt_on = MNTOPT_ATIME; + mntopt_off = MNTOPT_NOATIME; + break; + + case ZFS_PROP_DEVICES: + mntopt_on = MNTOPT_DEVICES; + mntopt_off = MNTOPT_NODEVICES; + break; + + case ZFS_PROP_EXEC: + mntopt_on = MNTOPT_EXEC; + mntopt_off = MNTOPT_NOEXEC; + break; + + case ZFS_PROP_READONLY: + mntopt_on = MNTOPT_RO; + mntopt_off = MNTOPT_RW; + break; + + case ZFS_PROP_SETUID: + mntopt_on = MNTOPT_SETUID; + mntopt_off = MNTOPT_NOSETUID; + break; + + case ZFS_PROP_XATTR: + mntopt_on = MNTOPT_XATTR; + mntopt_off = MNTOPT_NOXATTR; + break; + + case ZFS_PROP_NBMAND: + mntopt_on = MNTOPT_NBMAND; + mntopt_off = MNTOPT_NONBMAND; + break; + } + + /* + * Because looking up the mount options is potentially expensive + * (iterating over all of /etc/mnttab), we defer its calculation until + * we're looking up a property which requires its presence. + */ + if (!zhp->zfs_mntcheck && + (mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) { + struct mnttab entry, search = { 0 }; + FILE *mnttab = zhp->zfs_hdl->libzfs_mnttab; + + search.mnt_special = (char *)zhp->zfs_name; + search.mnt_fstype = MNTTYPE_ZFS; + rewind(mnttab); + + if (getmntany(mnttab, &entry, &search) == 0) { + zhp->zfs_mntopts = zfs_strdup(zhp->zfs_hdl, + entry.mnt_mntopts); + if (zhp->zfs_mntopts == NULL) + return (-1); + } + + zhp->zfs_mntcheck = B_TRUE; + } + + if (zhp->zfs_mntopts == NULL) + mnt.mnt_mntopts = ""; + else + mnt.mnt_mntopts = zhp->zfs_mntopts; + + switch (prop) { + case ZFS_PROP_ATIME: + case ZFS_PROP_DEVICES: + case ZFS_PROP_EXEC: + case ZFS_PROP_READONLY: + case ZFS_PROP_SETUID: + case ZFS_PROP_XATTR: + case ZFS_PROP_NBMAND: + *val = getprop_uint64(zhp, prop, source); + + if (hasmntopt(&mnt, mntopt_on) && !*val) { + *val = B_TRUE; + if (src) + *src = ZPROP_SRC_TEMPORARY; + } else if (hasmntopt(&mnt, mntopt_off) && *val) { + *val = B_FALSE; + if (src) + *src = ZPROP_SRC_TEMPORARY; + } + break; + + case ZFS_PROP_CANMOUNT: + *val = getprop_uint64(zhp, prop, source); + if (*val != ZFS_CANMOUNT_ON) + *source = zhp->zfs_name; + else + *source = ""; /* default */ + break; + + case ZFS_PROP_QUOTA: + case ZFS_PROP_REFQUOTA: + case ZFS_PROP_RESERVATION: + case ZFS_PROP_REFRESERVATION: + *val = getprop_uint64(zhp, prop, source); + if (*val == 0) + *source = ""; /* default */ + else + *source = zhp->zfs_name; + break; + + case ZFS_PROP_MOUNTED: + *val = (zhp->zfs_mntopts != NULL); + break; + + case ZFS_PROP_NUMCLONES: + *val = zhp->zfs_dmustats.dds_num_clones; + break; + + case ZFS_PROP_VERSION: + case ZFS_PROP_NORMALIZE: + case ZFS_PROP_UTF8ONLY: + case ZFS_PROP_CASE: + if (!zfs_prop_valid_for_type(prop, zhp->zfs_head_type) || + zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0) + return (-1); + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) { + zcmd_free_nvlists(&zc); + zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, + "unable to get %s property"), + zfs_prop_to_name(prop)); + return (zfs_error(zhp->zfs_hdl, EZFS_BADVERSION, + dgettext(TEXT_DOMAIN, "internal error"))); + } + if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 || + 
nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop), + val) != 0) { + zcmd_free_nvlists(&zc); + zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, + "unable to get %s property"), + zfs_prop_to_name(prop)); + return (zfs_error(zhp->zfs_hdl, EZFS_NOMEM, + dgettext(TEXT_DOMAIN, "internal error"))); + } + if (zplprops) + nvlist_free(zplprops); + zcmd_free_nvlists(&zc); + break; + + default: + switch (zfs_prop_get_type(prop)) { + case PROP_TYPE_NUMBER: + case PROP_TYPE_INDEX: + *val = getprop_uint64(zhp, prop, source); + /* + * If we tried to use a defalut value for a + * readonly property, it means that it was not + * present; return an error. + */ + if (zfs_prop_readonly(prop) && + *source && (*source)[0] == '\0') { + return (-1); + } + break; + + case PROP_TYPE_STRING: + default: + zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, + "cannot get non-numeric property")); + return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP, + dgettext(TEXT_DOMAIN, "internal error"))); + } + } + + return (0); +} + +/* + * Calculate the source type, given the raw source string. + */ +static void +get_source(zfs_handle_t *zhp, zprop_source_t *srctype, char *source, + char *statbuf, size_t statlen) +{ + if (statbuf == NULL || *srctype == ZPROP_SRC_TEMPORARY) + return; + + if (source == NULL) { + *srctype = ZPROP_SRC_NONE; + } else if (source[0] == '\0') { + *srctype = ZPROP_SRC_DEFAULT; + } else { + if (strcmp(source, zhp->zfs_name) == 0) { + *srctype = ZPROP_SRC_LOCAL; + } else { + (void) strlcpy(statbuf, source, statlen); + *srctype = ZPROP_SRC_INHERITED; + } + } + +} + +/* + * Retrieve a property from the given object. If 'literal' is specified, then + * numbers are left as exact values. Otherwise, numbers are converted to a + * human-readable form. + * + * Returns 0 on success, or -1 on error. + */ +int +zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen, + zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal) +{ + char *source = NULL; + uint64_t val; + char *str; + const char *strval; + + /* + * Check to see if this property applies to our object + */ + if (!zfs_prop_valid_for_type(prop, zhp->zfs_type)) + return (-1); + + if (src) + *src = ZPROP_SRC_NONE; + + switch (prop) { + case ZFS_PROP_CREATION: + /* + * 'creation' is a time_t stored in the statistics. We convert + * this into a string unless 'literal' is specified. + */ + { + val = getprop_uint64(zhp, prop, &source); + time_t time = (time_t)val; + struct tm t; + + if (literal || + localtime_r(&time, &t) == NULL || + strftime(propbuf, proplen, "%a %b %e %k:%M %Y", + &t) == 0) + (void) snprintf(propbuf, proplen, "%llu", val); + } + break; + + case ZFS_PROP_MOUNTPOINT: + /* + * Getting the precise mountpoint can be tricky. + * + * - for 'none' or 'legacy', return those values. + * - for inherited mountpoints, we want to take everything + * after our ancestor and append it to the inherited value. + * + * If the pool has an alternate root, we want to prepend that + * root to any values we return. + */ + + str = getprop_string(zhp, prop, &source); + + if (str[0] == '/') { + char buf[MAXPATHLEN]; + char *root = buf; + const char *relpath = zhp->zfs_name + strlen(source); + + if (relpath[0] == '/') + relpath++; + + if ((zpool_get_prop(zhp->zpool_hdl, + ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL)) || + (strcmp(root, "-") == 0)) + root[0] = '\0'; + /* + * Special case an alternate root of '/'. This will + * avoid having multiple leading slashes in the + * mountpoint path. 
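+ * For instance (hypothetical names): "tank/home/fred" inheriting
+ * mountpoint=/export/home from "tank/home" is reported as
+ * root + "/export/home" + "/" + "fred"; without this special case an
+ * alternate root of "/" would produce "//export/home/fred".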
+ */ + if (strcmp(root, "/") == 0) + root++; + + /* + * If the mountpoint is '/' then skip over this + * if we are obtaining either an alternate root or + * an inherited mountpoint. + */ + if (str[1] == '\0' && (root[0] != '\0' || + relpath[0] != '\0')) + str++; + + if (relpath[0] == '\0') + (void) snprintf(propbuf, proplen, "%s%s", + root, str); + else + (void) snprintf(propbuf, proplen, "%s%s%s%s", + root, str, relpath[0] == '@' ? "" : "/", + relpath); + } else { + /* 'legacy' or 'none' */ + (void) strlcpy(propbuf, str, proplen); + } + + break; + + case ZFS_PROP_ORIGIN: + (void) strlcpy(propbuf, getprop_string(zhp, prop, &source), + proplen); + /* + * If there is no parent at all, return failure to indicate that + * it doesn't apply to this dataset. + */ + if (propbuf[0] == '\0') + return (-1); + break; + + case ZFS_PROP_QUOTA: + case ZFS_PROP_REFQUOTA: + case ZFS_PROP_RESERVATION: + case ZFS_PROP_REFRESERVATION: + + if (get_numeric_property(zhp, prop, src, &source, &val) != 0) + return (-1); + + /* + * If quota or reservation is 0, we translate this into 'none' + * (unless literal is set), and indicate that it's the default + * value. Otherwise, we print the number nicely and indicate + * that its set locally. + */ + if (val == 0) { + if (literal) + (void) strlcpy(propbuf, "0", proplen); + else + (void) strlcpy(propbuf, "none", proplen); + } else { + if (literal) + (void) snprintf(propbuf, proplen, "%llu", + (u_longlong_t)val); + else + zfs_nicenum(val, propbuf, proplen); + } + break; + + case ZFS_PROP_COMPRESSRATIO: + if (get_numeric_property(zhp, prop, src, &source, &val) != 0) + return (-1); + (void) snprintf(propbuf, proplen, "%lld.%02lldx", (longlong_t) + val / 100, (longlong_t)val % 100); + break; + + case ZFS_PROP_TYPE: + switch (zhp->zfs_type) { + case ZFS_TYPE_FILESYSTEM: + str = "filesystem"; + break; + case ZFS_TYPE_VOLUME: + str = "volume"; + break; + case ZFS_TYPE_SNAPSHOT: + str = "snapshot"; + break; + default: + abort(); + } + (void) snprintf(propbuf, proplen, "%s", str); + break; + + case ZFS_PROP_MOUNTED: + /* + * The 'mounted' property is a pseudo-property that described + * whether the filesystem is currently mounted. Even though + * it's a boolean value, the typical values of "on" and "off" + * don't make sense, so we translate to "yes" and "no". + */ + if (get_numeric_property(zhp, ZFS_PROP_MOUNTED, + src, &source, &val) != 0) + return (-1); + if (val) + (void) strlcpy(propbuf, "yes", proplen); + else + (void) strlcpy(propbuf, "no", proplen); + break; + + case ZFS_PROP_NAME: + /* + * The 'name' property is a pseudo-property derived from the + * dataset name. It is presented as a real property to simplify + * consumers. + */ + (void) strlcpy(propbuf, zhp->zfs_name, proplen); + break; + + default: + switch (zfs_prop_get_type(prop)) { + case PROP_TYPE_NUMBER: + if (get_numeric_property(zhp, prop, src, + &source, &val) != 0) + return (-1); + if (literal) + (void) snprintf(propbuf, proplen, "%llu", + (u_longlong_t)val); + else + zfs_nicenum(val, propbuf, proplen); + break; + + case PROP_TYPE_STRING: + (void) strlcpy(propbuf, + getprop_string(zhp, prop, &source), proplen); + break; + + case PROP_TYPE_INDEX: + if (get_numeric_property(zhp, prop, src, + &source, &val) != 0) + return (-1); + if (zfs_prop_index_to_string(prop, val, &strval) != 0) + return (-1); + (void) strlcpy(propbuf, strval, proplen); + break; + + default: + abort(); + } + } + + get_source(zhp, src, source, statbuf, statlen); + + return (0); +} + +/* + * Utility function to get the given numeric property. 
Does no validation that + * the given property is the appropriate type; should only be used with + * hard-coded property types. + */ +uint64_t +zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop) +{ + char *source; + uint64_t val; + + (void) get_numeric_property(zhp, prop, NULL, &source, &val); + + return (val); +} + +int +zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val) +{ + char buf[64]; + + zfs_nicenum(val, buf, sizeof (buf)); + return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf)); +} + +/* + * Similar to zfs_prop_get(), but returns the value as an integer. + */ +int +zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value, + zprop_source_t *src, char *statbuf, size_t statlen) +{ + char *source; + + /* + * Check to see if this property applies to our object + */ + if (!zfs_prop_valid_for_type(prop, zhp->zfs_type)) { + return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE, + dgettext(TEXT_DOMAIN, "cannot get property '%s'"), + zfs_prop_to_name(prop))); + } + + if (src) + *src = ZPROP_SRC_NONE; + + if (get_numeric_property(zhp, prop, src, &source, value) != 0) + return (-1); + + get_source(zhp, src, source, statbuf, statlen); + + return (0); +} + +/* + * Returns the name of the given zfs handle. + */ +const char * +zfs_get_name(const zfs_handle_t *zhp) +{ + return (zhp->zfs_name); +} + +/* + * Returns the type of the given zfs handle. + */ +zfs_type_t +zfs_get_type(const zfs_handle_t *zhp) +{ + return (zhp->zfs_type); +} + +/* + * Iterate over all child filesystems + */ +int +zfs_iter_filesystems(zfs_handle_t *zhp, zfs_iter_f func, void *data) +{ + zfs_cmd_t zc = { 0 }; + zfs_handle_t *nzhp; + int ret; + + if (zhp->zfs_type != ZFS_TYPE_FILESYSTEM) + return (0); + + for ((void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DATASET_LIST_NEXT, &zc) == 0; + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name))) { + /* + * Ignore private dataset names. + */ + if (dataset_name_hidden(zc.zc_name)) + continue; + + /* + * Silently ignore errors, as the only plausible explanation is + * that the pool has since been removed. + */ + if ((nzhp = make_dataset_handle(zhp->zfs_hdl, + zc.zc_name)) == NULL) + continue; + + if ((ret = func(nzhp, data)) != 0) + return (ret); + } + + /* + * An errno value of ESRCH indicates normal completion. If ENOENT is + * returned, then the underlying dataset has been removed since we + * obtained the handle. + */ + if (errno != ESRCH && errno != ENOENT) + return (zfs_standard_error(zhp->zfs_hdl, errno, + dgettext(TEXT_DOMAIN, "cannot iterate filesystems"))); + + return (0); +} + +/* + * Iterate over all snapshots + */ +int +zfs_iter_snapshots(zfs_handle_t *zhp, zfs_iter_f func, void *data) +{ + zfs_cmd_t zc = { 0 }; + zfs_handle_t *nzhp; + int ret; + + if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) + return (0); + + for ((void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT, + &zc) == 0; + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name))) { + + if ((nzhp = make_dataset_handle(zhp->zfs_hdl, + zc.zc_name)) == NULL) + continue; + + if ((ret = func(nzhp, data)) != 0) + return (ret); + } + + /* + * An errno value of ESRCH indicates normal completion. If ENOENT is + * returned, then the underlying dataset has been removed since we + * obtained the handle. Silently ignore this case, and return success. 
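+ *
+ * Note for callers (a sketch; the callback name is hypothetical):
+ * each handle created by this iterator is passed to the callback,
+ * which is expected to zfs_close() it, as the callbacks later in
+ * this file do:
+ *
+ *	static int
+ *	count_cb(zfs_handle_t *szhp, void *arg)
+ *	{
+ *		(*(int *)arg)++;
+ *		zfs_close(szhp);
+ *		return (0);
+ *	}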
+ */ + if (errno != ESRCH && errno != ENOENT) + return (zfs_standard_error(zhp->zfs_hdl, errno, + dgettext(TEXT_DOMAIN, "cannot iterate filesystems"))); + + return (0); +} + +/* + * Iterate over all children, snapshots and filesystems + */ +int +zfs_iter_children(zfs_handle_t *zhp, zfs_iter_f func, void *data) +{ + int ret; + + if ((ret = zfs_iter_filesystems(zhp, func, data)) != 0) + return (ret); + + return (zfs_iter_snapshots(zhp, func, data)); +} + +/* + * Given a complete name, return just the portion that refers to the parent. + * Can return NULL if this is a pool. + */ +static int +parent_name(const char *path, char *buf, size_t buflen) +{ + char *loc; + + if ((loc = strrchr(path, '/')) == NULL) + return (-1); + + (void) strncpy(buf, path, MIN(buflen, loc - path)); + buf[loc - path] = '\0'; + + return (0); +} + +/* + * If accept_ancestor is false, then check to make sure that the given path has + * a parent, and that it exists. If accept_ancestor is true, then find the + * closest existing ancestor for the given path. In prefixlen return the + * length of already existing prefix of the given path. We also fetch the + * 'zoned' property, which is used to validate property settings when creating + * new datasets. + */ +static int +check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned, + boolean_t accept_ancestor, int *prefixlen) +{ + zfs_cmd_t zc = { 0 }; + char parent[ZFS_MAXNAMELEN]; + char *slash; + zfs_handle_t *zhp; + char errbuf[1024]; + + (void) snprintf(errbuf, sizeof (errbuf), "cannot create '%s'", + path); + + /* get parent, and check to see if this is just a pool */ + if (parent_name(path, parent, sizeof (parent)) != 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "missing dataset name")); + return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); + } + + /* check to see if the pool exists */ + if ((slash = strchr(parent, '/')) == NULL) + slash = parent + strlen(parent); + (void) strncpy(zc.zc_name, parent, slash - parent); + zc.zc_name[slash - parent] = '\0'; + if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 && + errno == ENOENT) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "no such pool '%s'"), zc.zc_name); + return (zfs_error(hdl, EZFS_NOENT, errbuf)); + } + + /* check to see if the parent dataset exists */ + while ((zhp = make_dataset_handle(hdl, parent)) == NULL) { + if (errno == ENOENT && accept_ancestor) { + /* + * Go deeper to find an ancestor, give up on top level. + */ + if (parent_name(parent, parent, sizeof (parent)) != 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "no such pool '%s'"), zc.zc_name); + return (zfs_error(hdl, EZFS_NOENT, errbuf)); + } + } else if (errno == ENOENT) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "parent does not exist")); + return (zfs_error(hdl, EZFS_NOENT, errbuf)); + } else + return (zfs_standard_error(hdl, errno, errbuf)); + } + + *zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED); + /* we are in a non-global zone, but parent is in the global zone */ + if (getzoneid() != GLOBAL_ZONEID && !(*zoned)) { + (void) zfs_standard_error(hdl, EPERM, errbuf); + zfs_close(zhp); + return (-1); + } + + /* make sure parent is a filesystem */ + if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "parent is not a filesystem")); + (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); + zfs_close(zhp); + return (-1); + } + + zfs_close(zhp); + if (prefixlen != NULL) + *prefixlen = strlen(parent); + return (0); +} + +/* + * Finds whether the dataset of the given type(s) exists. 
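+ *
+ * An illustrative caller (dataset name is hypothetical):
+ *
+ *	if (zfs_dataset_exists(hdl, "tank/home",
+ *	    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME))
+ *		... the name is already in use ...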
+ */ +boolean_t +zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types) +{ + zfs_handle_t *zhp; + + if (!zfs_validate_name(hdl, path, types, B_FALSE)) + return (B_FALSE); + + /* + * Try to get stats for the dataset, which will tell us if it exists. + */ + if ((zhp = make_dataset_handle(hdl, path)) != NULL) { + int ds_type = zhp->zfs_type; + + zfs_close(zhp); + if (types & ds_type) + return (B_TRUE); + } + return (B_FALSE); +} + +/* + * Given a path to 'target', create all the ancestors between + * the prefixlen portion of the path, and the target itself. + * Fail if the initial prefixlen-ancestor does not already exist. + */ +int +create_parents(libzfs_handle_t *hdl, char *target, int prefixlen) +{ + zfs_handle_t *h; + char *cp; + const char *opname; + + /* make sure prefix exists */ + cp = target + prefixlen; + if (*cp != '/') { + assert(strchr(cp, '/') == NULL); + h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM); + } else { + *cp = '\0'; + h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM); + *cp = '/'; + } + if (h == NULL) + return (-1); + zfs_close(h); + + /* + * Attempt to create, mount, and share any ancestor filesystems, + * up to the prefixlen-long one. + */ + for (cp = target + prefixlen + 1; + cp = strchr(cp, '/'); *cp = '/', cp++) { + char *logstr; + + *cp = '\0'; + + h = make_dataset_handle(hdl, target); + if (h) { + /* it already exists, nothing to do here */ + zfs_close(h); + continue; + } + + logstr = hdl->libzfs_log_str; + hdl->libzfs_log_str = NULL; + if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM, + NULL) != 0) { + hdl->libzfs_log_str = logstr; + opname = dgettext(TEXT_DOMAIN, "create"); + goto ancestorerr; + } + + hdl->libzfs_log_str = logstr; + h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM); + if (h == NULL) { + opname = dgettext(TEXT_DOMAIN, "open"); + goto ancestorerr; + } + + if (zfs_mount(h, NULL, 0) != 0) { + opname = dgettext(TEXT_DOMAIN, "mount"); + goto ancestorerr; + } + + if (zfs_share(h) != 0) { + opname = dgettext(TEXT_DOMAIN, "share"); + goto ancestorerr; + } + + zfs_close(h); + } + + return (0); + +ancestorerr: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "failed to %s ancestor '%s'"), opname, target); + return (-1); +} + +/* + * Creates non-existing ancestors of the given path. + */ +int +zfs_create_ancestors(libzfs_handle_t *hdl, const char *path) +{ + int prefix; + uint64_t zoned; + char *path_copy; + int rc; + + if (check_parents(hdl, path, &zoned, B_TRUE, &prefix) != 0) + return (-1); + + if ((path_copy = strdup(path)) != NULL) { + rc = create_parents(hdl, path_copy, prefix); + free(path_copy); + } + if (path_copy == NULL || rc != 0) + return (-1); + + return (0); +} + +/* + * Create a new filesystem or volume. + */ +int +zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type, + nvlist_t *props) +{ + zfs_cmd_t zc = { 0 }; + int ret; + uint64_t size = 0; + uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE); + char errbuf[1024]; + uint64_t zoned; + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot create '%s'"), path); + + /* validate the path, taking care to note the extended error message */ + if (!zfs_validate_name(hdl, path, type, B_TRUE)) + return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); + + /* validate parents exist */ + if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0) + return (-1); + + /* + * The failure modes when creating a dataset of a different type over + * one that already exists is a little strange. 
In particular, if you + * try to create a dataset on top of an existing dataset, the ioctl() + * will return ENOENT, not EEXIST. To prevent this from happening, we + * first try to see if the dataset exists. + */ + (void) strlcpy(zc.zc_name, path, sizeof (zc.zc_name)); + if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "dataset already exists")); + return (zfs_error(hdl, EZFS_EXISTS, errbuf)); + } + + if (type == ZFS_TYPE_VOLUME) + zc.zc_objset_type = DMU_OST_ZVOL; + else + zc.zc_objset_type = DMU_OST_ZFS; + + if (props && (props = zfs_valid_proplist(hdl, type, props, + zoned, NULL, errbuf)) == 0) + return (-1); + + if (type == ZFS_TYPE_VOLUME) { + /* + * If we are creating a volume, the size and block size must + * satisfy a few restraints. First, the blocksize must be a + * valid block size between SPA_{MIN,MAX}BLOCKSIZE. Second, the + * volsize must be a multiple of the block size, and cannot be + * zero. + */ + if (props == NULL || nvlist_lookup_uint64(props, + zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) { + nvlist_free(props); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "missing volume size")); + return (zfs_error(hdl, EZFS_BADPROP, errbuf)); + } + + if ((ret = nvlist_lookup_uint64(props, + zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), + &blocksize)) != 0) { + if (ret == ENOENT) { + blocksize = zfs_prop_default_numeric( + ZFS_PROP_VOLBLOCKSIZE); + } else { + nvlist_free(props); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "missing volume block size")); + return (zfs_error(hdl, EZFS_BADPROP, errbuf)); + } + } + + if (size == 0) { + nvlist_free(props); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "volume size cannot be zero")); + return (zfs_error(hdl, EZFS_BADPROP, errbuf)); + } + + if (size % blocksize != 0) { + nvlist_free(props); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "volume size must be a multiple of volume block " + "size")); + return (zfs_error(hdl, EZFS_BADPROP, errbuf)); + } + } + + if (props && zcmd_write_src_nvlist(hdl, &zc, props) != 0) + return (-1); + nvlist_free(props); + + /* create the dataset */ + ret = zfs_ioctl(hdl, ZFS_IOC_CREATE, &zc); + + if (ret == 0 && type == ZFS_TYPE_VOLUME) { + ret = zvol_create_link(hdl, path); + if (ret) { + (void) zfs_standard_error(hdl, errno, + dgettext(TEXT_DOMAIN, + "Volume successfully created, but device links " + "were not created")); + zcmd_free_nvlists(&zc); + return (-1); + } + } + + zcmd_free_nvlists(&zc); + + /* check for failure */ + if (ret != 0) { + char parent[ZFS_MAXNAMELEN]; + (void) parent_name(path, parent, sizeof (parent)); + + switch (errno) { + case ENOENT: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "no such parent '%s'"), parent); + return (zfs_error(hdl, EZFS_NOENT, errbuf)); + + case EINVAL: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "parent '%s' is not a filesystem"), parent); + return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); + + case EDOM: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "volume block size must be power of 2 from " + "%u to %uk"), + (uint_t)SPA_MINBLOCKSIZE, + (uint_t)SPA_MAXBLOCKSIZE >> 10); + + return (zfs_error(hdl, EZFS_BADPROP, errbuf)); + + case ENOTSUP: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "pool must be upgraded to set this " + "property or value")); + return (zfs_error(hdl, EZFS_BADVERSION, errbuf)); +#ifdef _ILP32 + case EOVERFLOW: + /* + * This platform can't address a volume this big. 
+ */ + if (type == ZFS_TYPE_VOLUME) + return (zfs_error(hdl, EZFS_VOLTOOBIG, + errbuf)); +#endif + /* FALLTHROUGH */ + default: + return (zfs_standard_error(hdl, errno, errbuf)); + } + } + + return (0); +} + +/* + * Destroys the given dataset. The caller must make sure that the filesystem + * isn't mounted, and that there are no active dependents. + */ +int +zfs_destroy(zfs_handle_t *zhp) +{ + zfs_cmd_t zc = { 0 }; + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + + if (ZFS_IS_VOLUME(zhp)) { + /* + * If user doesn't have permissions to unshare volume, then + * abort the request. This would only happen for a + * non-privileged user. + */ + if (zfs_unshare_iscsi(zhp) != 0) { + return (-1); + } + + if (zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name) != 0) + return (-1); + + zc.zc_objset_type = DMU_OST_ZVOL; + } else { + zc.zc_objset_type = DMU_OST_ZFS; + } + + if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY, &zc) != 0) { + return (zfs_standard_error_fmt(zhp->zfs_hdl, errno, + dgettext(TEXT_DOMAIN, "cannot destroy '%s'"), + zhp->zfs_name)); + } + + remove_mountpoint(zhp); + + return (0); +} + +struct destroydata { + char *snapname; + boolean_t gotone; + boolean_t closezhp; +}; + +static int +zfs_remove_link_cb(zfs_handle_t *zhp, void *arg) +{ + struct destroydata *dd = arg; + zfs_handle_t *szhp; + char name[ZFS_MAXNAMELEN]; + boolean_t closezhp = dd->closezhp; + int rv; + + (void) strlcpy(name, zhp->zfs_name, sizeof (name)); + (void) strlcat(name, "@", sizeof (name)); + (void) strlcat(name, dd->snapname, sizeof (name)); + + szhp = make_dataset_handle(zhp->zfs_hdl, name); + if (szhp) { + dd->gotone = B_TRUE; + zfs_close(szhp); + } + + if (zhp->zfs_type == ZFS_TYPE_VOLUME) { + (void) zvol_remove_link(zhp->zfs_hdl, name); + /* + * NB: this is simply a best-effort. We don't want to + * return an error, because then we wouldn't visit all + * the volumes. + */ + } + + dd->closezhp = B_TRUE; + rv = zfs_iter_filesystems(zhp, zfs_remove_link_cb, arg); + if (closezhp) + zfs_close(zhp); + return (rv); +} + +/* + * Destroys all snapshots with the given name in zhp & descendants. + */ +int +zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname) +{ + zfs_cmd_t zc = { 0 }; + int ret; + struct destroydata dd = { 0 }; + + dd.snapname = snapname; + (void) zfs_remove_link_cb(zhp, &dd); + + if (!dd.gotone) { + return (zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT, + dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"), + zhp->zfs_name, snapname)); + } + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + (void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value)); + + ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY_SNAPS, &zc); + if (ret != 0) { + char errbuf[1024]; + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot destroy '%s@%s'"), zc.zc_name, snapname); + + switch (errno) { + case EEXIST: + zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, + "snapshot is cloned")); + return (zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf)); + + default: + return (zfs_standard_error(zhp->zfs_hdl, errno, + errbuf)); + } + } + + return (0); +} + +/* + * Clones the given dataset. The target must be of the same type as the source. 
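+ * Note that zhp must be a snapshot handle (see the assert below).
+ * An illustrative caller (names are hypothetical):
+ *
+ *	zfs_handle_t *szhp = zfs_open(hdl, "tank/fs@snap",
+ *	    ZFS_TYPE_SNAPSHOT);
+ *	if (szhp != NULL && zfs_clone(szhp, "tank/fs-clone", NULL) == 0)
+ *		... the clone was created ...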
+ */ +int +zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props) +{ + zfs_cmd_t zc = { 0 }; + char parent[ZFS_MAXNAMELEN]; + int ret; + char errbuf[1024]; + libzfs_handle_t *hdl = zhp->zfs_hdl; + zfs_type_t type; + uint64_t zoned; + + assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot create '%s'"), target); + + /* validate the target name */ + if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE)) + return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); + + /* validate parents exist */ + if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0) + return (-1); + + (void) parent_name(target, parent, sizeof (parent)); + + /* do the clone */ + if (ZFS_IS_VOLUME(zhp)) { + zc.zc_objset_type = DMU_OST_ZVOL; + type = ZFS_TYPE_VOLUME; + } else { + zc.zc_objset_type = DMU_OST_ZFS; + type = ZFS_TYPE_FILESYSTEM; + } + + if (props) { + if ((props = zfs_valid_proplist(hdl, type, props, zoned, + zhp, errbuf)) == NULL) + return (-1); + + if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { + nvlist_free(props); + return (-1); + } + + nvlist_free(props); + } + + (void) strlcpy(zc.zc_name, target, sizeof (zc.zc_name)); + (void) strlcpy(zc.zc_value, zhp->zfs_name, sizeof (zc.zc_value)); + ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_CREATE, &zc); + + zcmd_free_nvlists(&zc); + + if (ret != 0) { + switch (errno) { + + case ENOENT: + /* + * The parent doesn't exist. We should have caught this + * above, but there may a race condition that has since + * destroyed the parent. + * + * At this point, we don't know whether it's the source + * that doesn't exist anymore, or whether the target + * dataset doesn't exist. + */ + zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, + "no such parent '%s'"), parent); + return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf)); + + case EXDEV: + zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, + "source and target pools differ")); + return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET, + errbuf)); + + default: + return (zfs_standard_error(zhp->zfs_hdl, errno, + errbuf)); + } + } else if (ZFS_IS_VOLUME(zhp)) { + ret = zvol_create_link(zhp->zfs_hdl, target); + } + + return (ret); +} + +typedef struct promote_data { + char cb_mountpoint[MAXPATHLEN]; + const char *cb_target; + const char *cb_errbuf; + uint64_t cb_pivot_txg; +} promote_data_t; + +static int +promote_snap_cb(zfs_handle_t *zhp, void *data) +{ + promote_data_t *pd = data; + zfs_handle_t *szhp; + char snapname[MAXPATHLEN]; + int rv = 0; + + /* We don't care about snapshots after the pivot point */ + if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > pd->cb_pivot_txg) { + zfs_close(zhp); + return (0); + } + + /* Remove the device link if it's a zvol. 
*/ + if (ZFS_IS_VOLUME(zhp)) + (void) zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name); + + /* Check for conflicting names */ + (void) strlcpy(snapname, pd->cb_target, sizeof (snapname)); + (void) strlcat(snapname, strchr(zhp->zfs_name, '@'), sizeof (snapname)); + szhp = make_dataset_handle(zhp->zfs_hdl, snapname); + if (szhp != NULL) { + zfs_close(szhp); + zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, + "snapshot name '%s' from origin \n" + "conflicts with '%s' from target"), + zhp->zfs_name, snapname); + rv = zfs_error(zhp->zfs_hdl, EZFS_EXISTS, pd->cb_errbuf); + } + zfs_close(zhp); + return (rv); +} + +static int +promote_snap_done_cb(zfs_handle_t *zhp, void *data) +{ + promote_data_t *pd = data; + + /* We don't care about snapshots after the pivot point */ + if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) <= pd->cb_pivot_txg) { + /* Create the device link if it's a zvol. */ + if (ZFS_IS_VOLUME(zhp)) + (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name); + } + + zfs_close(zhp); + return (0); +} + +/* + * Promotes the given clone fs to be the clone parent. + */ +int +zfs_promote(zfs_handle_t *zhp) +{ + libzfs_handle_t *hdl = zhp->zfs_hdl; + zfs_cmd_t zc = { 0 }; + char parent[MAXPATHLEN]; + char *cp; + int ret; + zfs_handle_t *pzhp; + promote_data_t pd; + char errbuf[1024]; + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot promote '%s'"), zhp->zfs_name); + + if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "snapshots can not be promoted")); + return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); + } + + (void) strlcpy(parent, zhp->zfs_dmustats.dds_origin, sizeof (parent)); + if (parent[0] == '\0') { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "not a cloned filesystem")); + return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); + } + cp = strchr(parent, '@'); + *cp = '\0'; + + /* Walk the snapshots we will be moving */ + pzhp = zfs_open(hdl, zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT); + if (pzhp == NULL) + return (-1); + pd.cb_pivot_txg = zfs_prop_get_int(pzhp, ZFS_PROP_CREATETXG); + zfs_close(pzhp); + pd.cb_target = zhp->zfs_name; + pd.cb_errbuf = errbuf; + pzhp = zfs_open(hdl, parent, ZFS_TYPE_DATASET); + if (pzhp == NULL) + return (-1); + (void) zfs_prop_get(pzhp, ZFS_PROP_MOUNTPOINT, pd.cb_mountpoint, + sizeof (pd.cb_mountpoint), NULL, NULL, 0, FALSE); + ret = zfs_iter_snapshots(pzhp, promote_snap_cb, &pd); + if (ret != 0) { + zfs_close(pzhp); + return (-1); + } + + /* issue the ioctl */ + (void) strlcpy(zc.zc_value, zhp->zfs_dmustats.dds_origin, + sizeof (zc.zc_value)); + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + ret = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc); + + if (ret != 0) { + int save_errno = errno; + + (void) zfs_iter_snapshots(pzhp, promote_snap_done_cb, &pd); + zfs_close(pzhp); + + switch (save_errno) { + case EEXIST: + /* + * There is a conflicting snapshot name. We + * should have caught this above, but they could + * have renamed something in the mean time. 
+ */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "conflicting snapshot name from parent '%s'"), + parent); + return (zfs_error(hdl, EZFS_EXISTS, errbuf)); + + default: + return (zfs_standard_error(hdl, save_errno, errbuf)); + } + } else { + (void) zfs_iter_snapshots(zhp, promote_snap_done_cb, &pd); + } + + zfs_close(pzhp); + return (ret); +} + +struct createdata { + const char *cd_snapname; + int cd_ifexists; +}; + +static int +zfs_create_link_cb(zfs_handle_t *zhp, void *arg) +{ + struct createdata *cd = arg; + int ret; + + if (zhp->zfs_type == ZFS_TYPE_VOLUME) { + char name[MAXPATHLEN]; + + (void) strlcpy(name, zhp->zfs_name, sizeof (name)); + (void) strlcat(name, "@", sizeof (name)); + (void) strlcat(name, cd->cd_snapname, sizeof (name)); + (void) zvol_create_link_common(zhp->zfs_hdl, name, + cd->cd_ifexists); + /* + * NB: this is simply a best-effort. We don't want to + * return an error, because then we wouldn't visit all + * the volumes. + */ + } + + ret = zfs_iter_filesystems(zhp, zfs_create_link_cb, cd); + + zfs_close(zhp); + + return (ret); +} + +/* + * Takes a snapshot of the given dataset. + */ +int +zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive, + nvlist_t *props) +{ + const char *delim; + char parent[ZFS_MAXNAMELEN]; + zfs_handle_t *zhp; + zfs_cmd_t zc = { 0 }; + int ret; + char errbuf[1024]; + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot snapshot '%s'"), path); + + /* validate the target name */ + if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE)) + return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); + + if (props) { + if ((props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT, + props, B_FALSE, NULL, errbuf)) == NULL) + return (-1); + + if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { + nvlist_free(props); + return (-1); + } + + nvlist_free(props); + } + + /* make sure the parent exists and is of the appropriate type */ + delim = strchr(path, '@'); + (void) strncpy(parent, path, delim - path); + parent[delim - path] = '\0'; + + if ((zhp = zfs_open(hdl, parent, ZFS_TYPE_FILESYSTEM | + ZFS_TYPE_VOLUME)) == NULL) { + zcmd_free_nvlists(&zc); + return (-1); + } + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + (void) strlcpy(zc.zc_value, delim+1, sizeof (zc.zc_value)); + if (ZFS_IS_VOLUME(zhp)) + zc.zc_objset_type = DMU_OST_ZVOL; + else + zc.zc_objset_type = DMU_OST_ZFS; + zc.zc_cookie = recursive; + ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SNAPSHOT, &zc); + + zcmd_free_nvlists(&zc); + + /* + * if it was recursive, the one that actually failed will be in + * zc.zc_name. + */ + if (ret != 0) + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot create snapshot '%s@%s'"), zc.zc_name, zc.zc_value); + + if (ret == 0 && recursive) { + struct createdata cd; + + cd.cd_snapname = delim + 1; + cd.cd_ifexists = B_FALSE; + (void) zfs_iter_filesystems(zhp, zfs_create_link_cb, &cd); + } + if (ret == 0 && zhp->zfs_type == ZFS_TYPE_VOLUME) { + ret = zvol_create_link(zhp->zfs_hdl, path); + if (ret != 0) { + (void) zfs_standard_error(hdl, errno, + dgettext(TEXT_DOMAIN, + "Volume successfully snapshotted, but device links " + "were not created")); + zfs_close(zhp); + return (-1); + } + } + + if (ret != 0) + (void) zfs_standard_error(hdl, errno, errbuf); + + zfs_close(zhp); + + return (ret); +} + +/* + * Destroy any more recent snapshots. We invoke this callback on any dependents + * of the snapshot first. 
If the 'cb_dependent' member is non-zero, then this + * is a dependent and we should just destroy it without checking the transaction + * group. + */ +typedef struct rollback_data { + const char *cb_target; /* the snapshot */ + uint64_t cb_create; /* creation time reference */ + boolean_t cb_error; + boolean_t cb_dependent; + boolean_t cb_force; +} rollback_data_t; + +static int +rollback_destroy(zfs_handle_t *zhp, void *data) +{ + rollback_data_t *cbp = data; + + if (!cbp->cb_dependent) { + if (strcmp(zhp->zfs_name, cbp->cb_target) != 0 && + zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT && + zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > + cbp->cb_create) { + char *logstr; + + cbp->cb_dependent = B_TRUE; + cbp->cb_error |= zfs_iter_dependents(zhp, B_FALSE, + rollback_destroy, cbp); + cbp->cb_dependent = B_FALSE; + + logstr = zhp->zfs_hdl->libzfs_log_str; + zhp->zfs_hdl->libzfs_log_str = NULL; + cbp->cb_error |= zfs_destroy(zhp); + zhp->zfs_hdl->libzfs_log_str = logstr; + } + } else { + /* We must destroy this clone; first unmount it */ + prop_changelist_t *clp; + + clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, + cbp->cb_force ? MS_FORCE: 0); + if (clp == NULL || changelist_prefix(clp) != 0) { + cbp->cb_error = B_TRUE; + zfs_close(zhp); + return (0); + } + if (zfs_destroy(zhp) != 0) + cbp->cb_error = B_TRUE; + else + changelist_remove(clp, zhp->zfs_name); + (void) changelist_postfix(clp); + changelist_free(clp); + } + + zfs_close(zhp); + return (0); +} + +/* + * Given a dataset, rollback to a specific snapshot, discarding any + * data changes since then and making it the active dataset. + * + * Any snapshots more recent than the target are destroyed, along with + * their dependents. + */ +int +zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force) +{ + rollback_data_t cb = { 0 }; + int err; + zfs_cmd_t zc = { 0 }; + boolean_t restore_resv = 0; + uint64_t old_volsize, new_volsize; + zfs_prop_t resv_prop; + + assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM || + zhp->zfs_type == ZFS_TYPE_VOLUME); + + /* + * Destroy all recent snapshots and its dependends. + */ + cb.cb_force = force; + cb.cb_target = snap->zfs_name; + cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG); + (void) zfs_iter_children(zhp, rollback_destroy, &cb); + + if (cb.cb_error) + return (-1); + + /* + * Now that we have verified that the snapshot is the latest, + * rollback to the given snapshot. + */ + + if (zhp->zfs_type == ZFS_TYPE_VOLUME) { + if (zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name) != 0) + return (-1); + if (zfs_which_resv_prop(zhp, &resv_prop) < 0) + return (-1); + old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); + restore_resv = + (old_volsize == zfs_prop_get_int(zhp, resv_prop)); + } + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + + if (ZFS_IS_VOLUME(zhp)) + zc.zc_objset_type = DMU_OST_ZVOL; + else + zc.zc_objset_type = DMU_OST_ZFS; + + /* + * We rely on zfs_iter_children() to verify that there are no + * newer snapshots for the given dataset. Therefore, we can + * simply pass the name on to the ioctl() call. There is still + * an unlikely race condition where the user has taken a + * snapshot since we verified that this was the most recent. 
+ * + */ + if ((err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_ROLLBACK, &zc)) != 0) { + (void) zfs_standard_error_fmt(zhp->zfs_hdl, errno, + dgettext(TEXT_DOMAIN, "cannot rollback '%s'"), + zhp->zfs_name); + return (err); + } + + /* + * For volumes, if the pre-rollback volsize matched the pre- + * rollback reservation and the volsize has changed then set + * the reservation property to the post-rollback volsize. + * Make a new handle since the rollback closed the dataset. + */ + if ((zhp->zfs_type == ZFS_TYPE_VOLUME) && + (zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) { + if (err = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name)) { + zfs_close(zhp); + return (err); + } + if (restore_resv) { + new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); + if (old_volsize != new_volsize) + err = zfs_prop_set_int(zhp, resv_prop, + new_volsize); + } + zfs_close(zhp); + } + return (err); +} + +/* + * Iterate over all dependents for a given dataset. This includes both + * hierarchical dependents (children) and data dependents (snapshots and + * clones). The bulk of the processing occurs in get_dependents() in + * libzfs_graph.c. + */ +int +zfs_iter_dependents(zfs_handle_t *zhp, boolean_t allowrecursion, + zfs_iter_f func, void *data) +{ + char **dependents; + size_t count; + int i; + zfs_handle_t *child; + int ret = 0; + + if (get_dependents(zhp->zfs_hdl, allowrecursion, zhp->zfs_name, + &dependents, &count) != 0) + return (-1); + + for (i = 0; i < count; i++) { + if ((child = make_dataset_handle(zhp->zfs_hdl, + dependents[i])) == NULL) + continue; + + if ((ret = func(child, data)) != 0) + break; + } + + for (i = 0; i < count; i++) + free(dependents[i]); + free(dependents); + + return (ret); +} + +/* + * Renames the given dataset. + */ +int +zfs_rename(zfs_handle_t *zhp, const char *target, boolean_t recursive) +{ + int ret; + zfs_cmd_t zc = { 0 }; + char *delim; + prop_changelist_t *cl = NULL; + zfs_handle_t *zhrp = NULL; + char *parentname = NULL; + char parent[ZFS_MAXNAMELEN]; + libzfs_handle_t *hdl = zhp->zfs_hdl; + char errbuf[1024]; + + /* if we have the same exact name, just return success */ + if (strcmp(zhp->zfs_name, target) == 0) + return (0); + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot rename to '%s'"), target); + + /* + * Make sure the target name is valid + */ + if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) { + if ((strchr(target, '@') == NULL) || + *target == '@') { + /* + * Snapshot target name is abbreviated, + * reconstruct full dataset name + */ + (void) strlcpy(parent, zhp->zfs_name, + sizeof (parent)); + delim = strchr(parent, '@'); + if (strchr(target, '@') == NULL) + *(++delim) = '\0'; + else + *delim = '\0'; + (void) strlcat(parent, target, sizeof (parent)); + target = parent; + } else { + /* + * Make sure we're renaming within the same dataset. 
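+ * For example (hypothetical names), "tank/fs@old" may be renamed to
+ * "tank/fs@new" (or given as the shorthand "new" or "@new", handled
+ * above), while "tank/other@new" is rejected with EZFS_CROSSTARGET.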
+ */ + delim = strchr(target, '@'); + if (strncmp(zhp->zfs_name, target, delim - target) + != 0 || zhp->zfs_name[delim - target] != '@') { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "snapshots must be part of same " + "dataset")); + return (zfs_error(hdl, EZFS_CROSSTARGET, + errbuf)); + } + } + if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE)) + return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); + } else { + if (recursive) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "recursive rename must be a snapshot")); + return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); + } + + if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE)) + return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); + uint64_t unused; + + /* validate parents */ + if (check_parents(hdl, target, &unused, B_FALSE, NULL) != 0) + return (-1); + + (void) parent_name(target, parent, sizeof (parent)); + + /* make sure we're in the same pool */ + verify((delim = strchr(target, '/')) != NULL); + if (strncmp(zhp->zfs_name, target, delim - target) != 0 || + zhp->zfs_name[delim - target] != '/') { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "datasets must be within same pool")); + return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); + } + + /* new name cannot be a child of the current dataset name */ + if (strncmp(parent, zhp->zfs_name, + strlen(zhp->zfs_name)) == 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "New dataset name cannot be a descendent of " + "current dataset name")); + return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); + } + } + + (void) snprintf(errbuf, sizeof (errbuf), + dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name); + + if (getzoneid() == GLOBAL_ZONEID && + zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "dataset is used in a non-global zone")); + return (zfs_error(hdl, EZFS_ZONED, errbuf)); + } + + if (recursive) { + struct destroydata dd; + + parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name); + if (parentname == NULL) { + ret = -1; + goto error; + } + delim = strchr(parentname, '@'); + *delim = '\0'; + zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_DATASET); + if (zhrp == NULL) { + ret = -1; + goto error; + } + + dd.snapname = delim + 1; + dd.gotone = B_FALSE; + dd.closezhp = B_TRUE; + + /* We remove any zvol links prior to renaming them */ + ret = zfs_iter_filesystems(zhrp, zfs_remove_link_cb, &dd); + if (ret) { + goto error; + } + } else { + if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0, 0)) == NULL) + return (-1); + + if (changelist_haszonedchild(cl)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "child dataset with inherited mountpoint is used " + "in a non-global zone")); + (void) zfs_error(hdl, EZFS_ZONED, errbuf); + goto error; + } + + if ((ret = changelist_prefix(cl)) != 0) + goto error; + } + + if (ZFS_IS_VOLUME(zhp)) + zc.zc_objset_type = DMU_OST_ZVOL; + else + zc.zc_objset_type = DMU_OST_ZFS; + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + (void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value)); + + zc.zc_cookie = recursive; + + if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) { + /* + * if it was recursive, the one that actually failed will + * be in zc.zc_name + */ + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot rename '%s'"), zc.zc_name); + + if (recursive && errno == EEXIST) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "a child dataset already has a snapshot " + "with the new name")); + (void) zfs_error(hdl, EZFS_EXISTS, errbuf); + } else { + (void) 
zfs_standard_error(zhp->zfs_hdl, errno, errbuf); + } + + /* + * On failure, we still want to remount any filesystems that + * were previously mounted, so we don't alter the system state. + */ + if (recursive) { + struct createdata cd; + + /* only create links for datasets that had existed */ + cd.cd_snapname = delim + 1; + cd.cd_ifexists = B_TRUE; + (void) zfs_iter_filesystems(zhrp, zfs_create_link_cb, + &cd); + } else { + (void) changelist_postfix(cl); + } + } else { + if (recursive) { + struct createdata cd; + + /* only create links for datasets that had existed */ + cd.cd_snapname = strchr(target, '@') + 1; + cd.cd_ifexists = B_TRUE; + ret = zfs_iter_filesystems(zhrp, zfs_create_link_cb, + &cd); + } else { + changelist_rename(cl, zfs_get_name(zhp), target); + ret = changelist_postfix(cl); + } + } + +error: + if (parentname) { + free(parentname); + } + if (zhrp) { + zfs_close(zhrp); + } + if (cl) { + changelist_free(cl); + } + return (ret); +} + +/* + * Given a zvol dataset, issue the ioctl to create the appropriate minor node, + * poke devfsadm to create the /dev link, and then wait for the link to appear. + */ +int +zvol_create_link(libzfs_handle_t *hdl, const char *dataset) +{ + return (zvol_create_link_common(hdl, dataset, B_FALSE)); +} + +static int +zvol_create_link_common(libzfs_handle_t *hdl, const char *dataset, int ifexists) +{ + zfs_cmd_t zc = { 0 }; + di_devlink_handle_t dhdl; + priv_set_t *priv_effective; + int privileged; + + (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name)); + + /* + * Issue the appropriate ioctl. + */ + if (ioctl(hdl->libzfs_fd, ZFS_IOC_CREATE_MINOR, &zc) != 0) { + switch (errno) { + case EEXIST: + /* + * Silently ignore the case where the link already + * exists. This allows 'zfs volinit' to be run multiple + * times without errors. + */ + return (0); + + case ENOENT: + /* + * Dataset does not exist in the kernel. If we + * don't care (see zfs_rename), then ignore the + * error quietly. + */ + if (ifexists) { + return (0); + } + + /* FALLTHROUGH */ + + default: + return (zfs_standard_error_fmt(hdl, errno, + dgettext(TEXT_DOMAIN, "cannot create device links " + "for '%s'"), dataset)); + } + } + + /* + * If privileged call devfsadm and wait for the links to + * magically appear. + * Otherwise, print out an informational message. + */ + + priv_effective = priv_allocset(); + (void) getppriv(PRIV_EFFECTIVE, priv_effective); + privileged = (priv_isfullset(priv_effective) == B_TRUE); + priv_freeset(priv_effective); + + if (privileged) { + if ((dhdl = di_devlink_init(ZFS_DRIVER, + DI_MAKE_LINK)) == NULL) { + zfs_error_aux(hdl, strerror(errno)); + (void) zfs_error_fmt(hdl, errno, + dgettext(TEXT_DOMAIN, "cannot create device links " + "for '%s'"), dataset); + (void) ioctl(hdl->libzfs_fd, ZFS_IOC_REMOVE_MINOR, &zc); + return (-1); + } else { + (void) di_devlink_fini(&dhdl); + } + } else { + char pathname[MAXPATHLEN]; + struct stat64 statbuf; + int i; + +#define MAX_WAIT 10 + + /* + * This is the poor mans way of waiting for the link + * to show up. If after 10 seconds we still don't + * have it, then print out a message. + */ + (void) snprintf(pathname, sizeof (pathname), "/dev/zvol/dsk/%s", + dataset); + + for (i = 0; i != MAX_WAIT; i++) { + if (stat64(pathname, &statbuf) == 0) + break; + (void) sleep(1); + } + if (i == MAX_WAIT) + (void) printf(gettext("%s may not be immediately " + "available\n"), pathname); + } + + return (0); +} + +/* + * Remove a minor node for the given zvol and the associated /dev links. 
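The unprivileged branch of zvol_create_link_common() is a plain poll: stat the expected /dev path once a second and give up after MAX_WAIT tries. The same pattern, stripped of the libzfs context (the path and timeout below are illustrative):

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

/*
 * Wait up to 'tries' seconds for 'path' to appear.  Returns 0 if it
 * showed up, -1 if we gave up -- mirroring the "may not be immediately
 * available" message in zvol_create_link_common().
 */
static int
wait_for_path(const char *path, int tries)
{
        struct stat sb;
        int i;

        for (i = 0; i < tries; i++) {
                if (stat(path, &sb) == 0)
                        return (0);
                (void) sleep(1);
        }
        return (-1);
}

int
main(void)
{
        if (wait_for_path("/dev/zvol/dsk/tank/vol0", 10) != 0)
                (void) printf("device link not available yet\n");
        return (0);
}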
+ */ +int +zvol_remove_link(libzfs_handle_t *hdl, const char *dataset) +{ + zfs_cmd_t zc = { 0 }; + + (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name)); + + if (ioctl(hdl->libzfs_fd, ZFS_IOC_REMOVE_MINOR, &zc) != 0) { + switch (errno) { + case ENXIO: + /* + * Silently ignore the case where the link no longer + * exists, so that 'zfs volfini' can be run multiple + * times without errors. + */ + return (0); + + default: + return (zfs_standard_error_fmt(hdl, errno, + dgettext(TEXT_DOMAIN, "cannot remove device " + "links for '%s'"), dataset)); + } + } + + return (0); +} + +nvlist_t * +zfs_get_user_props(zfs_handle_t *zhp) +{ + return (zhp->zfs_user_props); +} + +/* + * This function is used by 'zfs list' to determine the exact set of columns to + * display, and their maximum widths. This does two main things: + * + * - If this is a list of all properties, then expand the list to include + * all native properties, and set a flag so that for each dataset we look + * for new unique user properties and add them to the list. + * + * - For non fixed-width properties, keep track of the maximum width seen + * so that we can size the column appropriately. + */ +int +zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp) +{ + libzfs_handle_t *hdl = zhp->zfs_hdl; + zprop_list_t *entry; + zprop_list_t **last, **start; + nvlist_t *userprops, *propval; + nvpair_t *elem; + char *strval; + char buf[ZFS_MAXPROPLEN]; + + if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0) + return (-1); + + userprops = zfs_get_user_props(zhp); + + entry = *plp; + if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) { + /* + * Go through and add any user properties as necessary. We + * start by incrementing our list pointer to the first + * non-native property. + */ + start = plp; + while (*start != NULL) { + if ((*start)->pl_prop == ZPROP_INVAL) + break; + start = &(*start)->pl_next; + } + + elem = NULL; + while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) { + /* + * See if we've already found this property in our list. 
+ */ + for (last = start; *last != NULL; + last = &(*last)->pl_next) { + if (strcmp((*last)->pl_user_prop, + nvpair_name(elem)) == 0) + break; + } + + if (*last == NULL) { + if ((entry = zfs_alloc(hdl, + sizeof (zprop_list_t))) == NULL || + ((entry->pl_user_prop = zfs_strdup(hdl, + nvpair_name(elem)))) == NULL) { + free(entry); + return (-1); + } + + entry->pl_prop = ZPROP_INVAL; + entry->pl_width = strlen(nvpair_name(elem)); + entry->pl_all = B_TRUE; + *last = entry; + } + } + } + + /* + * Now go through and check the width of any non-fixed columns + */ + for (entry = *plp; entry != NULL; entry = entry->pl_next) { + if (entry->pl_fixed) + continue; + + if (entry->pl_prop != ZPROP_INVAL) { + if (zfs_prop_get(zhp, entry->pl_prop, + buf, sizeof (buf), NULL, NULL, 0, B_FALSE) == 0) { + if (strlen(buf) > entry->pl_width) + entry->pl_width = strlen(buf); + } + } else if (nvlist_lookup_nvlist(userprops, + entry->pl_user_prop, &propval) == 0) { + verify(nvlist_lookup_string(propval, + ZPROP_VALUE, &strval) == 0); + if (strlen(strval) > entry->pl_width) + entry->pl_width = strlen(strval); + } + } + + return (0); +} + +int +zfs_iscsi_perm_check(libzfs_handle_t *hdl, char *dataset, ucred_t *cred) +{ + zfs_cmd_t zc = { 0 }; + nvlist_t *nvp; + gid_t gid; + uid_t uid; + const gid_t *groups; + int group_cnt; + int error; + + if (nvlist_alloc(&nvp, NV_UNIQUE_NAME, 0) != 0) + return (no_memory(hdl)); + + uid = ucred_geteuid(cred); + gid = ucred_getegid(cred); + group_cnt = ucred_getgroups(cred, &groups); + + if (uid == (uid_t)-1 || gid == (uid_t)-1 || group_cnt == (uid_t)-1) + return (1); + + if (nvlist_add_uint32(nvp, ZFS_DELEG_PERM_UID, uid) != 0) { + nvlist_free(nvp); + return (1); + } + + if (nvlist_add_uint32(nvp, ZFS_DELEG_PERM_GID, gid) != 0) { + nvlist_free(nvp); + return (1); + } + + if (nvlist_add_uint32_array(nvp, + ZFS_DELEG_PERM_GROUPS, (uint32_t *)groups, group_cnt) != 0) { + nvlist_free(nvp); + return (1); + } + (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name)); + + if (zcmd_write_src_nvlist(hdl, &zc, nvp)) + return (-1); + + error = ioctl(hdl->libzfs_fd, ZFS_IOC_ISCSI_PERM_CHECK, &zc); + nvlist_free(nvp); + return (error); +} + +int +zfs_deleg_share_nfs(libzfs_handle_t *hdl, char *dataset, char *path, + void *export, void *sharetab, int sharemax, zfs_share_op_t operation) +{ + zfs_cmd_t zc = { 0 }; + int error; + + (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name)); + (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value)); + zc.zc_share.z_sharedata = (uint64_t)(uintptr_t)sharetab; + zc.zc_share.z_exportdata = (uint64_t)(uintptr_t)export; + zc.zc_share.z_sharetype = operation; + zc.zc_share.z_sharemax = sharemax; + + error = ioctl(hdl->libzfs_fd, ZFS_IOC_SHARE, &zc); + return (error); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_graph.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_graph.c new file mode 100644 index 000000000000..e7cbf2386014 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_graph.c @@ -0,0 +1,662 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * Iterate over all children of the current object. This includes the normal + * dataset hierarchy, but also arbitrary hierarchies due to clones. We want to + * walk all datasets in the pool, and construct a directed graph of the form: + * + * home + * | + * +----+----+ + * | | + * v v ws + * bar baz | + * | | + * v v + * @yesterday ----> foo + * + * In order to construct this graph, we have to walk every dataset in the pool, + * because the clone parent is stored as a property of the child, not the + * parent. The parent only keeps track of the number of clones. + * + * In the normal case (without clones) this would be rather expensive. To avoid + * unnecessary computation, we first try a walk of the subtree hierarchy + * starting from the initial node. At each dataset, we construct a node in the + * graph and an edge leading from its parent. If we don't see any snapshots + * with a non-zero clone count, then we are finished. + * + * If we do find a cloned snapshot, then we finish the walk of the current + * subtree, but indicate that we need to do a complete walk. We then perform a + * global walk of all datasets, avoiding the subtree we already processed. + * + * At the end of this, we'll end up with a directed graph of all relevant (and + * possible some irrelevant) datasets in the system. We need to both find our + * limiting subgraph and determine a safe ordering in which to destroy the + * datasets. We do a topological ordering of our graph starting at our target + * dataset, and then walk the results in reverse. + * + * It's possible for the graph to have cycles if, for example, the user renames + * a clone to be the parent of its origin snapshot. The user can request to + * generate an error in this case, or ignore the cycle and continue. + * + * When removing datasets, we want to destroy the snapshots in chronological + * order (because this is the most efficient method). In order to accomplish + * this, we store the creation transaction group with each vertex and keep each + * vertex's edges sorted according to this value. The topological sort will + * automatically walk the snapshots in the correct order. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "libzfs_impl.h" +#include "zfs_namecheck.h" + +#define MIN_EDGECOUNT 4 + +/* + * Vertex structure. Indexed by dataset name, this structure maintains a list + * of edges to other vertices. + */ +struct zfs_edge; +typedef struct zfs_vertex { + char zv_dataset[ZFS_MAXNAMELEN]; + struct zfs_vertex *zv_next; + int zv_visited; + uint64_t zv_txg; + struct zfs_edge **zv_edges; + int zv_edgecount; + int zv_edgealloc; +} zfs_vertex_t; + +enum { + VISIT_SEEN = 1, + VISIT_SORT_PRE, + VISIT_SORT_POST +}; + +/* + * Edge structure. Simply maintains a pointer to the destination vertex. There + * is no need to store the source vertex, since we only use edges in the context + * of the source vertex. 
+ */ +typedef struct zfs_edge { + zfs_vertex_t *ze_dest; + struct zfs_edge *ze_next; +} zfs_edge_t; + +#define ZFS_GRAPH_SIZE 1027 /* this could be dynamic some day */ + +/* + * Graph structure. Vertices are maintained in a hash indexed by dataset name. + */ +typedef struct zfs_graph { + zfs_vertex_t **zg_hash; + size_t zg_size; + size_t zg_nvertex; + const char *zg_root; + int zg_clone_count; +} zfs_graph_t; + +/* + * Allocate a new edge pointing to the target vertex. + */ +static zfs_edge_t * +zfs_edge_create(libzfs_handle_t *hdl, zfs_vertex_t *dest) +{ + zfs_edge_t *zep = zfs_alloc(hdl, sizeof (zfs_edge_t)); + + if (zep == NULL) + return (NULL); + + zep->ze_dest = dest; + + return (zep); +} + +/* + * Destroy an edge. + */ +static void +zfs_edge_destroy(zfs_edge_t *zep) +{ + free(zep); +} + +/* + * Allocate a new vertex with the given name. + */ +static zfs_vertex_t * +zfs_vertex_create(libzfs_handle_t *hdl, const char *dataset) +{ + zfs_vertex_t *zvp = zfs_alloc(hdl, sizeof (zfs_vertex_t)); + + if (zvp == NULL) + return (NULL); + + assert(strlen(dataset) < ZFS_MAXNAMELEN); + + (void) strlcpy(zvp->zv_dataset, dataset, sizeof (zvp->zv_dataset)); + + if ((zvp->zv_edges = zfs_alloc(hdl, + MIN_EDGECOUNT * sizeof (void *))) == NULL) { + free(zvp); + return (NULL); + } + + zvp->zv_edgealloc = MIN_EDGECOUNT; + + return (zvp); +} + +/* + * Destroy a vertex. Frees up any associated edges. + */ +static void +zfs_vertex_destroy(zfs_vertex_t *zvp) +{ + int i; + + for (i = 0; i < zvp->zv_edgecount; i++) + zfs_edge_destroy(zvp->zv_edges[i]); + + free(zvp->zv_edges); + free(zvp); +} + +/* + * Given a vertex, add an edge to the destination vertex. + */ +static int +zfs_vertex_add_edge(libzfs_handle_t *hdl, zfs_vertex_t *zvp, + zfs_vertex_t *dest) +{ + zfs_edge_t *zep = zfs_edge_create(hdl, dest); + + if (zep == NULL) + return (-1); + + if (zvp->zv_edgecount == zvp->zv_edgealloc) { + void *ptr; + + if ((ptr = zfs_realloc(hdl, zvp->zv_edges, + zvp->zv_edgealloc * sizeof (void *), + zvp->zv_edgealloc * 2 * sizeof (void *))) == NULL) + return (-1); + + zvp->zv_edges = ptr; + zvp->zv_edgealloc *= 2; + } + + zvp->zv_edges[zvp->zv_edgecount++] = zep; + + return (0); +} + +static int +zfs_edge_compare(const void *a, const void *b) +{ + const zfs_edge_t *ea = *((zfs_edge_t **)a); + const zfs_edge_t *eb = *((zfs_edge_t **)b); + + if (ea->ze_dest->zv_txg < eb->ze_dest->zv_txg) + return (-1); + if (ea->ze_dest->zv_txg > eb->ze_dest->zv_txg) + return (1); + return (0); +} + +/* + * Sort the given vertex edges according to the creation txg of each vertex. + */ +static void +zfs_vertex_sort_edges(zfs_vertex_t *zvp) +{ + if (zvp->zv_edgecount == 0) + return; + + qsort(zvp->zv_edges, zvp->zv_edgecount, sizeof (void *), + zfs_edge_compare); +} + +/* + * Construct a new graph object. We allow the size to be specified as a + * parameter so in the future we can size the hash according to the number of + * datasets in the pool. + */ +static zfs_graph_t * +zfs_graph_create(libzfs_handle_t *hdl, const char *dataset, size_t size) +{ + zfs_graph_t *zgp = zfs_alloc(hdl, sizeof (zfs_graph_t)); + + if (zgp == NULL) + return (NULL); + + zgp->zg_size = size; + if ((zgp->zg_hash = zfs_alloc(hdl, + size * sizeof (zfs_vertex_t *))) == NULL) { + free(zgp); + return (NULL); + } + + zgp->zg_root = dataset; + zgp->zg_clone_count = 0; + + return (zgp); +} + +/* + * Destroy a graph object. We have to iterate over all the hash chains, + * destroying each vertex in the process. 
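zfs_vertex_add_edge() grows the edge array by doubling, and zfs_vertex_sort_edges() orders edges by the destination's creation txg so the later topological walk visits snapshots oldest-first. A self-contained sketch of both pieces, with a bare array of txgs standing in for the edge list:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative dynamic edge list: append with amortized doubling. */
struct edgelist {
        uint64_t *txgs;
        int count;
        int alloc;
};

static int
edge_append(struct edgelist *el, uint64_t txg)
{
        if (el->count == el->alloc) {
                int newalloc = (el->alloc == 0) ? 4 : el->alloc * 2;
                uint64_t *p = realloc(el->txgs, newalloc * sizeof (uint64_t));

                if (p == NULL)
                        return (-1);
                el->txgs = p;
                el->alloc = newalloc;
        }
        el->txgs[el->count++] = txg;
        return (0);
}

/* Ascending txg order, the same ordering zfs_edge_compare() produces. */
static int
txg_compare(const void *a, const void *b)
{
        uint64_t ta = *(const uint64_t *)a;
        uint64_t tb = *(const uint64_t *)b;

        if (ta < tb)
                return (-1);
        if (ta > tb)
                return (1);
        return (0);
}

int
main(void)
{
        struct edgelist el = { NULL, 0, 0 };
        uint64_t txgs[] = { 42, 7, 19, 7, 100 };
        size_t i;
        int j;

        for (i = 0; i < sizeof (txgs) / sizeof (txgs[0]); i++)
                (void) edge_append(&el, txgs[i]);
        qsort(el.txgs, el.count, sizeof (uint64_t), txg_compare);
        for (j = 0; j < el.count; j++)
                (void) printf("%llu\n", (unsigned long long)el.txgs[j]);
        free(el.txgs);
        return (0);
}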
+ */ +static void +zfs_graph_destroy(zfs_graph_t *zgp) +{ + int i; + zfs_vertex_t *current, *next; + + for (i = 0; i < zgp->zg_size; i++) { + current = zgp->zg_hash[i]; + while (current != NULL) { + next = current->zv_next; + zfs_vertex_destroy(current); + current = next; + } + } + + free(zgp->zg_hash); + free(zgp); +} + +/* + * Graph hash function. Classic bernstein k=33 hash function, taken from + * usr/src/cmd/sgs/tools/common/strhash.c + */ +static size_t +zfs_graph_hash(zfs_graph_t *zgp, const char *str) +{ + size_t hash = 5381; + int c; + + while ((c = *str++) != 0) + hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ + + return (hash % zgp->zg_size); +} + +/* + * Given a dataset name, finds the associated vertex, creating it if necessary. + */ +static zfs_vertex_t * +zfs_graph_lookup(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset, + uint64_t txg) +{ + size_t idx = zfs_graph_hash(zgp, dataset); + zfs_vertex_t *zvp; + + for (zvp = zgp->zg_hash[idx]; zvp != NULL; zvp = zvp->zv_next) { + if (strcmp(zvp->zv_dataset, dataset) == 0) { + if (zvp->zv_txg == 0) + zvp->zv_txg = txg; + return (zvp); + } + } + + if ((zvp = zfs_vertex_create(hdl, dataset)) == NULL) + return (NULL); + + zvp->zv_next = zgp->zg_hash[idx]; + zvp->zv_txg = txg; + zgp->zg_hash[idx] = zvp; + zgp->zg_nvertex++; + + return (zvp); +} + +/* + * Given two dataset names, create an edge between them. For the source vertex, + * mark 'zv_visited' to indicate that we have seen this vertex, and not simply + * created it as a destination of another edge. If 'dest' is NULL, then this + * is an individual vertex (i.e. the starting vertex), so don't add an edge. + */ +static int +zfs_graph_add(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *source, + const char *dest, uint64_t txg) +{ + zfs_vertex_t *svp, *dvp; + + if ((svp = zfs_graph_lookup(hdl, zgp, source, 0)) == NULL) + return (-1); + svp->zv_visited = VISIT_SEEN; + if (dest != NULL) { + dvp = zfs_graph_lookup(hdl, zgp, dest, txg); + if (dvp == NULL) + return (-1); + if (zfs_vertex_add_edge(hdl, svp, dvp) != 0) + return (-1); + } + + return (0); +} + +/* + * Iterate over all children of the given dataset, adding any vertices + * as necessary. Returns -1 if there was an error, or 0 otherwise. + * This is a simple recursive algorithm - the ZFS namespace typically + * is very flat. We manually invoke the necessary ioctl() calls to + * avoid the overhead and additional semantics of zfs_open(). + */ +static int +iterate_children(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset) +{ + zfs_cmd_t zc = { 0 }; + zfs_vertex_t *zvp; + + /* + * Look up the source vertex, and avoid it if we've seen it before. + */ + zvp = zfs_graph_lookup(hdl, zgp, dataset, 0); + if (zvp == NULL) + return (-1); + if (zvp->zv_visited == VISIT_SEEN) + return (0); + + /* + * Iterate over all children + */ + for ((void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name)); + ioctl(hdl->libzfs_fd, ZFS_IOC_DATASET_LIST_NEXT, &zc) == 0; + (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name))) { + + /* + * Ignore private dataset names. + */ + if (dataset_name_hidden(zc.zc_name)) + continue; + + /* + * Get statistics for this dataset, to determine the type of the + * dataset and clone statistics. If this fails, the dataset has + * since been removed, and we're pretty much screwed anyway. 
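Vertex lookup is a chained hash keyed on the dataset name; the hash itself is the Bernstein k=33 function used by zfs_graph_hash() above. A standalone sketch of just the hash step, reusing ZFS_GRAPH_SIZE (1027) as the bucket count:

#include <stddef.h>
#include <stdio.h>

/* Bernstein k=33 hash, as in zfs_graph_hash(), reduced mod nbuckets. */
static size_t
name_hash(const char *str, size_t nbuckets)
{
        size_t hash = 5381;
        int c;

        while ((c = *str++) != 0)
                hash = ((hash << 5) + hash) + c;        /* hash * 33 + c */
        return (hash % nbuckets);
}

int
main(void)
{
        (void) printf("%zu\n", name_hash("tank/home@snap", 1027));
        return (0);
}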
+ */ + zc.zc_objset_stats.dds_origin[0] = '\0'; + if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) + continue; + + if (zc.zc_objset_stats.dds_origin[0] != '\0') { + if (zfs_graph_add(hdl, zgp, + zc.zc_objset_stats.dds_origin, zc.zc_name, + zc.zc_objset_stats.dds_creation_txg) != 0) + return (-1); + /* + * Count origins only if they are contained in the graph + */ + if (isa_child_of(zc.zc_objset_stats.dds_origin, + zgp->zg_root)) + zgp->zg_clone_count--; + } + + /* + * Add an edge between the parent and the child. + */ + if (zfs_graph_add(hdl, zgp, dataset, zc.zc_name, + zc.zc_objset_stats.dds_creation_txg) != 0) + return (-1); + + /* + * Recursively visit child + */ + if (iterate_children(hdl, zgp, zc.zc_name)) + return (-1); + } + + /* + * Now iterate over all snapshots. + */ + bzero(&zc, sizeof (zc)); + + for ((void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name)); + ioctl(hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT, &zc) == 0; + (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name))) { + + /* + * Get statistics for this dataset, to determine the type of the + * dataset and clone statistics. If this fails, the dataset has + * since been removed, and we're pretty much screwed anyway. + */ + if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) + continue; + + /* + * Add an edge between the parent and the child. + */ + if (zfs_graph_add(hdl, zgp, dataset, zc.zc_name, + zc.zc_objset_stats.dds_creation_txg) != 0) + return (-1); + + zgp->zg_clone_count += zc.zc_objset_stats.dds_num_clones; + } + + zvp->zv_visited = VISIT_SEEN; + + return (0); +} + +/* + * Returns false if there are no snapshots with dependent clones in this + * subtree or if all of those clones are also in this subtree. Returns + * true if there is an error or there are external dependents. + */ +static boolean_t +external_dependents(libzfs_handle_t *hdl, zfs_graph_t *zgp, const char *dataset) +{ + zfs_cmd_t zc = { 0 }; + + /* + * Check whether this dataset is a clone or has clones since + * iterate_children() only checks the children. + */ + (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name)); + if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) + return (B_TRUE); + + if (zc.zc_objset_stats.dds_origin[0] != '\0') { + if (zfs_graph_add(hdl, zgp, + zc.zc_objset_stats.dds_origin, zc.zc_name, + zc.zc_objset_stats.dds_creation_txg) != 0) + return (B_TRUE); + if (isa_child_of(zc.zc_objset_stats.dds_origin, dataset)) + zgp->zg_clone_count--; + } + + if ((zc.zc_objset_stats.dds_num_clones) || + iterate_children(hdl, zgp, dataset)) + return (B_TRUE); + + return (zgp->zg_clone_count != 0); +} + +/* + * Construct a complete graph of all necessary vertices. First, iterate over + * only our object's children. If no cloned snapshots are found, or all of + * the cloned snapshots are in this subtree then return a graph of the subtree. + * Otherwise, start at the root of the pool and iterate over all datasets. + */ +static zfs_graph_t * +construct_graph(libzfs_handle_t *hdl, const char *dataset) +{ + zfs_graph_t *zgp = zfs_graph_create(hdl, dataset, ZFS_GRAPH_SIZE); + int ret = 0; + + if (zgp == NULL) + return (zgp); + + if ((strchr(dataset, '/') == NULL) || + (external_dependents(hdl, zgp, dataset))) { + /* + * Determine pool name and try again. 
+ */ + int len = strcspn(dataset, "/@") + 1; + char *pool = zfs_alloc(hdl, len); + + if (pool == NULL) { + zfs_graph_destroy(zgp); + return (NULL); + } + (void) strlcpy(pool, dataset, len); + + if (iterate_children(hdl, zgp, pool) == -1 || + zfs_graph_add(hdl, zgp, pool, NULL, 0) != 0) { + free(pool); + zfs_graph_destroy(zgp); + return (NULL); + } + free(pool); + } + + if (ret == -1 || zfs_graph_add(hdl, zgp, dataset, NULL, 0) != 0) { + zfs_graph_destroy(zgp); + return (NULL); + } + + return (zgp); +} + +/* + * Given a graph, do a recursive topological sort into the given array. This is + * really just a depth first search, so that the deepest nodes appear first. + * hijack the 'zv_visited' marker to avoid visiting the same vertex twice. + */ +static int +topo_sort(libzfs_handle_t *hdl, boolean_t allowrecursion, char **result, + size_t *idx, zfs_vertex_t *zgv) +{ + int i; + + if (zgv->zv_visited == VISIT_SORT_PRE && !allowrecursion) { + /* + * If we've already seen this vertex as part of our depth-first + * search, then we have a cyclic dependency, and we must return + * an error. + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "recursive dependency at '%s'"), + zgv->zv_dataset); + return (zfs_error(hdl, EZFS_RECURSIVE, + dgettext(TEXT_DOMAIN, + "cannot determine dependent datasets"))); + } else if (zgv->zv_visited >= VISIT_SORT_PRE) { + /* + * If we've already processed this as part of the topological + * sort, then don't bother doing so again. + */ + return (0); + } + + zgv->zv_visited = VISIT_SORT_PRE; + + /* avoid doing a search if we don't have to */ + zfs_vertex_sort_edges(zgv); + for (i = 0; i < zgv->zv_edgecount; i++) { + if (topo_sort(hdl, allowrecursion, result, idx, + zgv->zv_edges[i]->ze_dest) != 0) + return (-1); + } + + /* we may have visited this in the course of the above */ + if (zgv->zv_visited == VISIT_SORT_POST) + return (0); + + if ((result[*idx] = zfs_alloc(hdl, + strlen(zgv->zv_dataset) + 1)) == NULL) + return (-1); + + (void) strcpy(result[*idx], zgv->zv_dataset); + *idx += 1; + zgv->zv_visited = VISIT_SORT_POST; + return (0); +} + +/* + * The only public interface for this file. Do the dirty work of constructing a + * child list for the given object. Construct the graph, do the toplogical + * sort, and then return the array of strings to the caller. + * + * The 'allowrecursion' parameter controls behavior when cycles are found. If + * it is set, the the cycle is ignored and the results returned as if the cycle + * did not exist. If it is not set, then the routine will generate an error if + * a cycle is found. + */ +int +get_dependents(libzfs_handle_t *hdl, boolean_t allowrecursion, + const char *dataset, char ***result, size_t *count) +{ + zfs_graph_t *zgp; + zfs_vertex_t *zvp; + + if ((zgp = construct_graph(hdl, dataset)) == NULL) + return (-1); + + if ((*result = zfs_alloc(hdl, + zgp->zg_nvertex * sizeof (char *))) == NULL) { + zfs_graph_destroy(zgp); + return (-1); + } + + if ((zvp = zfs_graph_lookup(hdl, zgp, dataset, 0)) == NULL) { + free(*result); + zfs_graph_destroy(zgp); + return (-1); + } + + *count = 0; + if (topo_sort(hdl, allowrecursion, *result, count, zvp) != 0) { + free(*result); + zfs_graph_destroy(zgp); + return (-1); + } + + /* + * Get rid of the last entry, which is our starting vertex and not + * strictly a dependent. 
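topo_sort() is a post-order depth-first search with three marks: untouched, on the current path (a repeat visit here means a cycle), and finished. A dependency-free sketch of the same scheme on a four-node DAG; note that, like get_dependents(), a real caller would drop the starting vertex from the tail of the result:

#include <stdio.h>

#define NVERT   4

/* 0 = unvisited, 1 = on the current DFS path, 2 = done */
static int state[NVERT];
static int adj[NVERT][NVERT];           /* adjacency matrix */
static int order[NVERT];
static int norder;

/*
 * Post-order DFS: children are emitted before their parent, so the
 * deepest dependents come first -- the property topo_sort() relies on
 * when destroying snapshots.  Returns -1 on a cycle.
 */
static int
visit(int v)
{
        int w;

        if (state[v] == 1)
                return (-1);            /* cycle */
        if (state[v] == 2)
                return (0);
        state[v] = 1;
        for (w = 0; w < NVERT; w++)
                if (adj[v][w] && visit(w) != 0)
                        return (-1);
        state[v] = 2;
        order[norder++] = v;
        return (0);
}

int
main(void)
{
        int i;

        /* 0 -> 1 -> 3, 0 -> 2 -> 3 */
        adj[0][1] = adj[1][3] = adj[0][2] = adj[2][3] = 1;
        if (visit(0) != 0) {
                (void) printf("cycle detected\n");
                return (1);
        }
        for (i = 0; i < norder; i++)
                (void) printf("%d ", order[i]);
        (void) printf("\n");            /* prints "3 1 2 0" */
        return (0);
}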
+ */ + assert(*count > 0); + free((*result)[*count - 1]); + (*count)--; + + zfs_graph_destroy(zgp); + + return (0); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_impl.h b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_impl.h new file mode 100644 index 000000000000..9f1f66d51db5 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_impl.h @@ -0,0 +1,193 @@ +/* + * CDDL HEADER SART + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _LIBFS_IMPL_H +#define _LIBFS_IMPL_H + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef VERIFY +#undef VERIFY +#endif +#define VERIFY verify + +struct libzfs_handle { + int libzfs_error; + int libzfs_fd; + FILE *libzfs_mnttab; + FILE *libzfs_sharetab; + zpool_handle_t *libzfs_pool_handles; + uu_avl_pool_t *libzfs_ns_avlpool; + uu_avl_t *libzfs_ns_avl; + uint64_t libzfs_ns_gen; + int libzfs_desc_active; + char libzfs_action[1024]; + char libzfs_desc[1024]; + char *libzfs_log_str; + int libzfs_printerr; + void *libzfs_sharehdl; /* libshare handle */ + uint_t libzfs_shareflags; +}; +#define ZFSSHARE_MISS 0x01 /* Didn't find entry in cache */ + +struct zfs_handle { + libzfs_handle_t *zfs_hdl; + zpool_handle_t *zpool_hdl; + char zfs_name[ZFS_MAXNAMELEN]; + zfs_type_t zfs_type; /* type including snapshot */ + zfs_type_t zfs_head_type; /* type excluding snapshot */ + dmu_objset_stats_t zfs_dmustats; + nvlist_t *zfs_props; + nvlist_t *zfs_user_props; + boolean_t zfs_mntcheck; + char *zfs_mntopts; +}; + +/* + * This is different from checking zfs_type, because it will also catch + * snapshots of volumes. + */ +#define ZFS_IS_VOLUME(zhp) ((zhp)->zfs_head_type == ZFS_TYPE_VOLUME) + +struct zpool_handle { + libzfs_handle_t *zpool_hdl; + zpool_handle_t *zpool_next; + char zpool_name[ZPOOL_MAXNAMELEN]; + int zpool_state; + size_t zpool_config_size; + nvlist_t *zpool_config; + nvlist_t *zpool_old_config; + nvlist_t *zpool_props; + diskaddr_t zpool_start_block; +}; + +typedef enum { + PROTO_NFS = 0, + PROTO_SMB = 1, + PROTO_END = 2 +} zfs_share_proto_t; + +/* + * The following can be used as a bitmask and any new values + * added must preserve that capability. 
+ */ +typedef enum { + SHARED_NOT_SHARED = 0x0, + SHARED_ISCSI = 0x1, + SHARED_NFS = 0x2, + SHARED_SMB = 0x4 +} zfs_share_type_t; + +int zfs_error(libzfs_handle_t *, int, const char *); +int zfs_error_fmt(libzfs_handle_t *, int, const char *, ...); +void zfs_error_aux(libzfs_handle_t *, const char *, ...); +void *zfs_alloc(libzfs_handle_t *, size_t); +void *zfs_realloc(libzfs_handle_t *, void *, size_t, size_t); +char *zfs_strdup(libzfs_handle_t *, const char *); +int no_memory(libzfs_handle_t *); + +int zfs_standard_error(libzfs_handle_t *, int, const char *); +int zfs_standard_error_fmt(libzfs_handle_t *, int, const char *, ...); +int zpool_standard_error(libzfs_handle_t *, int, const char *); +int zpool_standard_error_fmt(libzfs_handle_t *, int, const char *, ...); + +int get_dependents(libzfs_handle_t *, boolean_t, const char *, char ***, + size_t *); + + +int zprop_parse_value(libzfs_handle_t *, nvpair_t *, int, zfs_type_t, + nvlist_t *, char **, uint64_t *, const char *); +int zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, + zfs_type_t type); + +/* + * Use this changelist_gather() flag to force attempting mounts + * on each change node regardless of whether or not it is currently + * mounted. + */ +#define CL_GATHER_MOUNT_ALWAYS 1 + +typedef struct prop_changelist prop_changelist_t; + +int zcmd_alloc_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, size_t); +int zcmd_write_src_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *); +int zcmd_write_conf_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *); +int zcmd_expand_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *); +int zcmd_read_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t **); +void zcmd_free_nvlists(zfs_cmd_t *); + +int changelist_prefix(prop_changelist_t *); +int changelist_postfix(prop_changelist_t *); +void changelist_rename(prop_changelist_t *, const char *, const char *); +void changelist_remove(prop_changelist_t *, const char *); +void changelist_free(prop_changelist_t *); +prop_changelist_t *changelist_gather(zfs_handle_t *, zfs_prop_t, int, int); +int changelist_unshare(prop_changelist_t *, zfs_share_proto_t *); +int changelist_haszonedchild(prop_changelist_t *); + +void remove_mountpoint(zfs_handle_t *); +int create_parents(libzfs_handle_t *, char *, int); +boolean_t isa_child_of(const char *dataset, const char *parent); + +zfs_handle_t *make_dataset_handle(libzfs_handle_t *, const char *); + +int zpool_open_silent(libzfs_handle_t *, const char *, zpool_handle_t **); + +int zvol_create_link(libzfs_handle_t *, const char *); +int zvol_remove_link(libzfs_handle_t *, const char *); +int zpool_iter_zvol(zpool_handle_t *, int (*)(const char *, void *), void *); +boolean_t zpool_name_valid(libzfs_handle_t *, boolean_t, const char *); + +void namespace_clear(libzfs_handle_t *); + +/* + * libshare (sharemgr) interfaces used internally. 
+ */ + +extern int zfs_init_libshare(libzfs_handle_t *, int); +extern void zfs_uninit_libshare(libzfs_handle_t *); +extern int zfs_parse_options(char *, zfs_share_proto_t); + +extern int zfs_unshare_proto(zfs_handle_t *zhp, + const char *, zfs_share_proto_t *); +#ifdef __cplusplus +} +#endif + +#endif /* _LIBFS_IMPL_H */ diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_import.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_import.c new file mode 100644 index 000000000000..d67776889d35 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_import.c @@ -0,0 +1,1311 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * Pool import support functions. + * + * To import a pool, we rely on reading the configuration information from the + * ZFS label of each device. If we successfully read the label, then we + * organize the configuration information in the following hierarchy: + * + * pool guid -> toplevel vdev guid -> label txg + * + * Duplicate entries matching this same tuple will be discarded. Once we have + * examined every device, we pick the best label txg config for each toplevel + * vdev. We then arrange these toplevel vdevs into a complete pool config, and + * update any paths that have changed. Finally, we attempt to import the pool + * using our derived config, and record the results. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "libzfs.h" +#include "libzfs_impl.h" + +/* + * Intermediate structures used to gather configuration information. 
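As the comment above says, once every label has been read the import code keeps only the newest label per toplevel vdev: among duplicate configs it picks the one with the highest txg. A minimal sketch of that selection over a linked list, with an illustrative stand-in for config_entry_t:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for config_entry_t: one label config per txg. */
struct entry {
        uint64_t txg;
        const char *label;
        struct entry *next;
};

/* Pick the entry with the highest txg -- the newest label wins. */
static const struct entry *
best_entry(const struct entry *list)
{
        const struct entry *best = NULL;

        for (; list != NULL; list = list->next)
                if (best == NULL || list->txg > best->txg)
                        best = list;
        return (best);
}

int
main(void)
{
        struct entry c = { 120, "label from disk c", NULL };
        struct entry b = { 150, "label from disk b", &c };
        struct entry a = { 90, "label from disk a", &b };

        (void) printf("%s\n", best_entry(&a)->label);   /* disk b */
        return (0);
}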
+ */ +typedef struct config_entry { + uint64_t ce_txg; + nvlist_t *ce_config; + struct config_entry *ce_next; +} config_entry_t; + +typedef struct vdev_entry { + uint64_t ve_guid; + config_entry_t *ve_configs; + struct vdev_entry *ve_next; +} vdev_entry_t; + +typedef struct pool_entry { + uint64_t pe_guid; + vdev_entry_t *pe_vdevs; + struct pool_entry *pe_next; +} pool_entry_t; + +typedef struct name_entry { + char *ne_name; + uint64_t ne_guid; + struct name_entry *ne_next; +} name_entry_t; + +typedef struct pool_list { + pool_entry_t *pools; + name_entry_t *names; +} pool_list_t; + +static char * +get_devid(const char *path) +{ + int fd; + ddi_devid_t devid; + char *minor, *ret; + + if ((fd = open(path, O_RDONLY)) < 0) + return (NULL); + + minor = NULL; + ret = NULL; + if (devid_get(fd, &devid) == 0) { + if (devid_get_minor_name(fd, &minor) == 0) + ret = devid_str_encode(devid, minor); + if (minor != NULL) + devid_str_free(minor); + devid_free(devid); + } + (void) close(fd); + + return (ret); +} + + +/* + * Go through and fix up any path and/or devid information for the given vdev + * configuration. + */ +static int +fix_paths(nvlist_t *nv, name_entry_t *names) +{ + nvlist_t **child; + uint_t c, children; + uint64_t guid; + name_entry_t *ne, *best; + char *path, *devid; + int matched; + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0) { + for (c = 0; c < children; c++) + if (fix_paths(child[c], names) != 0) + return (-1); + return (0); + } + + /* + * This is a leaf (file or disk) vdev. In either case, go through + * the name list and see if we find a matching guid. If so, replace + * the path and see if we can calculate a new devid. + * + * There may be multiple names associated with a particular guid, in + * which case we have overlapping slices or multiple paths to the same + * disk. If this is the case, then we want to pick the path that is + * the most similar to the original, where "most similar" is the number + * of matching characters starting from the end of the path. This will + * preserve slice numbers even if the disks have been reorganized, and + * will also catch preferred disk names if multiple paths exist. + */ + verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0); + if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0) + path = NULL; + + matched = 0; + best = NULL; + for (ne = names; ne != NULL; ne = ne->ne_next) { + if (ne->ne_guid == guid) { + const char *src, *dst; + int count; + + if (path == NULL) { + best = ne; + break; + } + + src = ne->ne_name + strlen(ne->ne_name) - 1; + dst = path + strlen(path) - 1; + for (count = 0; src >= ne->ne_name && dst >= path; + src--, dst--, count++) + if (*src != *dst) + break; + + /* + * At this point, 'count' is the number of characters + * matched from the end. + */ + if (count > matched || best == NULL) { + best = ne; + matched = count; + } + } + } + + if (best == NULL) + return (0); + + if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0) + return (-1); + + if ((devid = get_devid(best->ne_name)) == NULL) { + (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID); + } else { + if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) + return (-1); + devid_str_free(devid); + } + + return (0); +} + +/* + * Add the given configuration to the list of known devices. 
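fix_paths() breaks ties between multiple device names carrying the same guid by counting how many characters match from the end of the path, which preserves slice numbers even after disks are reorganized. The measure in isolation, as a standalone sketch:

#include <stdio.h>
#include <string.h>

/*
 * Count matching characters from the ends of two paths -- the "most
 * similar" measure fix_paths() uses to choose among candidate names.
 */
static int
suffix_match(const char *a, const char *b)
{
        const char *pa = a + strlen(a) - 1;
        const char *pb = b + strlen(b) - 1;
        int count = 0;

        while (pa >= a && pb >= b && *pa == *pb) {
                pa--;
                pb--;
                count++;
        }
        return (count);
}

int
main(void)
{
        (void) printf("%d\n", suffix_match("/dev/dsk/c0t0d0s0",
            "/dev/dsk/c1t2d0s0"));      /* prints 4: "d0s0" matches */
        return (0);
}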
+ */ +static int +add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path, + nvlist_t *config) +{ + uint64_t pool_guid, vdev_guid, top_guid, txg, state; + pool_entry_t *pe; + vdev_entry_t *ve; + config_entry_t *ce; + name_entry_t *ne; + + /* + * If this is a hot spare not currently in use or level 2 cache + * device, add it to the list of names to translate, but don't do + * anything else. + */ + if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, + &state) == 0 && + (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) && + nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) { + if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) + return (-1); + + if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) { + free(ne); + return (-1); + } + ne->ne_guid = vdev_guid; + ne->ne_next = pl->names; + pl->names = ne; + return (0); + } + + /* + * If we have a valid config but cannot read any of these fields, then + * it means we have a half-initialized label. In vdev_label_init() + * we write a label with txg == 0 so that we can identify the device + * in case the user refers to the same disk later on. If we fail to + * create the pool, we'll be left with a label in this state + * which should not be considered part of a valid pool. + */ + if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, + &pool_guid) != 0 || + nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, + &vdev_guid) != 0 || + nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, + &top_guid) != 0 || + nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, + &txg) != 0 || txg == 0) { + nvlist_free(config); + return (0); + } + + /* + * First, see if we know about this pool. If not, then add it to the + * list of known pools. + */ + for (pe = pl->pools; pe != NULL; pe = pe->pe_next) { + if (pe->pe_guid == pool_guid) + break; + } + + if (pe == NULL) { + if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) { + nvlist_free(config); + return (-1); + } + pe->pe_guid = pool_guid; + pe->pe_next = pl->pools; + pl->pools = pe; + } + + /* + * Second, see if we know about this toplevel vdev. Add it if its + * missing. + */ + for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) { + if (ve->ve_guid == top_guid) + break; + } + + if (ve == NULL) { + if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) { + nvlist_free(config); + return (-1); + } + ve->ve_guid = top_guid; + ve->ve_next = pe->pe_vdevs; + pe->pe_vdevs = ve; + } + + /* + * Third, see if we have a config with a matching transaction group. If + * so, then we do nothing. Otherwise, add it to the list of known + * configs. + */ + for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) { + if (ce->ce_txg == txg) + break; + } + + if (ce == NULL) { + if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) { + nvlist_free(config); + return (-1); + } + ce->ce_txg = txg; + ce->ce_config = config; + ce->ce_next = ve->ve_configs; + ve->ve_configs = ce; + } else { + nvlist_free(config); + } + + /* + * At this point we've successfully added our config to the list of + * known configs. The last thing to do is add the vdev guid -> path + * mappings so that we can fix up the configuration as necessary before + * doing the import. + */ + if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) + return (-1); + + if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) { + free(ne); + return (-1); + } + + ne->ne_guid = vdev_guid; + ne->ne_next = pl->names; + pl->names = ne; + + return (0); +} + +/* + * Returns true if the named pool matches the given GUID. 
+ */ +static int +pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid, + boolean_t *isactive) +{ + zpool_handle_t *zhp; + uint64_t theguid; + + if (zpool_open_silent(hdl, name, &zhp) != 0) + return (-1); + + if (zhp == NULL) { + *isactive = B_FALSE; + return (0); + } + + verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID, + &theguid) == 0); + + zpool_close(zhp); + + *isactive = (theguid == guid); + return (0); +} + +static nvlist_t * +refresh_config(libzfs_handle_t *hdl, nvlist_t *config) +{ + nvlist_t *nvl; + zfs_cmd_t zc = { 0 }; + int err; + + if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) + return (NULL); + + if (zcmd_alloc_dst_nvlist(hdl, &zc, + zc.zc_nvlist_conf_size * 2) != 0) { + zcmd_free_nvlists(&zc); + return (NULL); + } + + while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT, + &zc)) != 0 && errno == ENOMEM) { + if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { + zcmd_free_nvlists(&zc); + return (NULL); + } + } + + if (err) { + (void) zpool_standard_error(hdl, errno, + dgettext(TEXT_DOMAIN, "cannot discover pools")); + zcmd_free_nvlists(&zc); + return (NULL); + } + + if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) { + zcmd_free_nvlists(&zc); + return (NULL); + } + + zcmd_free_nvlists(&zc); + return (nvl); +} + +/* + * Convert our list of pools into the definitive set of configurations. We + * start by picking the best config for each toplevel vdev. Once that's done, + * we assemble the toplevel vdevs into a full config for the pool. We make a + * pass to fix up any incorrect paths, and then add it to the main list to + * return to the user. + */ +static nvlist_t * +get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok) +{ + pool_entry_t *pe; + vdev_entry_t *ve; + config_entry_t *ce; + nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot; + nvlist_t **spares, **l2cache; + uint_t i, nspares, nl2cache; + boolean_t config_seen; + uint64_t best_txg; + char *name, *hostname; + uint64_t version, guid; + uint_t children = 0; + nvlist_t **child = NULL; + uint_t c; + boolean_t isactive; + uint64_t hostid; + nvlist_t *nvl; + boolean_t found_one = B_FALSE; + + if (nvlist_alloc(&ret, 0, 0) != 0) + goto nomem; + + for (pe = pl->pools; pe != NULL; pe = pe->pe_next) { + uint64_t id; + + if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0) + goto nomem; + config_seen = B_FALSE; + + /* + * Iterate over all toplevel vdevs. Grab the pool configuration + * from the first one we find, and then go through the rest and + * add them as necessary to the 'vdevs' member of the config. + */ + for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) { + + /* + * Determine the best configuration for this vdev by + * selecting the config with the latest transaction + * group. 
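refresh_config() above uses a common ioctl idiom: try with a guess-sized destination buffer and, while the call fails with ENOMEM, enlarge the buffer and retry. A dependency-free sketch of that loop, with fake_op() standing in for the ZFS_IOC_POOL_TRYIMPORT ioctl:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative stand-in for an ioctl that needs 'needed' bytes of output
 * space: fails with ENOMEM when the caller's buffer is too small.
 */
static int
fake_op(char *buf, size_t buflen, size_t needed)
{
        if (buflen < needed) {
                errno = ENOMEM;
                return (-1);
        }
        (void) memset(buf, 'x', needed);
        return (0);
}

int
main(void)
{
        size_t len = 64;
        char *buf = malloc(len);

        if (buf == NULL)
                return (1);
        /* Grow and retry while the operation reports ENOMEM. */
        while (fake_op(buf, len, 1000) != 0 && errno == ENOMEM) {
                char *p = realloc(buf, len * 2);

                if (p == NULL) {
                        free(buf);
                        return (1);
                }
                buf = p;
                len *= 2;
        }
        (void) printf("succeeded with a %zu-byte buffer\n", len);
        free(buf);
        return (0);
}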
+ */ + best_txg = 0; + for (ce = ve->ve_configs; ce != NULL; + ce = ce->ce_next) { + + if (ce->ce_txg > best_txg) { + tmp = ce->ce_config; + best_txg = ce->ce_txg; + } + } + + if (!config_seen) { + /* + * Copy the relevant pieces of data to the pool + * configuration: + * + * version + * pool guid + * name + * pool state + * hostid (if available) + * hostname (if available) + */ + uint64_t state; + + verify(nvlist_lookup_uint64(tmp, + ZPOOL_CONFIG_VERSION, &version) == 0); + if (nvlist_add_uint64(config, + ZPOOL_CONFIG_VERSION, version) != 0) + goto nomem; + verify(nvlist_lookup_uint64(tmp, + ZPOOL_CONFIG_POOL_GUID, &guid) == 0); + if (nvlist_add_uint64(config, + ZPOOL_CONFIG_POOL_GUID, guid) != 0) + goto nomem; + verify(nvlist_lookup_string(tmp, + ZPOOL_CONFIG_POOL_NAME, &name) == 0); + if (nvlist_add_string(config, + ZPOOL_CONFIG_POOL_NAME, name) != 0) + goto nomem; + verify(nvlist_lookup_uint64(tmp, + ZPOOL_CONFIG_POOL_STATE, &state) == 0); + if (nvlist_add_uint64(config, + ZPOOL_CONFIG_POOL_STATE, state) != 0) + goto nomem; + hostid = 0; + if (nvlist_lookup_uint64(tmp, + ZPOOL_CONFIG_HOSTID, &hostid) == 0) { + if (nvlist_add_uint64(config, + ZPOOL_CONFIG_HOSTID, hostid) != 0) + goto nomem; + verify(nvlist_lookup_string(tmp, + ZPOOL_CONFIG_HOSTNAME, + &hostname) == 0); + if (nvlist_add_string(config, + ZPOOL_CONFIG_HOSTNAME, + hostname) != 0) + goto nomem; + } + + config_seen = B_TRUE; + } + + /* + * Add this top-level vdev to the child array. + */ + verify(nvlist_lookup_nvlist(tmp, + ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0); + verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID, + &id) == 0); + if (id >= children) { + nvlist_t **newchild; + + newchild = zfs_alloc(hdl, (id + 1) * + sizeof (nvlist_t *)); + if (newchild == NULL) + goto nomem; + + for (c = 0; c < children; c++) + newchild[c] = child[c]; + + free(child); + child = newchild; + children = id + 1; + } + if (nvlist_dup(nvtop, &child[id], 0) != 0) + goto nomem; + + } + + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, + &guid) == 0); + + /* + * Look for any missing top-level vdevs. If this is the case, + * create a faked up 'missing' vdev as a placeholder. We cannot + * simply compress the child array, because the kernel performs + * certain checks to make sure the vdev IDs match their location + * in the configuration. + */ + for (c = 0; c < children; c++) + if (child[c] == NULL) { + nvlist_t *missing; + if (nvlist_alloc(&missing, NV_UNIQUE_NAME, + 0) != 0) + goto nomem; + if (nvlist_add_string(missing, + ZPOOL_CONFIG_TYPE, + VDEV_TYPE_MISSING) != 0 || + nvlist_add_uint64(missing, + ZPOOL_CONFIG_ID, c) != 0 || + nvlist_add_uint64(missing, + ZPOOL_CONFIG_GUID, 0ULL) != 0) { + nvlist_free(missing); + goto nomem; + } + child[c] = missing; + } + + /* + * Put all of this pool's top-level vdevs into a root vdev. + */ + if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) + goto nomem; + if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, + VDEV_TYPE_ROOT) != 0 || + nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 || + nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 || + nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + child, children) != 0) { + nvlist_free(nvroot); + goto nomem; + } + + for (c = 0; c < children; c++) + nvlist_free(child[c]); + free(child); + children = 0; + child = NULL; + + /* + * Go through and fix up any paths and/or devids based on our + * known list of vdev GUID -> path mappings. 
+ */ + if (fix_paths(nvroot, pl->names) != 0) { + nvlist_free(nvroot); + goto nomem; + } + + /* + * Add the root vdev to this pool's configuration. + */ + if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + nvroot) != 0) { + nvlist_free(nvroot); + goto nomem; + } + nvlist_free(nvroot); + + /* + * zdb uses this path to report on active pools that were + * imported or created using -R. + */ + if (active_ok) + goto add_pool; + + /* + * Determine if this pool is currently active, in which case we + * can't actually import it. + */ + verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, + &name) == 0); + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, + &guid) == 0); + + if (pool_active(hdl, name, guid, &isactive) != 0) + goto error; + + if (isactive) { + nvlist_free(config); + config = NULL; + continue; + } + + if ((nvl = refresh_config(hdl, config)) == NULL) + goto error; + + nvlist_free(config); + config = nvl; + + /* + * Go through and update the paths for spares, now that we have + * them. + */ + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, + &spares, &nspares) == 0) { + for (i = 0; i < nspares; i++) { + if (fix_paths(spares[i], pl->names) != 0) + goto nomem; + } + } + + /* + * Update the paths for l2cache devices. + */ + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, + &l2cache, &nl2cache) == 0) { + for (i = 0; i < nl2cache; i++) { + if (fix_paths(l2cache[i], pl->names) != 0) + goto nomem; + } + } + + /* + * Restore the original information read from the actual label. + */ + (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID, + DATA_TYPE_UINT64); + (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME, + DATA_TYPE_STRING); + if (hostid != 0) { + verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, + hostid) == 0); + verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME, + hostname) == 0); + } + +add_pool: + /* + * Add this pool to the list of configs. + */ + verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, + &name) == 0); + if (nvlist_add_nvlist(ret, name, config) != 0) + goto nomem; + + found_one = B_TRUE; + nvlist_free(config); + config = NULL; + } + + if (!found_one) { + nvlist_free(ret); + ret = NULL; + } + + return (ret); + +nomem: + (void) no_memory(hdl); +error: + nvlist_free(config); + nvlist_free(ret); + for (c = 0; c < children; c++) + nvlist_free(child[c]); + free(child); + + return (NULL); +} + +/* + * Return the offset of the given label. + */ +static uint64_t +label_offset(uint64_t size, int l) +{ + ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0); + return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ? + 0 : size - VDEV_LABELS * sizeof (vdev_label_t))); +} + +/* + * Given a file descriptor, read the label information and return an nvlist + * describing the configuration, if there is one. 
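label_offset() encodes the ZFS label layout: of the four labels, the first two sit at the front of the device and the last two at the very end. A standalone version of the same arithmetic, assuming the 256 KiB on-disk label size (sizeof (vdev_label_t)), which this diff does not spell out:

#include <stdint.h>
#include <stdio.h>

#define NLABELS         4               /* VDEV_LABELS */
#define LABEL_SIZE      (256 * 1024)    /* assumed sizeof (vdev_label_t) */

/*
 * Same rule as label_offset(): labels 0 and 1 at the front, labels 2
 * and 3 at the end ('size' already rounded down to a label multiple).
 */
static uint64_t
label_off(uint64_t size, int l)
{
        return (l * (uint64_t)LABEL_SIZE + (l < NLABELS / 2 ?
            0 : size - NLABELS * (uint64_t)LABEL_SIZE));
}

int
main(void)
{
        uint64_t size = 1ULL << 30;     /* 1 GiB device, label-aligned */
        int l;

        for (l = 0; l < NLABELS; l++)
                (void) printf("label %d at %llu\n", l,
                    (unsigned long long)label_off(size, l));
        return (0);
}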
+ */ +int +zpool_read_label(int fd, nvlist_t **config) +{ + struct stat64 statbuf; + int l; + vdev_label_t *label; + uint64_t state, txg, size; + + *config = NULL; + + if (fstat64(fd, &statbuf) == -1) + return (0); + size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t); + + if ((label = malloc(sizeof (vdev_label_t))) == NULL) + return (-1); + + for (l = 0; l < VDEV_LABELS; l++) { + if (pread64(fd, label, sizeof (vdev_label_t), + label_offset(size, l)) != sizeof (vdev_label_t)) + continue; + + if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist, + sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) + continue; + + if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE, + &state) != 0 || state > POOL_STATE_L2CACHE) { + nvlist_free(*config); + continue; + } + + if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && + (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG, + &txg) != 0 || txg == 0)) { + nvlist_free(*config); + continue; + } + + free(label); + return (0); + } + + free(label); + *config = NULL; + return (0); +} + +/* + * Given a list of directories to search, find all pools stored on disk. This + * includes partial pools which are not available to import. If no args are + * given (argc is 0), then the default directory (/dev/dsk) is searched. + * poolname or guid (but not both) are provided by the caller when trying + * to import a specific pool. + */ +static nvlist_t * +zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv, + boolean_t active_ok, char *poolname, uint64_t guid) +{ + int i; + DIR *dirp = NULL; + struct dirent64 *dp; + char path[MAXPATHLEN]; + char *end; + size_t pathleft; + struct stat64 statbuf; + nvlist_t *ret = NULL, *config; + static char *default_dir = "/dev/dsk"; + int fd; + pool_list_t pools = { 0 }; + pool_entry_t *pe, *penext; + vdev_entry_t *ve, *venext; + config_entry_t *ce, *cenext; + name_entry_t *ne, *nenext; + + verify(poolname == NULL || guid == 0); + + if (argc == 0) { + argc = 1; + argv = &default_dir; + } + + /* + * Go through and read the label configuration information from every + * possible device, organizing the information according to pool GUID + * and toplevel GUID. + */ + for (i = 0; i < argc; i++) { + char *rdsk; + int dfd; + + /* use realpath to normalize the path */ + if (realpath(argv[i], path) == 0) { + (void) zfs_error_fmt(hdl, EZFS_BADPATH, + dgettext(TEXT_DOMAIN, "cannot open '%s'"), + argv[i]); + goto error; + } + end = &path[strlen(path)]; + *end++ = '/'; + *end = 0; + pathleft = &path[sizeof (path)] - end; + + /* + * Using raw devices instead of block devices when we're + * reading the labels skips a bunch of slow operations during + * close(2) processing, so we replace /dev/dsk with /dev/rdsk. + */ + if (strcmp(path, "/dev/dsk/") == 0) + rdsk = "/dev/rdsk/"; + else + rdsk = path; + + if ((dfd = open64(rdsk, O_RDONLY)) < 0 || + (dirp = fdopendir(dfd)) == NULL) { + zfs_error_aux(hdl, strerror(errno)); + (void) zfs_error_fmt(hdl, EZFS_BADPATH, + dgettext(TEXT_DOMAIN, "cannot open '%s'"), + rdsk); + goto error; + } + + /* + * This is not MT-safe, but we have no MT consumers of libzfs + */ + while ((dp = readdir64(dirp)) != NULL) { + const char *name = dp->d_name; + if (name[0] == '.' && + (name[1] == 0 || (name[1] == '.' && name[2] == 0))) + continue; + + if ((fd = openat64(dfd, name, O_RDONLY)) < 0) + continue; + + /* + * Ignore failed stats. We only want regular + * files, character devs and block devs. 
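A hedged usage sketch of zpool_read_label(): open a device read-only, let the routine pick whichever label unpacks cleanly, and report the pool name. It assumes <libzfs.h> declares zpool_read_label() and pulls in the nvlist and ZPOOL_CONFIG_* definitions, which this diff does not show:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <libzfs.h>

int
main(int argc, char **argv)
{
        nvlist_t *config;
        char *name;
        int fd;

        if (argc != 2)
                return (1);
        if ((fd = open(argv[1], O_RDONLY)) < 0)
                return (1);
        if (zpool_read_label(fd, &config) != 0 || config == NULL) {
                (void) printf("no usable ZFS label on %s\n", argv[1]);
                (void) close(fd);
                return (1);
        }
        /* Spare and l2cache labels carry no pool name; lookup just fails. */
        if (nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, &name) == 0)
                (void) printf("%s belongs to pool '%s'\n", argv[1], name);
        nvlist_free(config);
        (void) close(fd);
        return (0);
}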
+ */ + if (fstat64(fd, &statbuf) != 0 || + (!S_ISREG(statbuf.st_mode) && + !S_ISCHR(statbuf.st_mode) && + !S_ISBLK(statbuf.st_mode))) { + (void) close(fd); + continue; + } + + if ((zpool_read_label(fd, &config)) != 0) { + (void) close(fd); + (void) no_memory(hdl); + goto error; + } + + (void) close(fd); + + if (config != NULL) { + boolean_t matched = B_TRUE; + + if (poolname != NULL) { + char *pname; + + matched = nvlist_lookup_string(config, + ZPOOL_CONFIG_POOL_NAME, + &pname) == 0 && + strcmp(poolname, pname) == 0; + } else if (guid != 0) { + uint64_t this_guid; + + matched = nvlist_lookup_uint64(config, + ZPOOL_CONFIG_POOL_GUID, + &this_guid) == 0 && + guid == this_guid; + } + if (!matched) { + nvlist_free(config); + config = NULL; + continue; + } + /* use the non-raw path for the config */ + (void) strlcpy(end, name, pathleft); + if (add_config(hdl, &pools, path, config) != 0) + goto error; + } + } + + (void) closedir(dirp); + dirp = NULL; + } + + ret = get_configs(hdl, &pools, active_ok); + +error: + for (pe = pools.pools; pe != NULL; pe = penext) { + penext = pe->pe_next; + for (ve = pe->pe_vdevs; ve != NULL; ve = venext) { + venext = ve->ve_next; + for (ce = ve->ve_configs; ce != NULL; ce = cenext) { + cenext = ce->ce_next; + if (ce->ce_config) + nvlist_free(ce->ce_config); + free(ce); + } + free(ve); + } + free(pe); + } + + for (ne = pools.names; ne != NULL; ne = nenext) { + nenext = ne->ne_next; + if (ne->ne_name) + free(ne->ne_name); + free(ne); + } + + if (dirp) + (void) closedir(dirp); + + return (ret); +} + +nvlist_t * +zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv) +{ + return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, 0)); +} + +nvlist_t * +zpool_find_import_byname(libzfs_handle_t *hdl, int argc, char **argv, + char *pool) +{ + return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, pool, 0)); +} + +nvlist_t * +zpool_find_import_byguid(libzfs_handle_t *hdl, int argc, char **argv, + uint64_t guid) +{ + return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, guid)); +} + +nvlist_t * +zpool_find_import_activeok(libzfs_handle_t *hdl, int argc, char **argv) +{ + return (zpool_find_import_impl(hdl, argc, argv, B_TRUE, NULL, 0)); +} + +/* + * Given a cache file, return the contents as a list of importable pools. + * poolname or guid (but not both) are provided by the caller when trying + * to import a specific pool. 
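+ *
+ * A minimal calling sketch (the cache file path shown is only the
+ * conventional default and is an assumption here; error handling omitted):
+ *
+ *	nvlist_t *pools;
+ *	nvpair_t *elem = NULL;
+ *
+ *	pools = zpool_find_import_cached(hdl, "/etc/zfs/zpool.cache", NULL, 0);
+ *	while (pools != NULL &&
+ *	    (elem = nvlist_next_nvpair(pools, elem)) != NULL) {
+ *		... each nvpair holds one importable pool config ...
+ *	}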
+ */ +nvlist_t * +zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile, + char *poolname, uint64_t guid) +{ + char *buf; + int fd; + struct stat64 statbuf; + nvlist_t *raw, *src, *dst; + nvlist_t *pools; + nvpair_t *elem; + char *name; + uint64_t this_guid; + boolean_t active; + + verify(poolname == NULL || guid == 0); + + if ((fd = open(cachefile, O_RDONLY)) < 0) { + zfs_error_aux(hdl, "%s", strerror(errno)); + (void) zfs_error(hdl, EZFS_BADCACHE, + dgettext(TEXT_DOMAIN, "failed to open cache file")); + return (NULL); + } + + if (fstat64(fd, &statbuf) != 0) { + zfs_error_aux(hdl, "%s", strerror(errno)); + (void) close(fd); + (void) zfs_error(hdl, EZFS_BADCACHE, + dgettext(TEXT_DOMAIN, "failed to get size of cache file")); + return (NULL); + } + + if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) { + (void) close(fd); + return (NULL); + } + + if (read(fd, buf, statbuf.st_size) != statbuf.st_size) { + (void) close(fd); + free(buf); + (void) zfs_error(hdl, EZFS_BADCACHE, + dgettext(TEXT_DOMAIN, + "failed to read cache file contents")); + return (NULL); + } + + (void) close(fd); + + if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) { + free(buf); + (void) zfs_error(hdl, EZFS_BADCACHE, + dgettext(TEXT_DOMAIN, + "invalid or corrupt cache file contents")); + return (NULL); + } + + free(buf); + + /* + * Go through and get the current state of the pools and refresh their + * state. + */ + if (nvlist_alloc(&pools, 0, 0) != 0) { + (void) no_memory(hdl); + nvlist_free(raw); + return (NULL); + } + + elem = NULL; + while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) { + verify(nvpair_value_nvlist(elem, &src) == 0); + + verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME, + &name) == 0); + if (poolname != NULL && strcmp(poolname, name) != 0) + continue; + + verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID, + &this_guid) == 0); + if (guid != 0) { + verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID, + &this_guid) == 0); + if (guid != this_guid) + continue; + } + + if (pool_active(hdl, name, this_guid, &active) != 0) { + nvlist_free(raw); + nvlist_free(pools); + return (NULL); + } + + if (active) + continue; + + if ((dst = refresh_config(hdl, src)) == NULL) { + nvlist_free(raw); + nvlist_free(pools); + return (NULL); + } + + if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) { + (void) no_memory(hdl); + nvlist_free(dst); + nvlist_free(raw); + nvlist_free(pools); + return (NULL); + } + nvlist_free(dst); + } + + nvlist_free(raw); + return (pools); +} + + +boolean_t +find_guid(nvlist_t *nv, uint64_t guid) +{ + uint64_t tmp; + nvlist_t **child; + uint_t c, children; + + verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0); + if (tmp == guid) + return (B_TRUE); + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0) { + for (c = 0; c < children; c++) + if (find_guid(child[c], guid)) + return (B_TRUE); + } + + return (B_FALSE); +} + +typedef struct aux_cbdata { + const char *cb_type; + uint64_t cb_guid; + zpool_handle_t *cb_zhp; +} aux_cbdata_t; + +static int +find_aux(zpool_handle_t *zhp, void *data) +{ + aux_cbdata_t *cbp = data; + nvlist_t **list; + uint_t i, count; + uint64_t guid; + nvlist_t *nvroot; + + verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + + if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type, + &list, &count) == 0) { + for (i = 0; i < count; i++) { + verify(nvlist_lookup_uint64(list[i], + ZPOOL_CONFIG_GUID, &guid) == 0); + if (guid == cbp->cb_guid) { + 
cbp->cb_zhp = zhp; + return (1); + } + } + } + + zpool_close(zhp); + return (0); +} + +/* + * Determines if the pool is in use. If so, it returns true and the state of + * the pool as well as the name of the pool. Both strings are allocated and + * must be freed by the caller. + */ +int +zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr, + boolean_t *inuse) +{ + nvlist_t *config; + char *name; + boolean_t ret; + uint64_t guid, vdev_guid; + zpool_handle_t *zhp; + nvlist_t *pool_config; + uint64_t stateval, isspare; + aux_cbdata_t cb = { 0 }; + boolean_t isactive; + + *inuse = B_FALSE; + + if (zpool_read_label(fd, &config) != 0) { + (void) no_memory(hdl); + return (-1); + } + + if (config == NULL) + return (0); + + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, + &stateval) == 0); + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, + &vdev_guid) == 0); + + if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) { + verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, + &name) == 0); + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, + &guid) == 0); + } + + switch (stateval) { + case POOL_STATE_EXPORTED: + ret = B_TRUE; + break; + + case POOL_STATE_ACTIVE: + /* + * For an active pool, we have to determine if it's really part + * of a currently active pool (in which case the pool will exist + * and the guid will be the same), or whether it's part of an + * active pool that was disconnected without being explicitly + * exported. + */ + if (pool_active(hdl, name, guid, &isactive) != 0) { + nvlist_free(config); + return (-1); + } + + if (isactive) { + /* + * Because the device may have been removed while + * offlined, we only report it as active if the vdev is + * still present in the config. Otherwise, pretend like + * it's not in use. + */ + if ((zhp = zpool_open_canfail(hdl, name)) != NULL && + (pool_config = zpool_get_config(zhp, NULL)) + != NULL) { + nvlist_t *nvroot; + + verify(nvlist_lookup_nvlist(pool_config, + ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); + ret = find_guid(nvroot, vdev_guid); + } else { + ret = B_FALSE; + } + + /* + * If this is an active spare within another pool, we + * treat it like an unused hot spare. This allows the + * user to create a pool with a hot spare that currently + * in use within another pool. Since we return B_TRUE, + * libdiskmgt will continue to prevent generic consumers + * from using the device. + */ + if (ret && nvlist_lookup_uint64(config, + ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare) + stateval = POOL_STATE_SPARE; + + if (zhp != NULL) + zpool_close(zhp); + } else { + stateval = POOL_STATE_POTENTIALLY_ACTIVE; + ret = B_TRUE; + } + break; + + case POOL_STATE_SPARE: + /* + * For a hot spare, it can be either definitively in use, or + * potentially active. To determine if it's in use, we iterate + * over all pools in the system and search for one with a spare + * with a matching guid. + * + * Due to the shared nature of spares, we don't actually report + * the potentially active case as in use. This means the user + * can freely create pools on the hot spares of exported pools, + * but to do otherwise makes the resulting code complicated, and + * we end up having to deal with this case anyway. 
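+ *
+ * Mechanically, the case below hands find_aux() to zpool_iter(): the
+ * callback scans each imported pool's ZPOOL_CONFIG_SPARES list for a vdev
+ * guid matching cb_guid and returns 1 to stop the walk, leaving cb_zhp
+ * pointing at the pool that owns the spare so its name can be reported.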
+ */ + cb.cb_zhp = NULL; + cb.cb_guid = vdev_guid; + cb.cb_type = ZPOOL_CONFIG_SPARES; + if (zpool_iter(hdl, find_aux, &cb) == 1) { + name = (char *)zpool_get_name(cb.cb_zhp); + ret = TRUE; + } else { + ret = FALSE; + } + break; + + case POOL_STATE_L2CACHE: + + /* + * Check if any pool is currently using this l2cache device. + */ + cb.cb_zhp = NULL; + cb.cb_guid = vdev_guid; + cb.cb_type = ZPOOL_CONFIG_L2CACHE; + if (zpool_iter(hdl, find_aux, &cb) == 1) { + name = (char *)zpool_get_name(cb.cb_zhp); + ret = TRUE; + } else { + ret = FALSE; + } + break; + + default: + ret = B_FALSE; + } + + + if (ret) { + if ((*namestr = zfs_strdup(hdl, name)) == NULL) { + if (cb.cb_zhp) + zpool_close(cb.cb_zhp); + nvlist_free(config); + return (-1); + } + *state = (pool_state_t)stateval; + } + + if (cb.cb_zhp) + zpool_close(cb.cb_zhp); + + nvlist_free(config); + *inuse = ret; + return (0); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_mount.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_mount.c new file mode 100644 index 000000000000..7c5c7f3ecaee --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_mount.c @@ -0,0 +1,1399 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Routines to manage ZFS mounts. We separate all the nasty routines that have + * to deal with the OS. The following functions are the main entry points -- + * they are used by mount and unmount and when changing a filesystem's + * mountpoint. 
+ * + * zfs_is_mounted() + * zfs_mount() + * zfs_unmount() + * zfs_unmountall() + * + * This file also contains the functions used to manage sharing filesystems via + * NFS and iSCSI: + * + * zfs_is_shared() + * zfs_share() + * zfs_unshare() + * + * zfs_is_shared_nfs() + * zfs_is_shared_smb() + * zfs_is_shared_iscsi() + * zfs_share_proto() + * zfs_shareall(); + * zfs_share_iscsi() + * zfs_unshare_nfs() + * zfs_unshare_smb() + * zfs_unshareall_nfs() + * zfs_unshareall_smb() + * zfs_unshareall() + * zfs_unshareall_bypath() + * zfs_unshare_iscsi() + * + * The following functions are available for pool consumers, and will + * mount/unmount and share/unshare all datasets within pool: + * + * zpool_enable_datasets() + * zpool_disable_datasets() + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "libzfs_impl.h" + +#include +#include +#define MAXISALEN 257 /* based on sysinfo(2) man page */ + +static int zfs_share_proto(zfs_handle_t *, zfs_share_proto_t *); +zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **, + zfs_share_proto_t); + +static int (*iscsitgt_zfs_share)(const char *); +static int (*iscsitgt_zfs_unshare)(const char *); +static int (*iscsitgt_zfs_is_shared)(const char *); +static int (*iscsitgt_svc_online)(); + +/* + * The share protocols table must be in the same order as the zfs_share_prot_t + * enum in libzfs_impl.h + */ +typedef struct { + zfs_prop_t p_prop; + char *p_name; + int p_share_err; + int p_unshare_err; +} proto_table_t; + +proto_table_t proto_table[PROTO_END] = { + {ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED}, + {ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED}, +}; + +zfs_share_proto_t nfs_only[] = { + PROTO_NFS, + PROTO_END +}; + +zfs_share_proto_t smb_only[] = { + PROTO_SMB, + PROTO_END +}; +zfs_share_proto_t share_all_proto[] = { + PROTO_NFS, + PROTO_SMB, + PROTO_END +}; + +#pragma init(zfs_iscsi_init) +static void +zfs_iscsi_init(void) +{ + void *libiscsitgt; + + if ((libiscsitgt = dlopen("/lib/libiscsitgt.so.1", + RTLD_LAZY | RTLD_GLOBAL)) == NULL || + (iscsitgt_zfs_share = (int (*)(const char *))dlsym(libiscsitgt, + "iscsitgt_zfs_share")) == NULL || + (iscsitgt_zfs_unshare = (int (*)(const char *))dlsym(libiscsitgt, + "iscsitgt_zfs_unshare")) == NULL || + (iscsitgt_zfs_is_shared = (int (*)(const char *))dlsym(libiscsitgt, + "iscsitgt_zfs_is_shared")) == NULL || + (iscsitgt_svc_online = (int (*)(const char *))dlsym(libiscsitgt, + "iscsitgt_svc_online")) == NULL) { + iscsitgt_zfs_share = NULL; + iscsitgt_zfs_unshare = NULL; + iscsitgt_zfs_is_shared = NULL; + iscsitgt_svc_online = NULL; + } +} + +/* + * Search the sharetab for the given mountpoint and protocol, returning + * a zfs_share_type_t value. 
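+ *
+ * For reference, a sharetab entry is a tab-separated line with the
+ * mountpoint first and the protocol name third; the resource, option and
+ * description fields shown here are illustrative:
+ *
+ *	/export/home	-	nfs	rw	""
+ *
+ * which is why the parser below matches on the first field and then skips
+ * exactly one field before comparing the protocol name.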
+ */ +static zfs_share_type_t +is_shared(libzfs_handle_t *hdl, const char *mountpoint, zfs_share_proto_t proto) +{ + char buf[MAXPATHLEN], *tab; + char *ptr; + + if (hdl->libzfs_sharetab == NULL) + return (SHARED_NOT_SHARED); + + (void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET); + + while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) { + + /* the mountpoint is the first entry on each line */ + if ((tab = strchr(buf, '\t')) == NULL) + continue; + + *tab = '\0'; + if (strcmp(buf, mountpoint) == 0) { + /* + * the protocol field is the third field + * skip over second field + */ + ptr = ++tab; + if ((tab = strchr(ptr, '\t')) == NULL) + continue; + ptr = ++tab; + if ((tab = strchr(ptr, '\t')) == NULL) + continue; + *tab = '\0'; + if (strcmp(ptr, + proto_table[proto].p_name) == 0) { + switch (proto) { + case PROTO_NFS: + return (SHARED_NFS); + case PROTO_SMB: + return (SHARED_SMB); + default: + return (0); + } + } + } + } + + return (SHARED_NOT_SHARED); +} + +/* + * Returns true if the specified directory is empty. If we can't open the + * directory at all, return true so that the mount can fail with a more + * informative error message. + */ +static boolean_t +dir_is_empty(const char *dirname) +{ + DIR *dirp; + struct dirent64 *dp; + + if ((dirp = opendir(dirname)) == NULL) + return (B_TRUE); + + while ((dp = readdir64(dirp)) != NULL) { + + if (strcmp(dp->d_name, ".") == 0 || + strcmp(dp->d_name, "..") == 0) + continue; + + (void) closedir(dirp); + return (B_FALSE); + } + + (void) closedir(dirp); + return (B_TRUE); +} + +/* + * Checks to see if the mount is active. If the filesystem is mounted, we fill + * in 'where' with the current mountpoint, and return 1. Otherwise, we return + * 0. + */ +boolean_t +is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where) +{ + struct mnttab search = { 0 }, entry; + + /* + * Search for the entry in /etc/mnttab. We don't bother getting the + * mountpoint, as we can just search for the special device. This will + * also let us find mounts when the mountpoint is 'legacy'. + */ + search.mnt_special = (char *)special; + search.mnt_fstype = MNTTYPE_ZFS; + + rewind(zfs_hdl->libzfs_mnttab); + if (getmntany(zfs_hdl->libzfs_mnttab, &entry, &search) != 0) + return (B_FALSE); + + if (where != NULL) + *where = zfs_strdup(zfs_hdl, entry.mnt_mountp); + + return (B_TRUE); +} + +boolean_t +zfs_is_mounted(zfs_handle_t *zhp, char **where) +{ + return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where)); +} + +/* + * Returns true if the given dataset is mountable, false otherwise. Returns the + * mountpoint in 'buf'. + */ +static boolean_t +zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen, + zprop_source_t *source) +{ + char sourceloc[ZFS_MAXNAMELEN]; + zprop_source_t sourcetype; + + if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type)) + return (B_FALSE); + + verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen, + &sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0); + + if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 || + strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0) + return (B_FALSE); + + if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF) + return (B_FALSE); + + if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) && + getzoneid() == GLOBAL_ZONEID) + return (B_FALSE); + + if (source) + *source = sourcetype; + + return (B_TRUE); +} + +/* + * Mount the given filesystem. 
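+ *
+ * A minimal calling sketch (the dataset name is illustrative):
+ *
+ *	zfs_handle_t *zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
+ *
+ *	if (zhp != NULL && zfs_mount(zhp, NULL, 0) == 0)
+ *		... the dataset is now mounted at its mountpoint property ...
+ *
+ * A NULL options string and zero flags request a plain mount; MS_OVERLAY
+ * and the "remount" mount option are the only two cases below that skip
+ * the empty-directory check.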
+ */ +int +zfs_mount(zfs_handle_t *zhp, const char *options, int flags) +{ + struct stat buf; + char mountpoint[ZFS_MAXPROPLEN]; + char mntopts[MNT_LINE_MAX]; + libzfs_handle_t *hdl = zhp->zfs_hdl; + + if (options == NULL) + mntopts[0] = '\0'; + else + (void) strlcpy(mntopts, options, sizeof (mntopts)); + + if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL)) + return (0); + + /* Create the directory if it doesn't already exist */ + if (lstat(mountpoint, &buf) != 0) { + if (mkdirp(mountpoint, 0755) != 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "failed to create mountpoint")); + return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED, + dgettext(TEXT_DOMAIN, "cannot mount '%s'"), + mountpoint)); + } + } + + /* + * Determine if the mountpoint is empty. If so, refuse to perform the + * mount. We don't perform this check if MS_OVERLAY is specified, which + * would defeat the point. We also avoid this check if 'remount' is + * specified. + */ + if ((flags & MS_OVERLAY) == 0 && + strstr(mntopts, MNTOPT_REMOUNT) == NULL && + !dir_is_empty(mountpoint)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "directory is not empty")); + return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED, + dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint)); + } + + /* perform the mount */ + if (mount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags, + MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) { + /* + * Generic errors are nasty, but there are just way too many + * from mount(), and they're well-understood. We pick a few + * common ones to improve upon. + */ + if (errno == EBUSY) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "mountpoint or dataset is busy")); + } else if (errno == EPERM) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "Insufficient privileges")); + } else { + zfs_error_aux(hdl, strerror(errno)); + } + + return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED, + dgettext(TEXT_DOMAIN, "cannot mount '%s'"), + zhp->zfs_name)); + } + + return (0); +} + +/* + * Unmount a single filesystem. + */ +static int +unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags) +{ + if (umount2(mountpoint, flags) != 0) { + zfs_error_aux(hdl, strerror(errno)); + return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED, + dgettext(TEXT_DOMAIN, "cannot unmount '%s'"), + mountpoint)); + } + + return (0); +} + +/* + * Unmount the given filesystem. + */ +int +zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags) +{ + struct mnttab search = { 0 }, entry; + char *mntpt = NULL; + + /* check to see if need to unmount the filesystem */ + search.mnt_special = zhp->zfs_name; + search.mnt_fstype = MNTTYPE_ZFS; + rewind(zhp->zfs_hdl->libzfs_mnttab); + if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) && + getmntany(zhp->zfs_hdl->libzfs_mnttab, &entry, &search) == 0)) { + + /* + * mountpoint may have come from a call to + * getmnt/getmntany if it isn't NULL. If it is NULL, + * we know it comes from getmntany which can then get + * overwritten later. We strdup it to play it safe. + */ + if (mountpoint == NULL) + mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp); + else + mntpt = zfs_strdup(zhp->zfs_hdl, mountpoint); + + /* + * Unshare and unmount the filesystem + */ + if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0) + return (-1); + + if (unmount_one(zhp->zfs_hdl, mntpt, flags) != 0) { + free(mntpt); + (void) zfs_shareall(zhp); + return (-1); + } + free(mntpt); + } + + return (0); +} + +/* + * Unmount this filesystem and any children inheriting the mountpoint property. 
+ * To do this, just act like we're changing the mountpoint property, but don't + * remount the filesystems afterwards. + */ +int +zfs_unmountall(zfs_handle_t *zhp, int flags) +{ + prop_changelist_t *clp; + int ret; + + clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, flags); + if (clp == NULL) + return (-1); + + ret = changelist_prefix(clp); + changelist_free(clp); + + return (ret); +} + +boolean_t +zfs_is_shared(zfs_handle_t *zhp) +{ + zfs_share_type_t rc = 0; + zfs_share_proto_t *curr_proto; + + if (ZFS_IS_VOLUME(zhp)) + return (zfs_is_shared_iscsi(zhp)); + + for (curr_proto = share_all_proto; *curr_proto != PROTO_END; + curr_proto++) + rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto); + + return (rc ? B_TRUE : B_FALSE); +} + +int +zfs_share(zfs_handle_t *zhp) +{ + if (ZFS_IS_VOLUME(zhp)) + return (zfs_share_iscsi(zhp)); + + return (zfs_share_proto(zhp, share_all_proto)); +} + +int +zfs_unshare(zfs_handle_t *zhp) +{ + if (ZFS_IS_VOLUME(zhp)) + return (zfs_unshare_iscsi(zhp)); + + return (zfs_unshareall(zhp)); +} + +/* + * Check to see if the filesystem is currently shared. + */ +zfs_share_type_t +zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto) +{ + char *mountpoint; + zfs_share_type_t rc; + + if (!zfs_is_mounted(zhp, &mountpoint)) + return (SHARED_NOT_SHARED); + + if (rc = is_shared(zhp->zfs_hdl, mountpoint, proto)) { + if (where != NULL) + *where = mountpoint; + else + free(mountpoint); + return (rc); + } else { + free(mountpoint); + return (SHARED_NOT_SHARED); + } +} + +boolean_t +zfs_is_shared_nfs(zfs_handle_t *zhp, char **where) +{ + return (zfs_is_shared_proto(zhp, where, + PROTO_NFS) != SHARED_NOT_SHARED); +} + +boolean_t +zfs_is_shared_smb(zfs_handle_t *zhp, char **where) +{ + return (zfs_is_shared_proto(zhp, where, + PROTO_SMB) != SHARED_NOT_SHARED); +} + +/* + * Make sure things will work if libshare isn't installed by using + * wrapper functions that check to see that the pointers to functions + * initialized in _zfs_init_libshare() are actually present. + */ + +static sa_handle_t (*_sa_init)(int); +static void (*_sa_fini)(sa_handle_t); +static sa_share_t (*_sa_find_share)(sa_handle_t, char *); +static int (*_sa_enable_share)(sa_share_t, char *); +static int (*_sa_disable_share)(sa_share_t, char *); +static char *(*_sa_errorstr)(int); +static int (*_sa_parse_legacy_options)(sa_group_t, char *, char *); +static boolean_t (*_sa_needs_refresh)(sa_handle_t *); +static libzfs_handle_t *(*_sa_get_zfs_handle)(sa_handle_t); +static int (*_sa_zfs_process_share)(sa_handle_t, sa_group_t, sa_share_t, + char *, char *, zprop_source_t, char *, char *, char *); +static void (*_sa_update_sharetab_ts)(sa_handle_t); + +/* + * _zfs_init_libshare() + * + * Find the libshare.so.1 entry points that we use here and save the + * values to be used later. This is triggered by the runtime loader. + * Make sure the correct ISA version is loaded. 
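+ *
+ * As a concrete example of the path constructed below: in a 64-bit
+ * process sysinfo(SI_ARCHITECTURE_64) yields an ISA name such as "amd64"
+ * or "sparcv9", so the library path becomes, e.g.,
+ * /usr/lib/amd64/libshare.so.1; in the 32-bit case the ISA string is left
+ * empty and the path degenerates to /usr/lib//libshare.so.1, which the
+ * runtime loader still resolves.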
+ */ + +#pragma init(_zfs_init_libshare) +static void +_zfs_init_libshare(void) +{ + void *libshare; + char path[MAXPATHLEN]; + char isa[MAXISALEN]; + +#if defined(_LP64) + if (sysinfo(SI_ARCHITECTURE_64, isa, MAXISALEN) == -1) + isa[0] = '\0'; +#else + isa[0] = '\0'; +#endif + (void) snprintf(path, MAXPATHLEN, + "/usr/lib/%s/libshare.so.1", isa); + + if ((libshare = dlopen(path, RTLD_LAZY | RTLD_GLOBAL)) != NULL) { + _sa_init = (sa_handle_t (*)(int))dlsym(libshare, "sa_init"); + _sa_fini = (void (*)(sa_handle_t))dlsym(libshare, "sa_fini"); + _sa_find_share = (sa_share_t (*)(sa_handle_t, char *)) + dlsym(libshare, "sa_find_share"); + _sa_enable_share = (int (*)(sa_share_t, char *))dlsym(libshare, + "sa_enable_share"); + _sa_disable_share = (int (*)(sa_share_t, char *))dlsym(libshare, + "sa_disable_share"); + _sa_errorstr = (char *(*)(int))dlsym(libshare, "sa_errorstr"); + _sa_parse_legacy_options = (int (*)(sa_group_t, char *, char *)) + dlsym(libshare, "sa_parse_legacy_options"); + _sa_needs_refresh = (boolean_t (*)(sa_handle_t *)) + dlsym(libshare, "sa_needs_refresh"); + _sa_get_zfs_handle = (libzfs_handle_t *(*)(sa_handle_t)) + dlsym(libshare, "sa_get_zfs_handle"); + _sa_zfs_process_share = (int (*)(sa_handle_t, sa_group_t, + sa_share_t, char *, char *, zprop_source_t, char *, + char *, char *))dlsym(libshare, "sa_zfs_process_share"); + _sa_update_sharetab_ts = (void (*)(sa_handle_t)) + dlsym(libshare, "sa_update_sharetab_ts"); + if (_sa_init == NULL || _sa_fini == NULL || + _sa_find_share == NULL || _sa_enable_share == NULL || + _sa_disable_share == NULL || _sa_errorstr == NULL || + _sa_parse_legacy_options == NULL || + _sa_needs_refresh == NULL || _sa_get_zfs_handle == NULL || + _sa_zfs_process_share == NULL || + _sa_update_sharetab_ts == NULL) { + _sa_init = NULL; + _sa_fini = NULL; + _sa_disable_share = NULL; + _sa_enable_share = NULL; + _sa_errorstr = NULL; + _sa_parse_legacy_options = NULL; + (void) dlclose(libshare); + _sa_needs_refresh = NULL; + _sa_get_zfs_handle = NULL; + _sa_zfs_process_share = NULL; + _sa_update_sharetab_ts = NULL; + } + } +} + +/* + * zfs_init_libshare(zhandle, service) + * + * Initialize the libshare API if it hasn't already been initialized. + * In all cases it returns 0 if it succeeded and an error if not. The + * service value is which part(s) of the API to initialize and is a + * direct map to the libshare sa_init(service) interface. + */ +int +zfs_init_libshare(libzfs_handle_t *zhandle, int service) +{ + int ret = SA_OK; + + if (_sa_init == NULL) + ret = SA_CONFIG_ERR; + + if (ret == SA_OK && zhandle->libzfs_shareflags & ZFSSHARE_MISS) { + /* + * We had a cache miss. Most likely it is a new ZFS + * dataset that was just created. We want to make sure + * so check timestamps to see if a different process + * has updated any of the configuration. If there was + * some non-ZFS change, we need to re-initialize the + * internal cache. + */ + zhandle->libzfs_shareflags &= ~ZFSSHARE_MISS; + if (_sa_needs_refresh != NULL && + _sa_needs_refresh(zhandle->libzfs_sharehdl)) { + zfs_uninit_libshare(zhandle); + zhandle->libzfs_sharehdl = _sa_init(service); + } + } + + if (ret == SA_OK && zhandle && zhandle->libzfs_sharehdl == NULL) + zhandle->libzfs_sharehdl = _sa_init(service); + + if (ret == SA_OK && zhandle->libzfs_sharehdl == NULL) + ret = SA_NO_MEMORY; + + return (ret); +} + +/* + * zfs_uninit_libshare(zhandle) + * + * Uninitialize the libshare API if it hasn't already been + * uninitialized. It is OK to call multiple times. 
+ */ +void +zfs_uninit_libshare(libzfs_handle_t *zhandle) +{ + if (zhandle != NULL && zhandle->libzfs_sharehdl != NULL) { + if (_sa_fini != NULL) + _sa_fini(zhandle->libzfs_sharehdl); + zhandle->libzfs_sharehdl = NULL; + } +} + +/* + * zfs_parse_options(options, proto) + * + * Call the legacy parse interface to get the protocol specific + * options using the NULL arg to indicate that this is a "parse" only. + */ +int +zfs_parse_options(char *options, zfs_share_proto_t proto) +{ + if (_sa_parse_legacy_options != NULL) { + return (_sa_parse_legacy_options(NULL, options, + proto_table[proto].p_name)); + } + return (SA_CONFIG_ERR); +} + +/* + * zfs_sa_find_share(handle, path) + * + * wrapper around sa_find_share to find a share path in the + * configuration. + */ +static sa_share_t +zfs_sa_find_share(sa_handle_t handle, char *path) +{ + if (_sa_find_share != NULL) + return (_sa_find_share(handle, path)); + return (NULL); +} + +/* + * zfs_sa_enable_share(share, proto) + * + * Wrapper for sa_enable_share which enables a share for a specified + * protocol. + */ +static int +zfs_sa_enable_share(sa_share_t share, char *proto) +{ + if (_sa_enable_share != NULL) + return (_sa_enable_share(share, proto)); + return (SA_CONFIG_ERR); +} + +/* + * zfs_sa_disable_share(share, proto) + * + * Wrapper for sa_enable_share which disables a share for a specified + * protocol. + */ +static int +zfs_sa_disable_share(sa_share_t share, char *proto) +{ + if (_sa_disable_share != NULL) + return (_sa_disable_share(share, proto)); + return (SA_CONFIG_ERR); +} + +/* + * Share the given filesystem according to the options in the specified + * protocol specific properties (sharenfs, sharesmb). We rely + * on "libshare" to the dirty work for us. + */ +static int +zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto) +{ + char mountpoint[ZFS_MAXPROPLEN]; + char shareopts[ZFS_MAXPROPLEN]; + char sourcestr[ZFS_MAXPROPLEN]; + libzfs_handle_t *hdl = zhp->zfs_hdl; + sa_share_t share; + zfs_share_proto_t *curr_proto; + zprop_source_t sourcetype; + int ret; + + if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL)) + return (0); + + if ((ret = zfs_init_libshare(hdl, SA_INIT_SHARE_API)) != SA_OK) { + (void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED, + dgettext(TEXT_DOMAIN, "cannot share '%s': %s"), + zfs_get_name(zhp), _sa_errorstr != NULL ? + _sa_errorstr(ret) : ""); + return (-1); + } + + for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) { + /* + * Return success if there are no share options. + */ + if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop, + shareopts, sizeof (shareopts), &sourcetype, sourcestr, + ZFS_MAXPROPLEN, B_FALSE) != 0 || + strcmp(shareopts, "off") == 0) + continue; + + /* + * If the 'zoned' property is set, then zfs_is_mountable() + * will have already bailed out if we are in the global zone. + * But local zones cannot be NFS servers, so we ignore it for + * local zones as well. + */ + if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) + continue; + + share = zfs_sa_find_share(hdl->libzfs_sharehdl, mountpoint); + if (share == NULL) { + /* + * This may be a new file system that was just + * created so isn't in the internal cache + * (second time through). Rather than + * reloading the entire configuration, we can + * assume ZFS has done the checking and it is + * safe to add this to the internal + * configuration. 
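+ *
+ * The ZFSSHARE_MISS flag set just below is what later triggers the
+ * timestamp check in zfs_init_libshare(), so a configuration change made
+ * by another process is picked up on the next initialization instead of
+ * being masked by a stale internal cache.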
+ */ + if (_sa_zfs_process_share(hdl->libzfs_sharehdl, + NULL, NULL, mountpoint, + proto_table[*curr_proto].p_name, sourcetype, + shareopts, sourcestr, zhp->zfs_name) != SA_OK) { + (void) zfs_error_fmt(hdl, + proto_table[*curr_proto].p_share_err, + dgettext(TEXT_DOMAIN, "cannot share '%s'"), + zfs_get_name(zhp)); + return (-1); + } + hdl->libzfs_shareflags |= ZFSSHARE_MISS; + share = zfs_sa_find_share(hdl->libzfs_sharehdl, + mountpoint); + } + if (share != NULL) { + int err; + err = zfs_sa_enable_share(share, + proto_table[*curr_proto].p_name); + if (err != SA_OK) { + (void) zfs_error_fmt(hdl, + proto_table[*curr_proto].p_share_err, + dgettext(TEXT_DOMAIN, "cannot share '%s'"), + zfs_get_name(zhp)); + return (-1); + } + } else { + (void) zfs_error_fmt(hdl, + proto_table[*curr_proto].p_share_err, + dgettext(TEXT_DOMAIN, "cannot share '%s'"), + zfs_get_name(zhp)); + return (-1); + } + + } + return (0); +} + + +int +zfs_share_nfs(zfs_handle_t *zhp) +{ + return (zfs_share_proto(zhp, nfs_only)); +} + +int +zfs_share_smb(zfs_handle_t *zhp) +{ + return (zfs_share_proto(zhp, smb_only)); +} + +int +zfs_shareall(zfs_handle_t *zhp) +{ + return (zfs_share_proto(zhp, share_all_proto)); +} + +/* + * Unshare a filesystem by mountpoint. + */ +static int +unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint, + zfs_share_proto_t proto) +{ + sa_share_t share; + int err; + char *mntpt; + /* + * Mountpoint could get trashed if libshare calls getmntany + * which id does during API initialization, so strdup the + * value. + */ + mntpt = zfs_strdup(hdl, mountpoint); + + /* make sure libshare initialized */ + if ((err = zfs_init_libshare(hdl, SA_INIT_SHARE_API)) != SA_OK) { + free(mntpt); /* don't need the copy anymore */ + return (zfs_error_fmt(hdl, EZFS_SHARENFSFAILED, + dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"), + name, _sa_errorstr(err))); + } + + share = zfs_sa_find_share(hdl->libzfs_sharehdl, mntpt); + free(mntpt); /* don't need the copy anymore */ + + if (share != NULL) { + err = zfs_sa_disable_share(share, proto_table[proto].p_name); + if (err != SA_OK) { + return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED, + dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"), + name, _sa_errorstr(err))); + } + } else { + return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED, + dgettext(TEXT_DOMAIN, "cannot unshare '%s': not found"), + name)); + } + return (0); +} + +/* + * Unshare the given filesystem. 
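+ *
+ * A short calling sketch (the dataset name is illustrative):
+ *
+ *	zfs_handle_t *zhp = zfs_open(hdl, "tank/export", ZFS_TYPE_FILESYSTEM);
+ *
+ *	if (zhp != NULL)
+ *		(void) zfs_unshare_nfs(zhp, NULL);
+ *
+ * Passing a NULL mountpoint makes the routine look the mountpoint up in
+ * /etc/mnttab before disabling the share.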
+ */ +int +zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint, + zfs_share_proto_t *proto) +{ + struct mnttab search = { 0 }, entry; + char *mntpt = NULL; + + /* check to see if need to unmount the filesystem */ + search.mnt_special = (char *)zfs_get_name(zhp); + search.mnt_fstype = MNTTYPE_ZFS; + rewind(zhp->zfs_hdl->libzfs_mnttab); + if (mountpoint != NULL) + mntpt = zfs_strdup(zhp->zfs_hdl, mountpoint); + + if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) && + getmntany(zhp->zfs_hdl->libzfs_mnttab, &entry, &search) == 0)) { + zfs_share_proto_t *curr_proto; + + if (mountpoint == NULL) + mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp); + + for (curr_proto = proto; *curr_proto != PROTO_END; + curr_proto++) { + + if (is_shared(zhp->zfs_hdl, mntpt, *curr_proto) && + unshare_one(zhp->zfs_hdl, zhp->zfs_name, + mntpt, *curr_proto) != 0) { + if (mntpt != NULL) + free(mntpt); + return (-1); + } + } + } + if (mntpt != NULL) + free(mntpt); + + return (0); +} + +int +zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint) +{ + return (zfs_unshare_proto(zhp, mountpoint, nfs_only)); +} + +int +zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint) +{ + return (zfs_unshare_proto(zhp, mountpoint, smb_only)); +} + +/* + * Same as zfs_unmountall(), but for NFS and SMB unshares. + */ +int +zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto) +{ + prop_changelist_t *clp; + int ret; + + clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0); + if (clp == NULL) + return (-1); + + ret = changelist_unshare(clp, proto); + changelist_free(clp); + + return (ret); +} + +int +zfs_unshareall_nfs(zfs_handle_t *zhp) +{ + return (zfs_unshareall_proto(zhp, nfs_only)); +} + +int +zfs_unshareall_smb(zfs_handle_t *zhp) +{ + return (zfs_unshareall_proto(zhp, smb_only)); +} + +int +zfs_unshareall(zfs_handle_t *zhp) +{ + return (zfs_unshareall_proto(zhp, share_all_proto)); +} + +int +zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint) +{ + return (zfs_unshare_proto(zhp, mountpoint, share_all_proto)); +} + +/* + * Remove the mountpoint associated with the current dataset, if necessary. + * We only remove the underlying directory if: + * + * - The mountpoint is not 'none' or 'legacy' + * - The mountpoint is non-empty + * - The mountpoint is the default or inherited + * - The 'zoned' property is set, or we're in a local zone + * + * Any other directories we leave alone. + */ +void +remove_mountpoint(zfs_handle_t *zhp) +{ + char mountpoint[ZFS_MAXPROPLEN]; + zprop_source_t source; + + if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), + &source)) + return; + + if (source == ZPROP_SRC_DEFAULT || + source == ZPROP_SRC_INHERITED) { + /* + * Try to remove the directory, silently ignoring any errors. + * The filesystem may have since been removed or moved around, + * and this error isn't really useful to the administrator in + * any way. + */ + (void) rmdir(mountpoint); + } +} + +boolean_t +zfs_is_shared_iscsi(zfs_handle_t *zhp) +{ + + /* + * If iscsi deamon isn't running then we aren't shared + */ + if (iscsitgt_svc_online && iscsitgt_svc_online() == 1) + return (B_FALSE); + else + return (iscsitgt_zfs_is_shared != NULL && + iscsitgt_zfs_is_shared(zhp->zfs_name) != 0); +} + +int +zfs_share_iscsi(zfs_handle_t *zhp) +{ + char shareopts[ZFS_MAXPROPLEN]; + const char *dataset = zhp->zfs_name; + libzfs_handle_t *hdl = zhp->zfs_hdl; + + /* + * Return success if there are no share options. 
+ */ + if (zfs_prop_get(zhp, ZFS_PROP_SHAREISCSI, shareopts, + sizeof (shareopts), NULL, NULL, 0, B_FALSE) != 0 || + strcmp(shareopts, "off") == 0) + return (0); + + if (iscsitgt_zfs_share == NULL || iscsitgt_zfs_share(dataset) != 0) { + int error = EZFS_SHAREISCSIFAILED; + + /* + * If service isn't availabele and EPERM was + * returned then use special error. + */ + if (iscsitgt_svc_online && errno == EPERM && + (iscsitgt_svc_online() != 0)) + error = EZFS_ISCSISVCUNAVAIL; + + return (zfs_error_fmt(hdl, error, + dgettext(TEXT_DOMAIN, "cannot share '%s'"), dataset)); + } + + return (0); +} + +int +zfs_unshare_iscsi(zfs_handle_t *zhp) +{ + const char *dataset = zfs_get_name(zhp); + libzfs_handle_t *hdl = zhp->zfs_hdl; + + /* + * Return if the volume is not shared + */ + if (zfs_is_shared_iscsi(zhp) != SHARED_ISCSI) + return (0); + + /* + * If this fails with ENODEV it indicates that zvol wasn't shared so + * we should return success in that case. + */ + if (iscsitgt_zfs_unshare == NULL || + (iscsitgt_zfs_unshare(dataset) != 0 && errno != ENODEV)) { + if (errno == EPERM) + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "Insufficient privileges to unshare iscsi")); + return (zfs_error_fmt(hdl, EZFS_UNSHAREISCSIFAILED, + dgettext(TEXT_DOMAIN, "cannot unshare '%s'"), dataset)); + } + + return (0); +} + +typedef struct mount_cbdata { + zfs_handle_t **cb_datasets; + int cb_used; + int cb_alloc; +} mount_cbdata_t; + +static int +mount_cb(zfs_handle_t *zhp, void *data) +{ + mount_cbdata_t *cbp = data; + + if (!(zfs_get_type(zhp) & (ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME))) { + zfs_close(zhp); + return (0); + } + + if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) { + zfs_close(zhp); + return (0); + } + + if (cbp->cb_alloc == cbp->cb_used) { + void *ptr; + + if ((ptr = zfs_realloc(zhp->zfs_hdl, + cbp->cb_datasets, cbp->cb_alloc * sizeof (void *), + cbp->cb_alloc * 2 * sizeof (void *))) == NULL) + return (-1); + cbp->cb_datasets = ptr; + + cbp->cb_alloc *= 2; + } + + cbp->cb_datasets[cbp->cb_used++] = zhp; + + return (zfs_iter_filesystems(zhp, mount_cb, cbp)); +} + +static int +dataset_cmp(const void *a, const void *b) +{ + zfs_handle_t **za = (zfs_handle_t **)a; + zfs_handle_t **zb = (zfs_handle_t **)b; + char mounta[MAXPATHLEN]; + char mountb[MAXPATHLEN]; + boolean_t gota, gotb; + + if ((gota = (zfs_get_type(*za) == ZFS_TYPE_FILESYSTEM)) != 0) + verify(zfs_prop_get(*za, ZFS_PROP_MOUNTPOINT, mounta, + sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0); + if ((gotb = (zfs_get_type(*zb) == ZFS_TYPE_FILESYSTEM)) != 0) + verify(zfs_prop_get(*zb, ZFS_PROP_MOUNTPOINT, mountb, + sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0); + + if (gota && gotb) + return (strcmp(mounta, mountb)); + + if (gota) + return (-1); + if (gotb) + return (1); + + return (strcmp(zfs_get_name(a), zfs_get_name(b))); +} + +/* + * Mount and share all datasets within the given pool. This assumes that no + * datasets within the pool are currently mounted. Because users can create + * complicated nested hierarchies of mountpoints, we first gather all the + * datasets and mountpoints within the pool, and sort them by mountpoint. Once + * we have the list of all filesystems, we iterate over them in order and mount + * and/or share each one. 
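+ *
+ * A minimal sketch of how a pool consumer would drive this (the handle
+ * and pool name are illustrative):
+ *
+ *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
+ *
+ *	if (zhp != NULL && zpool_enable_datasets(zhp, NULL, 0) != 0)
+ *		... some datasets failed to mount or share ...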
+ */ +#pragma weak zpool_mount_datasets = zpool_enable_datasets +int +zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags) +{ + mount_cbdata_t cb = { 0 }; + libzfs_handle_t *hdl = zhp->zpool_hdl; + zfs_handle_t *zfsp; + int i, ret = -1; + int *good; + + /* + * Gather all non-snap datasets within the pool. + */ + if ((cb.cb_datasets = zfs_alloc(hdl, 4 * sizeof (void *))) == NULL) + return (-1); + cb.cb_alloc = 4; + + if ((zfsp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_DATASET)) == NULL) + goto out; + + cb.cb_datasets[0] = zfsp; + cb.cb_used = 1; + + if (zfs_iter_filesystems(zfsp, mount_cb, &cb) != 0) + goto out; + + /* + * Sort the datasets by mountpoint. + */ + qsort(cb.cb_datasets, cb.cb_used, sizeof (void *), dataset_cmp); + + /* + * And mount all the datasets, keeping track of which ones + * succeeded or failed. By using zfs_alloc(), the good pointer + * will always be non-NULL. + */ + good = zfs_alloc(zhp->zpool_hdl, cb.cb_used * sizeof (int)); + ret = 0; + for (i = 0; i < cb.cb_used; i++) { + if (zfs_mount(cb.cb_datasets[i], mntopts, flags) != 0) + ret = -1; + else + good[i] = 1; + } + + /* + * Then share all the ones that need to be shared. This needs + * to be a separate pass in order to avoid excessive reloading + * of the configuration. Good should never be NULL since + * zfs_alloc is supposed to exit if memory isn't available. + */ + for (i = 0; i < cb.cb_used; i++) { + if (good[i] && zfs_share(cb.cb_datasets[i]) != 0) + ret = -1; + } + + free(good); + +out: + for (i = 0; i < cb.cb_used; i++) + zfs_close(cb.cb_datasets[i]); + free(cb.cb_datasets); + + return (ret); +} + + +static int +zvol_cb(const char *dataset, void *data) +{ + libzfs_handle_t *hdl = data; + zfs_handle_t *zhp; + + /* + * Ignore snapshots and ignore failures from non-existant datasets. + */ + if (strchr(dataset, '@') != NULL || + (zhp = zfs_open(hdl, dataset, ZFS_TYPE_VOLUME)) == NULL) + return (0); + + if (zfs_unshare_iscsi(zhp) != 0) + return (-1); + + zfs_close(zhp); + + return (0); +} + +static int +mountpoint_compare(const void *a, const void *b) +{ + const char *mounta = *((char **)a); + const char *mountb = *((char **)b); + + return (strcmp(mountb, mounta)); +} + +/* + * Unshare and unmount all datasets within the given pool. We don't want to + * rely on traversing the DSL to discover the filesystems within the pool, + * because this may be expensive (if not all of them are mounted), and can fail + * arbitrarily (on I/O error, for example). Instead, we walk /etc/mnttab and + * gather all the filesystems that are currently mounted. + */ +#pragma weak zpool_unmount_datasets = zpool_disable_datasets +int +zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force) +{ + int used, alloc; + struct mnttab entry; + size_t namelen; + char **mountpoints = NULL; + zfs_handle_t **datasets = NULL; + libzfs_handle_t *hdl = zhp->zpool_hdl; + int i; + int ret = -1; + int flags = (force ? MS_FORCE : 0); + + /* + * First unshare all zvols. + */ + if (zpool_iter_zvol(zhp, zvol_cb, hdl) != 0) + return (-1); + + namelen = strlen(zhp->zpool_name); + + rewind(hdl->libzfs_mnttab); + used = alloc = 0; + while (getmntent(hdl->libzfs_mnttab, &entry) == 0) { + /* + * Ignore non-ZFS entries. + */ + if (entry.mnt_fstype == NULL || + strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) + continue; + + /* + * Ignore filesystems not within this pool. 
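+ *
+ * For example, with a pool named "tank", mnttab entries for "tank" and
+ * "tank/home" are kept while "tank2/data" is skipped: the prefix match
+ * alone is not enough, the character following the pool name must be '/'
+ * or the end of the string.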
+ */ + if (entry.mnt_mountp == NULL || + strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 || + (entry.mnt_special[namelen] != '/' && + entry.mnt_special[namelen] != '\0')) + continue; + + /* + * At this point we've found a filesystem within our pool. Add + * it to our growing list. + */ + if (used == alloc) { + if (alloc == 0) { + if ((mountpoints = zfs_alloc(hdl, + 8 * sizeof (void *))) == NULL) + goto out; + + if ((datasets = zfs_alloc(hdl, + 8 * sizeof (void *))) == NULL) + goto out; + + alloc = 8; + } else { + void *ptr; + + if ((ptr = zfs_realloc(hdl, mountpoints, + alloc * sizeof (void *), + alloc * 2 * sizeof (void *))) == NULL) + goto out; + mountpoints = ptr; + + if ((ptr = zfs_realloc(hdl, datasets, + alloc * sizeof (void *), + alloc * 2 * sizeof (void *))) == NULL) + goto out; + datasets = ptr; + + alloc *= 2; + } + } + + if ((mountpoints[used] = zfs_strdup(hdl, + entry.mnt_mountp)) == NULL) + goto out; + + /* + * This is allowed to fail, in case there is some I/O error. It + * is only used to determine if we need to remove the underlying + * mountpoint, so failure is not fatal. + */ + datasets[used] = make_dataset_handle(hdl, entry.mnt_special); + + used++; + } + + /* + * At this point, we have the entire list of filesystems, so sort it by + * mountpoint. + */ + qsort(mountpoints, used, sizeof (char *), mountpoint_compare); + + /* + * Walk through and first unshare everything. + */ + for (i = 0; i < used; i++) { + zfs_share_proto_t *curr_proto; + for (curr_proto = share_all_proto; *curr_proto != PROTO_END; + curr_proto++) { + if (is_shared(hdl, mountpoints[i], *curr_proto) && + unshare_one(hdl, mountpoints[i], + mountpoints[i], *curr_proto) != 0) + goto out; + } + } + + /* + * Now unmount everything, removing the underlying directories as + * appropriate. + */ + for (i = 0; i < used; i++) { + if (unmount_one(hdl, mountpoints[i], flags) != 0) + goto out; + } + + for (i = 0; i < used; i++) { + if (datasets[i]) + remove_mountpoint(datasets[i]); + } + + ret = 0; +out: + for (i = 0; i < used; i++) { + if (datasets[i]) + zfs_close(datasets[i]); + free(mountpoints[i]); + } + free(datasets); + free(mountpoints); + + return (ret); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_pool.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_pool.c new file mode 100644 index 000000000000..18ceb4859654 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_pool.c @@ -0,0 +1,3064 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "zfs_namecheck.h" +#include "zfs_prop.h" +#include "libzfs_impl.h" + +static int read_efi_label(nvlist_t *config, diskaddr_t *sb); + +#if defined(__i386) || defined(__amd64) +#define BOOTCMD "installgrub(1M)" +#else +#define BOOTCMD "installboot(1M)" +#endif + +/* + * ==================================================================== + * zpool property functions + * ==================================================================== + */ + +static int +zpool_get_all_props(zpool_handle_t *zhp) +{ + zfs_cmd_t zc = { 0 }; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + + if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) + return (-1); + + while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { + if (errno == ENOMEM) { + if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + } else { + zcmd_free_nvlists(&zc); + return (-1); + } + } + + if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { + zcmd_free_nvlists(&zc); + return (-1); + } + + zcmd_free_nvlists(&zc); + + return (0); +} + +static int +zpool_props_refresh(zpool_handle_t *zhp) +{ + nvlist_t *old_props; + + old_props = zhp->zpool_props; + + if (zpool_get_all_props(zhp) != 0) + return (-1); + + nvlist_free(old_props); + return (0); +} + +static char * +zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, + zprop_source_t *src) +{ + nvlist_t *nv, *nvl; + uint64_t ival; + char *value; + zprop_source_t source; + + nvl = zhp->zpool_props; + if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { + verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); + source = ival; + verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); + } else { + source = ZPROP_SRC_DEFAULT; + if ((value = (char *)zpool_prop_default_string(prop)) == NULL) + value = "-"; + } + + if (src) + *src = source; + + return (value); +} + +uint64_t +zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) +{ + nvlist_t *nv, *nvl; + uint64_t value; + zprop_source_t source; + + if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { + /* + * zpool_get_all_props() has most likely failed because + * the pool is faulted, but if all we need is the top level + * vdev's guid then get it from the zhp config nvlist. + */ + if ((prop == ZPOOL_PROP_GUID) && + (nvlist_lookup_nvlist(zhp->zpool_config, + ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && + (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) + == 0)) { + return (value); + } + return (zpool_prop_default_numeric(prop)); + } + + nvl = zhp->zpool_props; + if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { + verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); + source = value; + verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); + } else { + source = ZPROP_SRC_DEFAULT; + value = zpool_prop_default_numeric(prop); + } + + if (src) + *src = source; + + return (value); +} + +/* + * Map VDEV STATE to printed strings. 
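+ *
+ * The only non-obvious branch below is VDEV_STATE_CANT_OPEN: corrupt data
+ * or a bad intent log is reported as FAULTED, while any other reason for
+ * failing to open the vdev is reported as UNAVAIL.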
+ */ +char * +zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) +{ + switch (state) { + case VDEV_STATE_CLOSED: + case VDEV_STATE_OFFLINE: + return (gettext("OFFLINE")); + case VDEV_STATE_REMOVED: + return (gettext("REMOVED")); + case VDEV_STATE_CANT_OPEN: + if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) + return (gettext("FAULTED")); + else + return (gettext("UNAVAIL")); + case VDEV_STATE_FAULTED: + return (gettext("FAULTED")); + case VDEV_STATE_DEGRADED: + return (gettext("DEGRADED")); + case VDEV_STATE_HEALTHY: + return (gettext("ONLINE")); + } + + return (gettext("UNKNOWN")); +} + +/* + * Get a zpool property value for 'prop' and return the value in + * a pre-allocated buffer. + */ +int +zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, + zprop_source_t *srctype) +{ + uint64_t intval; + const char *strval; + zprop_source_t src = ZPROP_SRC_NONE; + nvlist_t *nvroot; + vdev_stat_t *vs; + uint_t vsc; + + if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { + if (prop == ZPOOL_PROP_NAME) + (void) strlcpy(buf, zpool_get_name(zhp), len); + else if (prop == ZPOOL_PROP_HEALTH) + (void) strlcpy(buf, "FAULTED", len); + else + (void) strlcpy(buf, "-", len); + return (0); + } + + if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && + prop != ZPOOL_PROP_NAME) + return (-1); + + switch (zpool_prop_get_type(prop)) { + case PROP_TYPE_STRING: + (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), + len); + break; + + case PROP_TYPE_NUMBER: + intval = zpool_get_prop_int(zhp, prop, &src); + + switch (prop) { + case ZPOOL_PROP_SIZE: + case ZPOOL_PROP_USED: + case ZPOOL_PROP_AVAILABLE: + (void) zfs_nicenum(intval, buf, len); + break; + + case ZPOOL_PROP_CAPACITY: + (void) snprintf(buf, len, "%llu%%", + (u_longlong_t)intval); + break; + + case ZPOOL_PROP_HEALTH: + verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), + ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); + verify(nvlist_lookup_uint64_array(nvroot, + ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0); + + (void) strlcpy(buf, zpool_state_to_name(intval, + vs->vs_aux), len); + break; + default: + (void) snprintf(buf, len, "%llu", intval); + } + break; + + case PROP_TYPE_INDEX: + intval = zpool_get_prop_int(zhp, prop, &src); + if (zpool_prop_index_to_string(prop, intval, &strval) + != 0) + return (-1); + (void) strlcpy(buf, strval, len); + break; + + default: + abort(); + } + + if (srctype) + *srctype = src; + + return (0); +} + +/* + * Check if the bootfs name has the same pool name as it is set to. + * Assuming bootfs is a valid dataset name. + */ +static boolean_t +bootfs_name_valid(const char *pool, char *bootfs) +{ + int len = strlen(pool); + + if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) + return (B_FALSE); + + if (strncmp(pool, bootfs, len) == 0 && + (bootfs[len] == '/' || bootfs[len] == '\0')) + return (B_TRUE); + + return (B_FALSE); +} + +/* + * Inspect the configuration to determine if any of the devices contain + * an EFI label. 
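+ *
+ * The walk below is a straightforward recursion: an nvlist with no
+ * ZPOOL_CONFIG_CHILDREN array is a leaf and is probed with
+ * read_efi_label(), and a single EFI-labeled leaf is enough to make the
+ * whole tree report B_TRUE.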
+ */ +static boolean_t +pool_uses_efi(nvlist_t *config) +{ + nvlist_t **child; + uint_t c, children; + + if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) + return (read_efi_label(config, NULL) >= 0); + + for (c = 0; c < children; c++) { + if (pool_uses_efi(child[c])) + return (B_TRUE); + } + return (B_FALSE); +} + +static boolean_t +pool_is_bootable(zpool_handle_t *zhp) +{ + char bootfs[ZPOOL_MAXNAMELEN]; + + return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, + sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-", + sizeof (bootfs)) != 0); +} + + +/* + * Given an nvlist of zpool properties to be set, validate that they are + * correct, and parse any numeric properties (index, boolean, etc) if they are + * specified as strings. + */ +static nvlist_t * +zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, + nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf) +{ + nvpair_t *elem; + nvlist_t *retprops; + zpool_prop_t prop; + char *strval; + uint64_t intval; + char *slash; + struct stat64 statbuf; + zpool_handle_t *zhp; + nvlist_t *nvroot; + + if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { + (void) no_memory(hdl); + return (NULL); + } + + elem = NULL; + while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { + const char *propname = nvpair_name(elem); + + /* + * Make sure this property is valid and applies to this type. + */ + if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "invalid property '%s'"), propname); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + + if (zpool_prop_readonly(prop)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " + "is readonly"), propname); + (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); + goto error; + } + + if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, + &strval, &intval, errbuf) != 0) + goto error; + + /* + * Perform additional checking for specific properties. + */ + switch (prop) { + case ZPOOL_PROP_VERSION: + if (intval < version || intval > SPA_VERSION) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "property '%s' number %d is invalid."), + propname, intval); + (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); + goto error; + } + break; + + case ZPOOL_PROP_BOOTFS: + if (create_or_import) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "property '%s' cannot be set at creation " + "or import time"), propname); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + + if (version < SPA_VERSION_BOOTFS) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "pool must be upgraded to support " + "'%s' property"), propname); + (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); + goto error; + } + + /* + * bootfs property value has to be a dataset name and + * the dataset has to be in the same pool as it sets to. + */ + if (strval[0] != '\0' && !bootfs_name_valid(poolname, + strval)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " + "is an invalid name"), strval); + (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); + goto error; + } + + if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "could not open pool '%s'"), poolname); + (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); + goto error; + } + verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), + ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); + + /* + * bootfs property cannot be set on a disk which has + * been EFI labeled. 
+ */ + if (pool_uses_efi(nvroot)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "property '%s' not supported on " + "EFI labeled devices"), propname); + (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); + zpool_close(zhp); + goto error; + } + zpool_close(zhp); + break; + + case ZPOOL_PROP_ALTROOT: + if (!create_or_import) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "property '%s' can only be set during pool " + "creation or import"), propname); + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + goto error; + } + + if (strval[0] != '/') { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "bad alternate root '%s'"), strval); + (void) zfs_error(hdl, EZFS_BADPATH, errbuf); + goto error; + } + break; + + case ZPOOL_PROP_CACHEFILE: + if (strval[0] == '\0') + break; + + if (strcmp(strval, "none") == 0) + break; + + if (strval[0] != '/') { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "property '%s' must be empty, an " + "absolute path, or 'none'"), propname); + (void) zfs_error(hdl, EZFS_BADPATH, errbuf); + goto error; + } + + slash = strrchr(strval, '/'); + + if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || + strcmp(slash, "/..") == 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' is not a valid file"), strval); + (void) zfs_error(hdl, EZFS_BADPATH, errbuf); + goto error; + } + + *slash = '\0'; + + if (strval[0] != '\0' && + (stat64(strval, &statbuf) != 0 || + !S_ISDIR(statbuf.st_mode))) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' is not a valid directory"), + strval); + (void) zfs_error(hdl, EZFS_BADPATH, errbuf); + goto error; + } + + *slash = '/'; + break; + } + } + + return (retprops); +error: + nvlist_free(retprops); + return (NULL); +} + +/* + * Set zpool property : propname=propval. + */ +int +zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) +{ + zfs_cmd_t zc = { 0 }; + int ret = -1; + char errbuf[1024]; + nvlist_t *nvl = NULL; + nvlist_t *realprops; + uint64_t version; + + (void) snprintf(errbuf, sizeof (errbuf), + dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), + zhp->zpool_name); + + if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) + return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf)); + + if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) + return (no_memory(zhp->zpool_hdl)); + + if (nvlist_add_string(nvl, propname, propval) != 0) { + nvlist_free(nvl); + return (no_memory(zhp->zpool_hdl)); + } + + version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); + if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, + zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) { + nvlist_free(nvl); + return (-1); + } + + nvlist_free(nvl); + nvl = realprops; + + /* + * Execute the corresponding ioctl() to set this property. 
+ */ + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + + if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { + nvlist_free(nvl); + return (-1); + } + + ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); + + zcmd_free_nvlists(&zc); + nvlist_free(nvl); + + if (ret) + (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); + else + (void) zpool_props_refresh(zhp); + + return (ret); +} + +int +zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) +{ + libzfs_handle_t *hdl = zhp->zpool_hdl; + zprop_list_t *entry; + char buf[ZFS_MAXPROPLEN]; + + if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) + return (-1); + + for (entry = *plp; entry != NULL; entry = entry->pl_next) { + + if (entry->pl_fixed) + continue; + + if (entry->pl_prop != ZPROP_INVAL && + zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), + NULL) == 0) { + if (strlen(buf) > entry->pl_width) + entry->pl_width = strlen(buf); + } + } + + return (0); +} + + +/* + * Validate the given pool name, optionally putting an extended error message in + * 'buf'. + */ +boolean_t +zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) +{ + namecheck_err_t why; + char what; + int ret; + + ret = pool_namecheck(pool, &why, &what); + + /* + * The rules for reserved pool names were extended at a later point. + * But we need to support users with existing pools that may now be + * invalid. So we only check for this expanded set of names during a + * create (or import), and only in userland. + */ + if (ret == 0 && !isopen && + (strncmp(pool, "mirror", 6) == 0 || + strncmp(pool, "raidz", 5) == 0 || + strncmp(pool, "spare", 5) == 0 || + strcmp(pool, "log") == 0)) { + if (hdl != NULL) + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, "name is reserved")); + return (B_FALSE); + } + + + if (ret != 0) { + if (hdl != NULL) { + switch (why) { + case NAME_ERR_TOOLONG: + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, "name is too long")); + break; + + case NAME_ERR_INVALCHAR: + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, "invalid character " + "'%c' in pool name"), what); + break; + + case NAME_ERR_NOLETTER: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "name must begin with a letter")); + break; + + case NAME_ERR_RESERVED: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "name is reserved")); + break; + + case NAME_ERR_DISKLIKE: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "pool name is reserved")); + break; + + case NAME_ERR_LEADING_SLASH: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "leading slash in name")); + break; + + case NAME_ERR_EMPTY_COMPONENT: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "empty component in name")); + break; + + case NAME_ERR_TRAILING_SLASH: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "trailing slash in name")); + break; + + case NAME_ERR_MULTIPLE_AT: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "multiple '@' delimiters in name")); + break; + + } + } + return (B_FALSE); + } + + return (B_TRUE); +} + +/* + * Open a handle to the given pool, even if the pool is currently in the FAULTED + * state. + */ +zpool_handle_t * +zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) +{ + zpool_handle_t *zhp; + boolean_t missing; + + /* + * Make sure the pool name is valid. 
+ */ + if (!zpool_name_valid(hdl, B_TRUE, pool)) { + (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, + dgettext(TEXT_DOMAIN, "cannot open '%s'"), + pool); + return (NULL); + } + + if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) + return (NULL); + + zhp->zpool_hdl = hdl; + (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); + + if (zpool_refresh_stats(zhp, &missing) != 0) { + zpool_close(zhp); + return (NULL); + } + + if (missing) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); + (void) zfs_error_fmt(hdl, EZFS_NOENT, + dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); + zpool_close(zhp); + return (NULL); + } + + return (zhp); +} + +/* + * Like the above, but silent on error. Used when iterating over pools (because + * the configuration cache may be out of date). + */ +int +zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) +{ + zpool_handle_t *zhp; + boolean_t missing; + + if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) + return (-1); + + zhp->zpool_hdl = hdl; + (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); + + if (zpool_refresh_stats(zhp, &missing) != 0) { + zpool_close(zhp); + return (-1); + } + + if (missing) { + zpool_close(zhp); + *ret = NULL; + return (0); + } + + *ret = zhp; + return (0); +} + +/* + * Similar to zpool_open_canfail(), but refuses to open pools in the faulted + * state. + */ +zpool_handle_t * +zpool_open(libzfs_handle_t *hdl, const char *pool) +{ + zpool_handle_t *zhp; + + if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) + return (NULL); + + if (zhp->zpool_state == POOL_STATE_UNAVAIL) { + (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, + dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); + zpool_close(zhp); + return (NULL); + } + + return (zhp); +} + +/* + * Close the handle. Simply frees the memory associated with the handle. + */ +void +zpool_close(zpool_handle_t *zhp) +{ + if (zhp->zpool_config) + nvlist_free(zhp->zpool_config); + if (zhp->zpool_old_config) + nvlist_free(zhp->zpool_old_config); + if (zhp->zpool_props) + nvlist_free(zhp->zpool_props); + free(zhp); +} + +/* + * Return the name of the pool. + */ +const char * +zpool_get_name(zpool_handle_t *zhp) +{ + return (zhp->zpool_name); +} + + +/* + * Return the state of the pool (ACTIVE or UNAVAILABLE) + */ +int +zpool_get_state(zpool_handle_t *zhp) +{ + return (zhp->zpool_state); +} + +/* + * Create the named pool, using the provided vdev list. It is assumed + * that the consumer has already validated the contents of the nvlist, so we + * don't have to worry about error semantics. 
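+ *
+ * A minimal caller sketch (illustrative only; assumes 'hdl' came from
+ * libzfs_init() and 'nvroot' is a vdev tree the caller has already built
+ * and validated):
+ *
+ *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
+ *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));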
+ */ +int +zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, + nvlist_t *props, nvlist_t *fsprops) +{ + zfs_cmd_t zc = { 0 }; + nvlist_t *zc_fsprops = NULL; + nvlist_t *zc_props = NULL; + char msg[1024]; + char *altroot; + int ret = -1; + + (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, + "cannot create '%s'"), pool); + + if (!zpool_name_valid(hdl, B_FALSE, pool)) + return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); + + if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) + return (-1); + + if (props) { + if ((zc_props = zpool_valid_proplist(hdl, pool, props, + SPA_VERSION_1, B_TRUE, msg)) == NULL) { + goto create_failed; + } + } + + if (fsprops) { + uint64_t zoned; + char *zonestr; + + zoned = ((nvlist_lookup_string(fsprops, + zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && + strcmp(zonestr, "on") == 0); + + if ((zc_fsprops = zfs_valid_proplist(hdl, + ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { + goto create_failed; + } + if (!zc_props && + (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { + goto create_failed; + } + if (nvlist_add_nvlist(zc_props, + ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { + goto create_failed; + } + } + + if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) + goto create_failed; + + (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); + + if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { + + zcmd_free_nvlists(&zc); + nvlist_free(zc_props); + nvlist_free(zc_fsprops); + + switch (errno) { + case EBUSY: + /* + * This can happen if the user has specified the same + * device multiple times. We can't reliably detect this + * until we try to add it and see we already have a + * label. + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "one or more vdevs refer to the same device")); + return (zfs_error(hdl, EZFS_BADDEV, msg)); + + case EOVERFLOW: + /* + * This occurs when one of the devices is below + * SPA_MINDEVSIZE. Unfortunately, we can't detect which + * device was the problem device since there's no + * reliable way to determine device size from userland. + */ + { + char buf[64]; + + zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); + + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "one or more devices is less than the " + "minimum size (%s)"), buf); + } + return (zfs_error(hdl, EZFS_BADDEV, msg)); + + case ENOSPC: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "one or more devices is out of space")); + return (zfs_error(hdl, EZFS_BADDEV, msg)); + + case ENOTBLK: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "cache device must be a disk or disk slice")); + return (zfs_error(hdl, EZFS_BADDEV, msg)); + + default: + return (zpool_standard_error(hdl, errno, msg)); + } + } + + /* + * If this is an alternate root pool, then we automatically set the + * mountpoint of the root dataset to be '/'. + */ + if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT), + &altroot) == 0) { + zfs_handle_t *zhp; + + verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL); + verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), + "/") == 0); + + zfs_close(zhp); + } + +create_failed: + zcmd_free_nvlists(&zc); + nvlist_free(zc_props); + nvlist_free(zc_fsprops); + return (ret); +} + +/* + * Destroy the given pool. It is up to the caller to ensure that there are no + * datasets left in the pool. 
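+ *
+ * A hedged usage sketch (illustrative only; assumes the caller has already
+ * destroyed or unmounted every dataset in the pool):
+ *
+ *	zpool_handle_t *zhp;
+ *
+ *	if ((zhp = zpool_open_canfail(hdl, "tank")) != NULL) {
+ *		if (zpool_destroy(zhp) != 0)
+ *			(void) fprintf(stderr, "%s\n",
+ *			    libzfs_error_description(hdl));
+ *		zpool_close(zhp);
+ *	}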
+ */ +int +zpool_destroy(zpool_handle_t *zhp) +{ + zfs_cmd_t zc = { 0 }; + zfs_handle_t *zfp = NULL; + libzfs_handle_t *hdl = zhp->zpool_hdl; + char msg[1024]; + + if (zhp->zpool_state == POOL_STATE_ACTIVE && + (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name, + ZFS_TYPE_FILESYSTEM)) == NULL) + return (-1); + + if (zpool_remove_zvol_links(zhp) != 0) + return (-1); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + + if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { + (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, + "cannot destroy '%s'"), zhp->zpool_name); + + if (errno == EROFS) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "one or more devices is read only")); + (void) zfs_error(hdl, EZFS_BADDEV, msg); + } else { + (void) zpool_standard_error(hdl, errno, msg); + } + + if (zfp) + zfs_close(zfp); + return (-1); + } + + if (zfp) { + remove_mountpoint(zfp); + zfs_close(zfp); + } + + return (0); +} + +/* + * Add the given vdevs to the pool. The caller must have already performed the + * necessary verification to ensure that the vdev specification is well-formed. + */ +int +zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) +{ + zfs_cmd_t zc = { 0 }; + int ret; + libzfs_handle_t *hdl = zhp->zpool_hdl; + char msg[1024]; + nvlist_t **spares, **l2cache; + uint_t nspares, nl2cache; + + (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, + "cannot add to '%s'"), zhp->zpool_name); + + if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < + SPA_VERSION_SPARES && + nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, + &spares, &nspares) == 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " + "upgraded to add hot spares")); + return (zfs_error(hdl, EZFS_BADVERSION, msg)); + } + + if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot, + ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { + uint64_t s; + + for (s = 0; s < nspares; s++) { + char *path; + + if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH, + &path) == 0 && pool_uses_efi(spares[s])) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "device '%s' contains an EFI label and " + "cannot be used on root pools."), + zpool_vdev_name(hdl, NULL, spares[s])); + return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); + } + } + } + + if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < + SPA_VERSION_L2CACHE && + nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, + &l2cache, &nl2cache) == 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " + "upgraded to add cache devices")); + return (zfs_error(hdl, EZFS_BADVERSION, msg)); + } + + if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) + return (-1); + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + + if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { + switch (errno) { + case EBUSY: + /* + * This can happen if the user has specified the same + * device multiple times. We can't reliably detect this + * until we try to add it and see we already have a + * label. + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "one or more vdevs refer to the same device")); + (void) zfs_error(hdl, EZFS_BADDEV, msg); + break; + + case EOVERFLOW: + /* + * This occurrs when one of the devices is below + * SPA_MINDEVSIZE. Unfortunately, we can't detect which + * device was the problem device since there's no + * reliable way to determine device size from userland. 
+ */ + { + char buf[64]; + + zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); + + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "device is less than the minimum " + "size (%s)"), buf); + } + (void) zfs_error(hdl, EZFS_BADDEV, msg); + break; + + case ENOTSUP: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "pool must be upgraded to add these vdevs")); + (void) zfs_error(hdl, EZFS_BADVERSION, msg); + break; + + case EDOM: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "root pool can not have multiple vdevs" + " or separate logs")); + (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); + break; + + case ENOTBLK: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "cache device must be a disk or disk slice")); + (void) zfs_error(hdl, EZFS_BADDEV, msg); + break; + + default: + (void) zpool_standard_error(hdl, errno, msg); + } + + ret = -1; + } else { + ret = 0; + } + + zcmd_free_nvlists(&zc); + + return (ret); +} + +/* + * Exports the pool from the system. The caller must ensure that there are no + * mounted datasets in the pool. + */ +int +zpool_export(zpool_handle_t *zhp, boolean_t force) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + + if (zpool_remove_zvol_links(zhp) != 0) + return (-1); + + (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, + "cannot export '%s'"), zhp->zpool_name); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + zc.zc_cookie = force; + + if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { + switch (errno) { + case EXDEV: + zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, + "use '-f' to override the following errors:\n" + "'%s' has an active shared spare which could be" + " used by other pools once '%s' is exported."), + zhp->zpool_name, zhp->zpool_name); + return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, + msg)); + default: + return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, + msg)); + } + } + + return (0); +} + +/* + * zpool_import() is a contracted interface. Should be kept the same + * if possible. + * + * Applications should use zpool_import_props() to import a pool with + * new properties value to be set. + */ +int +zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, + char *altroot) +{ + nvlist_t *props = NULL; + int ret; + + if (altroot != NULL) { + if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { + return (zfs_error_fmt(hdl, EZFS_NOMEM, + dgettext(TEXT_DOMAIN, "cannot import '%s'"), + newname)); + } + + if (nvlist_add_string(props, + zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || + nvlist_add_string(props, + zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { + nvlist_free(props); + return (zfs_error_fmt(hdl, EZFS_NOMEM, + dgettext(TEXT_DOMAIN, "cannot import '%s'"), + newname)); + } + } + + ret = zpool_import_props(hdl, config, newname, props, B_FALSE); + if (props) + nvlist_free(props); + return (ret); +} + +/* + * Import the given pool using the known configuration and a list of + * properties to be set. The configuration should have come from + * zpool_find_import(). The 'newname' parameters control whether the pool + * is imported with a different name. 
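+ *
+ * Illustrative call sequence (a sketch only; assumes 'config' is one of the
+ * pool configurations discovered by zpool_find_import() and 'props' is an
+ * optional property list, possibly NULL):
+ *
+ *	if (zpool_import_props(hdl, config, NULL, props, B_FALSE) != 0)
+ *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));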
+ */ +int +zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, + nvlist_t *props, boolean_t importfaulted) +{ + zfs_cmd_t zc = { 0 }; + char *thename; + char *origname; + int ret; + char errbuf[1024]; + + verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, + &origname) == 0); + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot import pool '%s'"), origname); + + if (newname != NULL) { + if (!zpool_name_valid(hdl, B_FALSE, newname)) + return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, + dgettext(TEXT_DOMAIN, "cannot import '%s'"), + newname)); + thename = (char *)newname; + } else { + thename = origname; + } + + if (props) { + uint64_t version; + + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, + &version) == 0); + + if ((props = zpool_valid_proplist(hdl, origname, + props, version, B_TRUE, errbuf)) == NULL) { + return (-1); + } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { + nvlist_free(props); + return (-1); + } + } + + (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); + + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, + &zc.zc_guid) == 0); + + if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { + nvlist_free(props); + return (-1); + } + + zc.zc_cookie = (uint64_t)importfaulted; + ret = 0; + if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) { + char desc[1024]; + if (newname == NULL) + (void) snprintf(desc, sizeof (desc), + dgettext(TEXT_DOMAIN, "cannot import '%s'"), + thename); + else + (void) snprintf(desc, sizeof (desc), + dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), + origname, thename); + + switch (errno) { + case ENOTSUP: + /* + * Unsupported version. + */ + (void) zfs_error(hdl, EZFS_BADVERSION, desc); + break; + + case EINVAL: + (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); + break; + + default: + (void) zpool_standard_error(hdl, errno, desc); + } + + ret = -1; + } else { + zpool_handle_t *zhp; + + /* + * This should never fail, but play it safe anyway. + */ + if (zpool_open_silent(hdl, thename, &zhp) != 0) { + ret = -1; + } else if (zhp != NULL) { + ret = zpool_create_zvol_links(zhp); + zpool_close(zhp); + } + + } + + zcmd_free_nvlists(&zc); + nvlist_free(props); + + return (ret); +} + +/* + * Scrub the pool. + */ +int +zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + zc.zc_cookie = type; + + if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0) + return (0); + + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); + + if (errno == EBUSY) + return (zfs_error(hdl, EZFS_RESILVERING, msg)); + else + return (zpool_standard_error(hdl, errno, msg)); +} + +/* + * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL + * spare; but FALSE if its an INUSE spare. + */ +static nvlist_t * +vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid, + boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) +{ + uint_t c, children; + nvlist_t **child; + uint64_t theguid, present; + char *path; + uint64_t wholedisk = 0; + nvlist_t *ret; + uint64_t is_log; + + verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0); + + if (search == NULL && + nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) { + /* + * If the device has never been present since import, the only + * reliable way to match the vdev is by GUID. 
+ */ + if (theguid == guid) + return (nv); + } else if (search != NULL && + nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, + &wholedisk); + if (wholedisk) { + /* + * For whole disks, the internal path has 's0', but the + * path passed in by the user doesn't. + */ + if (strlen(search) == strlen(path) - 2 && + strncmp(search, path, strlen(search)) == 0) + return (nv); + } else if (strcmp(search, path) == 0) { + return (nv); + } + } + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0) + return (NULL); + + for (c = 0; c < children; c++) { + if ((ret = vdev_to_nvlist_iter(child[c], search, guid, + avail_spare, l2cache, NULL)) != NULL) { + /* + * The 'is_log' value is only set for the toplevel + * vdev, not the leaf vdevs. So we always lookup the + * log device from the root of the vdev tree (where + * 'log' is non-NULL). + */ + if (log != NULL && + nvlist_lookup_uint64(child[c], + ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && + is_log) { + *log = B_TRUE; + } + return (ret); + } + } + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, + &child, &children) == 0) { + for (c = 0; c < children; c++) { + if ((ret = vdev_to_nvlist_iter(child[c], search, guid, + avail_spare, l2cache, NULL)) != NULL) { + *avail_spare = B_TRUE; + return (ret); + } + } + } + + if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, + &child, &children) == 0) { + for (c = 0; c < children; c++) { + if ((ret = vdev_to_nvlist_iter(child[c], search, guid, + avail_spare, l2cache, NULL)) != NULL) { + *l2cache = B_TRUE; + return (ret); + } + } + } + + return (NULL); +} + +nvlist_t * +zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, + boolean_t *l2cache, boolean_t *log) +{ + char buf[MAXPATHLEN]; + const char *search; + char *end; + nvlist_t *nvroot; + uint64_t guid; + + guid = strtoull(path, &end, 10); + if (guid != 0 && *end == '\0') { + search = NULL; + } else if (path[0] != '/') { + (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path); + search = buf; + } else { + search = path; + } + + verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + + *avail_spare = B_FALSE; + *l2cache = B_FALSE; + if (log != NULL) + *log = B_FALSE; + return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare, + l2cache, log)); +} + +static int +vdev_online(nvlist_t *nv) +{ + uint64_t ival; + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || + nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || + nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) + return (0); + + return (1); +} + +/* + * Get phys_path for a root pool + * Return 0 on success; non-zeron on failure. + */ +int +zpool_get_physpath(zpool_handle_t *zhp, char *physpath) +{ + nvlist_t *vdev_root; + nvlist_t **child; + uint_t count; + int i; + + /* + * Make sure this is a root pool, as phys_path doesn't mean + * anything to a non-root pool. 
+ */ + if (!pool_is_bootable(zhp)) + return (-1); + + verify(nvlist_lookup_nvlist(zhp->zpool_config, + ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0); + + if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, + &child, &count) != 0) + return (-2); + + for (i = 0; i < count; i++) { + nvlist_t **child2; + uint_t count2; + char *type; + char *tmppath; + int j; + + if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type) + != 0) + return (-3); + + if (strcmp(type, VDEV_TYPE_DISK) == 0) { + if (!vdev_online(child[i])) + return (-8); + verify(nvlist_lookup_string(child[i], + ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0); + (void) strncpy(physpath, tmppath, strlen(tmppath)); + } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) { + if (nvlist_lookup_nvlist_array(child[i], + ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0) + return (-4); + + for (j = 0; j < count2; j++) { + if (!vdev_online(child2[j])) + return (-8); + if (nvlist_lookup_string(child2[j], + ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0) + return (-5); + + if ((strlen(physpath) + strlen(tmppath)) > + MAXNAMELEN) + return (-6); + + if (strlen(physpath) == 0) { + (void) strncpy(physpath, tmppath, + strlen(tmppath)); + } else { + (void) strcat(physpath, " "); + (void) strcat(physpath, tmppath); + } + } + } else { + return (-7); + } + } + + return (0); +} + +/* + * Returns TRUE if the given guid corresponds to the given type. + * This is used to check for hot spares (INUSE or not), and level 2 cache + * devices. + */ +static boolean_t +is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type) +{ + uint64_t target_guid; + nvlist_t *nvroot; + nvlist_t **list; + uint_t count; + int i; + + verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) { + for (i = 0; i < count; i++) { + verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID, + &target_guid) == 0); + if (guid == target_guid) + return (B_TRUE); + } + } + + return (B_FALSE); +} + +/* + * Bring the specified vdev online. The 'flags' parameter is a set of the + * ZFS_ONLINE_* flags. 
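+ *
+ * For example (an illustrative sketch only), to bring a device back online
+ * with no special flags and check the state it ends up in:
+ *
+ *	vdev_state_t newstate;
+ *
+ *	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) == 0 &&
+ *	    newstate != VDEV_STATE_HEALTHY)
+ *		(void) printf("device onlined but not healthy\n");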
+ */ +int +zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, + vdev_state_t *newstate) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + nvlist_t *tgt; + boolean_t avail_spare, l2cache; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot online %s"), path); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, + NULL)) == NULL) + return (zfs_error(hdl, EZFS_NODEVICE, msg)); + + verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); + + if (avail_spare || + is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE) + return (zfs_error(hdl, EZFS_ISSPARE, msg)); + + zc.zc_cookie = VDEV_STATE_ONLINE; + zc.zc_obj = flags; + + if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) + return (zpool_standard_error(hdl, errno, msg)); + + *newstate = zc.zc_cookie; + return (0); +} + +/* + * Take the specified vdev offline + */ +int +zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + nvlist_t *tgt; + boolean_t avail_spare, l2cache; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot offline %s"), path); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, + NULL)) == NULL) + return (zfs_error(hdl, EZFS_NODEVICE, msg)); + + verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); + + if (avail_spare || + is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE) + return (zfs_error(hdl, EZFS_ISSPARE, msg)); + + zc.zc_cookie = VDEV_STATE_OFFLINE; + zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; + + if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) + return (0); + + switch (errno) { + case EBUSY: + + /* + * There are no other replicas of this device. + */ + return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); + + default: + return (zpool_standard_error(hdl, errno, msg)); + } +} + +/* + * Mark the given vdev faulted. + */ +int +zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + zc.zc_guid = guid; + zc.zc_cookie = VDEV_STATE_FAULTED; + + if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) + return (0); + + switch (errno) { + case EBUSY: + + /* + * There are no other replicas of this device. + */ + return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); + + default: + return (zpool_standard_error(hdl, errno, msg)); + } + +} + +/* + * Mark the given vdev degraded. + */ +int +zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + zc.zc_guid = guid; + zc.zc_cookie = VDEV_STATE_DEGRADED; + + if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) + return (0); + + return (zpool_standard_error(hdl, errno, msg)); +} + +/* + * Returns TRUE if the given nvlist is a vdev that was originally swapped in as + * a hot spare. 
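+ *
+ * 'which' selects which child of a two-way spare vdev is compared against
+ * 'tgt': 0 for the original device and 1 for the spare that was swapped in
+ * (see the two call sites in zpool_vdev_attach() below).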
+ */ +static boolean_t +is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) +{ + nvlist_t **child; + uint_t c, children; + char *type; + + if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, + &children) == 0) { + verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, + &type) == 0); + + if (strcmp(type, VDEV_TYPE_SPARE) == 0 && + children == 2 && child[which] == tgt) + return (B_TRUE); + + for (c = 0; c < children; c++) + if (is_replacing_spare(child[c], tgt, which)) + return (B_TRUE); + } + + return (B_FALSE); +} + +/* + * Attach new_disk (fully described by nvroot) to old_disk. + * If 'replacing' is specified, the new disk will replace the old one. + */ +int +zpool_vdev_attach(zpool_handle_t *zhp, + const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + int ret; + nvlist_t *tgt; + boolean_t avail_spare, l2cache, islog; + uint64_t val; + char *path, *newname; + nvlist_t **child; + uint_t children; + nvlist_t *config_root; + libzfs_handle_t *hdl = zhp->zpool_hdl; + boolean_t rootpool = pool_is_bootable(zhp); + + if (replacing) + (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, + "cannot replace %s with %s"), old_disk, new_disk); + else + (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, + "cannot attach %s to %s"), new_disk, old_disk); + + /* + * If this is a root pool, make sure that we're not attaching an + * EFI labeled device. + */ + if (rootpool && pool_uses_efi(nvroot)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "EFI labeled devices are not supported on root pools.")); + return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); + } + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, + &islog)) == 0) + return (zfs_error(hdl, EZFS_NODEVICE, msg)); + + if (avail_spare) + return (zfs_error(hdl, EZFS_ISSPARE, msg)); + + if (l2cache) + return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); + + verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); + zc.zc_cookie = replacing; + + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + &child, &children) != 0 || children != 1) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "new device must be a single disk")); + return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); + } + + verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), + ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); + + if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL) + return (-1); + + /* + * If the target is a hot spare that has been swapped in, we can only + * replace it with another hot spare. + */ + if (replacing && + nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && + (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, + NULL) == NULL || !avail_spare) && + is_replacing_spare(config_root, tgt, 1)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "can only be replaced by another hot spare")); + free(newname); + return (zfs_error(hdl, EZFS_BADTARGET, msg)); + } + + /* + * If we are attempting to replace a spare, it canot be applied to an + * already spared device. 
+ */ + if (replacing && + nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 && + zpool_find_vdev(zhp, newname, &avail_spare, + &l2cache, NULL) != NULL && avail_spare && + is_replacing_spare(config_root, tgt, 0)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "device has already been replaced with a spare")); + free(newname); + return (zfs_error(hdl, EZFS_BADTARGET, msg)); + } + + free(newname); + + if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) + return (-1); + + ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc); + + zcmd_free_nvlists(&zc); + + if (ret == 0) { + if (rootpool) { + /* + * XXX - This should be removed once we can + * automatically install the bootblocks on the + * newly attached disk. + */ + (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please " + "be sure to invoke %s to make '%s' bootable.\n"), + BOOTCMD, new_disk); + } + return (0); + } + + switch (errno) { + case ENOTSUP: + /* + * Can't attach to or replace this type of vdev. + */ + if (replacing) { + if (islog) + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "cannot replace a log with a spare")); + else + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "cannot replace a replacing device")); + } else { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "can only attach to mirrors and top-level " + "disks")); + } + (void) zfs_error(hdl, EZFS_BADTARGET, msg); + break; + + case EINVAL: + /* + * The new device must be a single disk. + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "new device must be a single disk")); + (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); + break; + + case EBUSY: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), + new_disk); + (void) zfs_error(hdl, EZFS_BADDEV, msg); + break; + + case EOVERFLOW: + /* + * The new device is too small. + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "device is too small")); + (void) zfs_error(hdl, EZFS_BADDEV, msg); + break; + + case EDOM: + /* + * The new device has a different alignment requirement. + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "devices have different sector alignment")); + (void) zfs_error(hdl, EZFS_BADDEV, msg); + break; + + case ENAMETOOLONG: + /* + * The resulting top-level vdev spec won't fit in the label. + */ + (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); + break; + + default: + (void) zpool_standard_error(hdl, errno, msg); + } + + return (-1); +} + +/* + * Detach the specified device. + */ +int +zpool_vdev_detach(zpool_handle_t *zhp, const char *path) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + nvlist_t *tgt; + boolean_t avail_spare, l2cache; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot detach %s"), path); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, + NULL)) == 0) + return (zfs_error(hdl, EZFS_NODEVICE, msg)); + + if (avail_spare) + return (zfs_error(hdl, EZFS_ISSPARE, msg)); + + if (l2cache) + return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); + + verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); + + if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) + return (0); + + switch (errno) { + + case ENOTSUP: + /* + * Can't detach from this type of vdev. + */ + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " + "applicable to mirror and replacing vdevs")); + (void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg); + break; + + case EBUSY: + /* + * There are no other replicas of this device. 
+ */ + (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); + break; + + default: + (void) zpool_standard_error(hdl, errno, msg); + } + + return (-1); +} + +/* + * Remove the given device. Currently, this is supported only for hot spares + * and level 2 cache devices. + */ +int +zpool_vdev_remove(zpool_handle_t *zhp, const char *path) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + nvlist_t *tgt; + boolean_t avail_spare, l2cache; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot remove %s"), path); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, + NULL)) == 0) + return (zfs_error(hdl, EZFS_NODEVICE, msg)); + + if (!avail_spare && !l2cache) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "only inactive hot spares or cache devices " + "can be removed")); + return (zfs_error(hdl, EZFS_NODEVICE, msg)); + } + + verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); + + if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) + return (0); + + return (zpool_standard_error(hdl, errno, msg)); +} + +/* + * Clear the errors for the pool, or the particular device if specified. + */ +int +zpool_clear(zpool_handle_t *zhp, const char *path) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + nvlist_t *tgt; + boolean_t avail_spare, l2cache; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + if (path) + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), + path); + else + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), + zhp->zpool_name); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + if (path) { + if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, + &l2cache, NULL)) == 0) + return (zfs_error(hdl, EZFS_NODEVICE, msg)); + + /* + * Don't allow error clearing for hot spares. Do allow + * error clearing for l2cache devices. + */ + if (avail_spare) + return (zfs_error(hdl, EZFS_ISSPARE, msg)); + + verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, + &zc.zc_guid) == 0); + } + + if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0) + return (0); + + return (zpool_standard_error(hdl, errno, msg)); +} + +/* + * Similar to zpool_clear(), but takes a GUID (used by fmd). + */ +int +zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) +{ + zfs_cmd_t zc = { 0 }; + char msg[1024]; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) snprintf(msg, sizeof (msg), + dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), + guid); + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + zc.zc_guid = guid; + + if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) + return (0); + + return (zpool_standard_error(hdl, errno, msg)); +} + +/* + * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/ + * hierarchy. + */ +int +zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *), + void *data) +{ + libzfs_handle_t *hdl = zhp->zpool_hdl; + char (*paths)[MAXPATHLEN]; + size_t size = 4; + int curr, fd, base, ret = 0; + DIR *dirp; + struct dirent *dp; + struct stat st; + + if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0) + return (errno == ENOENT ? 0 : -1); + + if (fstatat(base, zhp->zpool_name, &st, 0) != 0) { + int err = errno; + (void) close(base); + return (err == ENOENT ? 0 : -1); + } + + /* + * Oddly this wasn't a directory -- ignore that failure since we + * know there are no links lower in the (non-existant) hierarchy. 
+ */ + if (!S_ISDIR(st.st_mode)) { + (void) close(base); + return (0); + } + + if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) { + (void) close(base); + return (-1); + } + + (void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0])); + curr = 0; + + while (curr >= 0) { + if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0) + goto err; + + if (S_ISDIR(st.st_mode)) { + if ((fd = openat(base, paths[curr], O_RDONLY)) < 0) + goto err; + + if ((dirp = fdopendir(fd)) == NULL) { + (void) close(fd); + goto err; + } + + while ((dp = readdir(dirp)) != NULL) { + if (dp->d_name[0] == '.') + continue; + + if (curr + 1 == size) { + paths = zfs_realloc(hdl, paths, + size * sizeof (paths[0]), + size * 2 * sizeof (paths[0])); + if (paths == NULL) { + (void) closedir(dirp); + (void) close(fd); + goto err; + } + + size *= 2; + } + + (void) strlcpy(paths[curr + 1], paths[curr], + sizeof (paths[curr + 1])); + (void) strlcat(paths[curr], "/", + sizeof (paths[curr])); + (void) strlcat(paths[curr], dp->d_name, + sizeof (paths[curr])); + curr++; + } + + (void) closedir(dirp); + + } else { + if ((ret = cb(paths[curr], data)) != 0) + break; + } + + curr--; + } + + free(paths); + (void) close(base); + + return (ret); + +err: + free(paths); + (void) close(base); + return (-1); +} + +typedef struct zvol_cb { + zpool_handle_t *zcb_pool; + boolean_t zcb_create; +} zvol_cb_t; + +/*ARGSUSED*/ +static int +do_zvol_create(zfs_handle_t *zhp, void *data) +{ + int ret = 0; + + if (ZFS_IS_VOLUME(zhp)) { + (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name); + ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL); + } + + if (ret == 0) + ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL); + + zfs_close(zhp); + + return (ret); +} + +/* + * Iterate over all zvols in the pool and make any necessary minor nodes. + */ +int +zpool_create_zvol_links(zpool_handle_t *zhp) +{ + zfs_handle_t *zfp; + int ret; + + /* + * If the pool is unavailable, just return success. + */ + if ((zfp = make_dataset_handle(zhp->zpool_hdl, + zhp->zpool_name)) == NULL) + return (0); + + ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL); + + zfs_close(zfp); + return (ret); +} + +static int +do_zvol_remove(const char *dataset, void *data) +{ + zpool_handle_t *zhp = data; + + return (zvol_remove_link(zhp->zpool_hdl, dataset)); +} + +/* + * Iterate over all zvols in the pool and remove any minor nodes. We iterate + * by examining the /dev links so that a corrupted pool doesn't impede this + * operation. + */ +int +zpool_remove_zvol_links(zpool_handle_t *zhp) +{ + return (zpool_iter_zvol(zhp, do_zvol_remove, zhp)); +} + +/* + * Convert from a devid string to a path. + */ +static char * +devid_to_path(char *devid_str) +{ + ddi_devid_t devid; + char *minor; + char *path; + devid_nmlist_t *list = NULL; + int ret; + + if (devid_str_decode(devid_str, &devid, &minor) != 0) + return (NULL); + + ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); + + devid_str_free(minor); + devid_free(devid); + + if (ret != 0) + return (NULL); + + if ((path = strdup(list[0].devname)) == NULL) + return (NULL); + + devid_free_nmlist(list); + + return (path); +} + +/* + * Convert from a path to a devid string. 
+ */ +static char * +path_to_devid(const char *path) +{ + int fd; + ddi_devid_t devid; + char *minor, *ret; + + if ((fd = open(path, O_RDONLY)) < 0) + return (NULL); + + minor = NULL; + ret = NULL; + if (devid_get(fd, &devid) == 0) { + if (devid_get_minor_name(fd, &minor) == 0) + ret = devid_str_encode(devid, minor); + if (minor != NULL) + devid_str_free(minor); + devid_free(devid); + } + (void) close(fd); + + return (ret); +} + +/* + * Issue the necessary ioctl() to update the stored path value for the vdev. We + * ignore any failure here, since a common case is for an unprivileged user to + * type 'zpool status', and we'll display the correct information anyway. + */ +static void +set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) +{ + zfs_cmd_t zc = { 0 }; + + (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); + verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, + &zc.zc_guid) == 0); + + (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); +} + +/* + * Given a vdev, return the name to display in iostat. If the vdev has a path, + * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. + * We also check if this is a whole disk, in which case we strip off the + * trailing 's0' slice name. + * + * This routine is also responsible for identifying when disks have been + * reconfigured in a new location. The kernel will have opened the device by + * devid, but the path will still refer to the old location. To catch this, we + * first do a path -> devid translation (which is fast for the common case). If + * the devid matches, we're done. If not, we do a reverse devid -> path + * translation and issue the appropriate ioctl() to update the path of the vdev. + * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any + * of these checks. + */ +char * +zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv) +{ + char *path, *devid; + uint64_t value; + char buf[64]; + vdev_stat_t *vs; + uint_t vsc; + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, + &value) == 0) { + verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, + &value) == 0); + (void) snprintf(buf, sizeof (buf), "%llu", + (u_longlong_t)value); + path = buf; + } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { + + /* + * If the device is dead (faulted, offline, etc) then don't + * bother opening it. Otherwise we may be forcing the user to + * open a misbehaving device, which can have undesirable + * effects. + */ + if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &vsc) != 0 || + vs->vs_state >= VDEV_STATE_DEGRADED) && + zhp != NULL && + nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { + /* + * Determine if the current path is correct. + */ + char *newdevid = path_to_devid(path); + + if (newdevid == NULL || + strcmp(devid, newdevid) != 0) { + char *newpath; + + if ((newpath = devid_to_path(devid)) != NULL) { + /* + * Update the path appropriately. 
+ */ + set_path(zhp, nv, newpath); + if (nvlist_add_string(nv, + ZPOOL_CONFIG_PATH, newpath) == 0) + verify(nvlist_lookup_string(nv, + ZPOOL_CONFIG_PATH, + &path) == 0); + free(newpath); + } + } + + if (newdevid) + devid_str_free(newdevid); + } + + if (strncmp(path, "/dev/dsk/", 9) == 0) + path += 9; + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, + &value) == 0 && value) { + char *tmp = zfs_strdup(hdl, path); + if (tmp == NULL) + return (NULL); + tmp[strlen(path) - 2] = '\0'; + return (tmp); + } + } else { + verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); + + /* + * If it's a raidz device, we need to stick in the parity level. + */ + if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { + verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, + &value) == 0); + (void) snprintf(buf, sizeof (buf), "%s%llu", path, + (u_longlong_t)value); + path = buf; + } + } + + return (zfs_strdup(hdl, path)); +} + +static int +zbookmark_compare(const void *a, const void *b) +{ + return (memcmp(a, b, sizeof (zbookmark_t))); +} + +/* + * Retrieve the persistent error log, uniquify the members, and return to the + * caller. + */ +int +zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) +{ + zfs_cmd_t zc = { 0 }; + uint64_t count; + zbookmark_t *zb = NULL; + int i; + + /* + * Retrieve the raw error list from the kernel. If the number of errors + * has increased, allocate more space and continue until we get the + * entire list. + */ + verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, + &count) == 0); + if (count == 0) + return (0); + if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, + count * sizeof (zbookmark_t))) == (uintptr_t)NULL) + return (-1); + zc.zc_nvlist_dst_size = count; + (void) strcpy(zc.zc_name, zhp->zpool_name); + for (;;) { + if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, + &zc) != 0) { + free((void *)(uintptr_t)zc.zc_nvlist_dst); + if (errno == ENOMEM) { + count = zc.zc_nvlist_dst_size; + if ((zc.zc_nvlist_dst = (uintptr_t) + zfs_alloc(zhp->zpool_hdl, count * + sizeof (zbookmark_t))) == (uintptr_t)NULL) + return (-1); + } else { + return (-1); + } + } else { + break; + } + } + + /* + * Sort the resulting bookmarks. This is a little confusing due to the + * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last + * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks + * _not_ copied as part of the process. So we point the start of our + * array appropriate and decrement the total number of elements. + */ + zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) + + zc.zc_nvlist_dst_size; + count -= zc.zc_nvlist_dst_size; + + qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare); + + verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); + + /* + * Fill in the nverrlistp with nvlist's of dataset and object numbers. 
+ */ + for (i = 0; i < count; i++) { + nvlist_t *nv; + + /* ignoring zb_blkid and zb_level for now */ + if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && + zb[i-1].zb_object == zb[i].zb_object) + continue; + + if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) + goto nomem; + if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, + zb[i].zb_objset) != 0) { + nvlist_free(nv); + goto nomem; + } + if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, + zb[i].zb_object) != 0) { + nvlist_free(nv); + goto nomem; + } + if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { + nvlist_free(nv); + goto nomem; + } + nvlist_free(nv); + } + + free((void *)(uintptr_t)zc.zc_nvlist_dst); + return (0); + +nomem: + free((void *)(uintptr_t)zc.zc_nvlist_dst); + return (no_memory(zhp->zpool_hdl)); +} + +/* + * Upgrade a ZFS pool to the latest on-disk version. + */ +int +zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) +{ + zfs_cmd_t zc = { 0 }; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) strcpy(zc.zc_name, zhp->zpool_name); + zc.zc_cookie = new_version; + + if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) + return (zpool_standard_error_fmt(hdl, errno, + dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), + zhp->zpool_name)); + return (0); +} + +void +zpool_set_history_str(const char *subcommand, int argc, char **argv, + char *history_str) +{ + int i; + + (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN); + for (i = 1; i < argc; i++) { + if (strlen(history_str) + 1 + strlen(argv[i]) > + HIS_MAX_RECORD_LEN) + break; + (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN); + (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN); + } +} + +/* + * Stage command history for logging. + */ +int +zpool_stage_history(libzfs_handle_t *hdl, const char *history_str) +{ + if (history_str == NULL) + return (EINVAL); + + if (strlen(history_str) > HIS_MAX_RECORD_LEN) + return (EINVAL); + + if (hdl->libzfs_log_str != NULL) + free(hdl->libzfs_log_str); + + if ((hdl->libzfs_log_str = strdup(history_str)) == NULL) + return (no_memory(hdl)); + + return (0); +} + +/* + * Perform ioctl to get some command history of a pool. + * + * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the + * logical offset of the history buffer to start reading from. + * + * Upon return, 'off' is the next logical offset to read from and + * 'len' is the actual amount of bytes read into 'buf'. 
+ */ +static int +get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) +{ + zfs_cmd_t zc = { 0 }; + libzfs_handle_t *hdl = zhp->zpool_hdl; + + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + + zc.zc_history = (uint64_t)(uintptr_t)buf; + zc.zc_history_len = *len; + zc.zc_history_offset = *off; + + if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { + switch (errno) { + case EPERM: + return (zfs_error_fmt(hdl, EZFS_PERM, + dgettext(TEXT_DOMAIN, + "cannot show history for pool '%s'"), + zhp->zpool_name)); + case ENOENT: + return (zfs_error_fmt(hdl, EZFS_NOHISTORY, + dgettext(TEXT_DOMAIN, "cannot get history for pool " + "'%s'"), zhp->zpool_name)); + case ENOTSUP: + return (zfs_error_fmt(hdl, EZFS_BADVERSION, + dgettext(TEXT_DOMAIN, "cannot get history for pool " + "'%s', pool must be upgraded"), zhp->zpool_name)); + default: + return (zpool_standard_error_fmt(hdl, errno, + dgettext(TEXT_DOMAIN, + "cannot get history for '%s'"), zhp->zpool_name)); + } + } + + *len = zc.zc_history_len; + *off = zc.zc_history_offset; + + return (0); +} + +/* + * Process the buffer of nvlists, unpacking and storing each nvlist record + * into 'records'. 'leftover' is set to the number of bytes that weren't + * processed as there wasn't a complete record. + */ +static int +zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, + nvlist_t ***records, uint_t *numrecords) +{ + uint64_t reclen; + nvlist_t *nv; + int i; + + while (bytes_read > sizeof (reclen)) { + + /* get length of packed record (stored as little endian) */ + for (i = 0, reclen = 0; i < sizeof (reclen); i++) + reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); + + if (bytes_read < sizeof (reclen) + reclen) + break; + + /* unpack record */ + if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) + return (ENOMEM); + bytes_read -= sizeof (reclen) + reclen; + buf += sizeof (reclen) + reclen; + + /* add record to nvlist array */ + (*numrecords)++; + if (ISP2(*numrecords + 1)) { + *records = realloc(*records, + *numrecords * 2 * sizeof (nvlist_t *)); + } + (*records)[*numrecords - 1] = nv; + } + + *leftover = bytes_read; + return (0); +} + +#define HIS_BUF_LEN (128*1024) + +/* + * Retrieve the command history of a pool. 
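+ *
+ * A minimal consumer sketch (illustrative only; error handling omitted, and
+ * the per-record field ZPOOL_HIST_CMD is an assumption about what the
+ * caller wants to print):
+ *
+ *	nvlist_t *nvhis, **records;
+ *	uint_t nrecords, i;
+ *
+ *	if (zpool_get_history(zhp, &nvhis) == 0 &&
+ *	    nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
+ *	    &records, &nrecords) == 0) {
+ *		for (i = 0; i < nrecords; i++) {
+ *			char *cmd;
+ *
+ *			if (nvlist_lookup_string(records[i],
+ *			    ZPOOL_HIST_CMD, &cmd) == 0)
+ *				(void) printf("%s\n", cmd);
+ *		}
+ *	}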
+ */ +int +zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) +{ + char buf[HIS_BUF_LEN]; + uint64_t off = 0; + nvlist_t **records = NULL; + uint_t numrecords = 0; + int err, i; + + do { + uint64_t bytes_read = sizeof (buf); + uint64_t leftover; + + if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) + break; + + /* if nothing else was read in, we're at EOF, just return */ + if (!bytes_read) + break; + + if ((err = zpool_history_unpack(buf, bytes_read, + &leftover, &records, &numrecords)) != 0) + break; + off -= leftover; + + /* CONSTCOND */ + } while (1); + + if (!err) { + verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); + verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, + records, numrecords) == 0); + } + for (i = 0; i < numrecords; i++) + nvlist_free(records[i]); + free(records); + + return (err); +} + +void +zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, + char *pathname, size_t len) +{ + zfs_cmd_t zc = { 0 }; + boolean_t mounted = B_FALSE; + char *mntpnt = NULL; + char dsname[MAXNAMELEN]; + + if (dsobj == 0) { + /* special case for the MOS */ + (void) snprintf(pathname, len, ":<0x%llx>", obj); + return; + } + + /* get the dataset's name */ + (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); + zc.zc_obj = dsobj; + if (ioctl(zhp->zpool_hdl->libzfs_fd, + ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { + /* just write out a path of two object numbers */ + (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", + dsobj, obj); + return; + } + (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); + + /* find out if the dataset is mounted */ + mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); + + /* get the corrupted object's path */ + (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); + zc.zc_obj = obj; + if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, + &zc) == 0) { + if (mounted) { + (void) snprintf(pathname, len, "%s%s", mntpnt, + zc.zc_value); + } else { + (void) snprintf(pathname, len, "%s:%s", + dsname, zc.zc_value); + } + } else { + (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); + } + free(mntpnt); +} + +#define RDISK_ROOT "/dev/rdsk" +#define BACKUP_SLICE "s2" +/* + * Don't start the slice at the default block of 34; many storage + * devices will use a stripe width of 128k, so start there instead. + */ +#define NEW_START_BLOCK 256 + +/* + * Read the EFI label from the config, if a label does not exist then + * pass back the error to the caller. If the caller has passed a non-NULL + * diskaddr argument then we set it to the starting address of the EFI + * partition. 
+ */ +static int +read_efi_label(nvlist_t *config, diskaddr_t *sb) +{ + char *path; + int fd; + char diskname[MAXPATHLEN]; + int err = -1; + + if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) + return (err); + + (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, + strrchr(path, '/')); + if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { + struct dk_gpt *vtoc; + + if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { + if (sb != NULL) + *sb = vtoc->efi_parts[0].p_start; + efi_free(vtoc); + } + (void) close(fd); + } + return (err); +} + +/* + * determine where a partition starts on a disk in the current + * configuration + */ +static diskaddr_t +find_start_block(nvlist_t *config) +{ + nvlist_t **child; + uint_t c, children; + diskaddr_t sb = MAXOFFSET_T; + uint64_t wholedisk; + + if (nvlist_lookup_nvlist_array(config, + ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { + if (nvlist_lookup_uint64(config, + ZPOOL_CONFIG_WHOLE_DISK, + &wholedisk) != 0 || !wholedisk) { + return (MAXOFFSET_T); + } + if (read_efi_label(config, &sb) < 0) + sb = MAXOFFSET_T; + return (sb); + } + + for (c = 0; c < children; c++) { + sb = find_start_block(child[c]); + if (sb != MAXOFFSET_T) { + return (sb); + } + } + return (MAXOFFSET_T); +} + +/* + * Label an individual disk. The name provided is the short name, + * stripped of any leading /dev path. + */ +int +zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name) +{ + char path[MAXPATHLEN]; + struct dk_gpt *vtoc; + int fd; + size_t resv = EFI_MIN_RESV_SIZE; + uint64_t slice_size; + diskaddr_t start_block; + char errbuf[1024]; + + /* prepare an error message just in case */ + (void) snprintf(errbuf, sizeof (errbuf), + dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); + + if (zhp) { + nvlist_t *nvroot; + + if (pool_is_bootable(zhp)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "EFI labeled devices are not supported on root " + "pools.")); + return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); + } + + verify(nvlist_lookup_nvlist(zhp->zpool_config, + ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); + + if (zhp->zpool_start_block == 0) + start_block = find_start_block(nvroot); + else + start_block = zhp->zpool_start_block; + zhp->zpool_start_block = start_block; + } else { + /* new pool */ + start_block = NEW_START_BLOCK; + } + + (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, + BACKUP_SLICE); + + if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { + /* + * This shouldn't happen. We've long since verified that this + * is a valid device. + */ + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, "unable to open device")); + return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); + } + + if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { + /* + * The only way this can fail is if we run out of memory, or we + * were unable to read the disk's capacity + */ + if (errno == ENOMEM) + (void) no_memory(hdl); + + (void) close(fd); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "unable to read disk capacity"), name); + + return (zfs_error(hdl, EZFS_NOCAP, errbuf)); + } + + slice_size = vtoc->efi_last_u_lba + 1; + slice_size -= EFI_MIN_RESV_SIZE; + if (start_block == MAXOFFSET_T) + start_block = NEW_START_BLOCK; + slice_size -= start_block; + + vtoc->efi_parts[0].p_start = start_block; + vtoc->efi_parts[0].p_size = slice_size; + + /* + * Why we use V_USR: V_BACKUP confuses users, and is considered + * disposable by some EFI utilities (since EFI doesn't have a backup + * slice). 
V_UNASSIGNED is supposed to be used only for zero size + * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, + * etc. were all pretty specific. V_USR is as close to reality as we + * can get, in the absence of V_OTHER. + */ + vtoc->efi_parts[0].p_tag = V_USR; + (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); + + vtoc->efi_parts[8].p_start = slice_size + start_block; + vtoc->efi_parts[8].p_size = resv; + vtoc->efi_parts[8].p_tag = V_RESERVED; + + if (efi_write(fd, vtoc) != 0) { + /* + * Some block drivers (like pcata) may not support EFI + * GPT labels. Print out a helpful error message dir- + * ecting the user to manually label the disk and give + * a specific slice. + */ + (void) close(fd); + efi_free(vtoc); + + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "try using fdisk(1M) and then provide a specific slice")); + return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); + } + + (void) close(fd); + efi_free(vtoc); + return (0); +} + +static boolean_t +supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) +{ + char *type; + nvlist_t **child; + uint_t children, c; + + verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); + if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 || + strcmp(type, VDEV_TYPE_FILE) == 0 || + strcmp(type, VDEV_TYPE_LOG) == 0 || + strcmp(type, VDEV_TYPE_MISSING) == 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "vdev type '%s' is not supported"), type); + (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); + return (B_FALSE); + } + if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0) { + for (c = 0; c < children; c++) { + if (!supported_dump_vdev_type(hdl, child[c], errbuf)) + return (B_FALSE); + } + } + return (B_TRUE); +} + +/* + * check if this zvol is allowable for use as a dump device; zero if + * it is, > 0 if it isn't, < 0 if it isn't a zvol + */ +int +zvol_check_dump_config(char *arg) +{ + zpool_handle_t *zhp = NULL; + nvlist_t *config, *nvroot; + char *p, *volname; + nvlist_t **top; + uint_t toplevels; + libzfs_handle_t *hdl; + char errbuf[1024]; + char poolname[ZPOOL_MAXNAMELEN]; + int pathlen = strlen(ZVOL_FULL_DEV_DIR); + int ret = 1; + + if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { + return (-1); + } + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "dump is not supported on device '%s'"), arg); + + if ((hdl = libzfs_init()) == NULL) + return (1); + libzfs_print_on_error(hdl, B_TRUE); + + volname = arg + pathlen; + + /* check the configuration of the pool */ + if ((p = strchr(volname, '/')) == NULL) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "malformed dataset name")); + (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); + return (1); + } else if (p - volname >= ZFS_MAXNAMELEN) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "dataset name is too long")); + (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); + return (1); + } else { + (void) strncpy(poolname, volname, p - volname); + poolname[p - volname] = '\0'; + } + + if ((zhp = zpool_open(hdl, poolname)) == NULL) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "could not open pool '%s'"), poolname); + (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); + goto out; + } + config = zpool_get_config(zhp, NULL); + if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) != 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "could not obtain vdev configuration for '%s'"), poolname); + (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); + goto out; + } + + verify(nvlist_lookup_nvlist_array(nvroot, 
ZPOOL_CONFIG_CHILDREN, + &top, &toplevels) == 0); + if (toplevels != 1) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' has multiple top level vdevs"), poolname); + (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf); + goto out; + } + + if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { + goto out; + } + ret = 0; + +out: + if (zhp) + zpool_close(zhp); + libzfs_fini(hdl); + return (ret); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_sendrecv.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_sendrecv.c new file mode 100644 index 000000000000..a3ed5cea8589 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_sendrecv.c @@ -0,0 +1,2102 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "zfs_namecheck.h" +#include "zfs_prop.h" +#include "libzfs_impl.h" + +#include /* XXX */ + +static int zfs_receive_impl(libzfs_handle_t *, const char *, recvflags_t, + int, avl_tree_t *, char **); + +/* + * Routines for dealing with the AVL tree of fs-nvlists + */ +typedef struct fsavl_node { + avl_node_t fn_node; + nvlist_t *fn_nvfs; + char *fn_snapname; + uint64_t fn_guid; +} fsavl_node_t; + +static int +fsavl_compare(const void *arg1, const void *arg2) +{ + const fsavl_node_t *fn1 = arg1; + const fsavl_node_t *fn2 = arg2; + + if (fn1->fn_guid > fn2->fn_guid) + return (+1); + else if (fn1->fn_guid < fn2->fn_guid) + return (-1); + else + return (0); +} + +/* + * Given the GUID of a snapshot, find its containing filesystem and + * (optionally) name. 
+ */ +static nvlist_t * +fsavl_find(avl_tree_t *avl, uint64_t snapguid, char **snapname) +{ + fsavl_node_t fn_find; + fsavl_node_t *fn; + + fn_find.fn_guid = snapguid; + + fn = avl_find(avl, &fn_find, NULL); + if (fn) { + if (snapname) + *snapname = fn->fn_snapname; + return (fn->fn_nvfs); + } + return (NULL); +} + +static void +fsavl_destroy(avl_tree_t *avl) +{ + fsavl_node_t *fn; + void *cookie; + + if (avl == NULL) + return; + + cookie = NULL; + while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL) + free(fn); + avl_destroy(avl); + free(avl); +} + +static avl_tree_t * +fsavl_create(nvlist_t *fss) +{ + avl_tree_t *fsavl; + nvpair_t *fselem = NULL; + + if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL) + return (NULL); + + avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t), + offsetof(fsavl_node_t, fn_node)); + + while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) { + nvlist_t *nvfs, *snaps; + nvpair_t *snapelem = NULL; + + VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs)); + VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps)); + + while ((snapelem = + nvlist_next_nvpair(snaps, snapelem)) != NULL) { + fsavl_node_t *fn; + uint64_t guid; + + VERIFY(0 == nvpair_value_uint64(snapelem, &guid)); + if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) { + fsavl_destroy(fsavl); + return (NULL); + } + fn->fn_nvfs = nvfs; + fn->fn_snapname = nvpair_name(snapelem); + fn->fn_guid = guid; + + /* + * Note: if there are multiple snaps with the + * same GUID, we ignore all but one. + */ + if (avl_find(fsavl, fn, NULL) == NULL) + avl_add(fsavl, fn); + else + free(fn); + } + } + + return (fsavl); +} + +/* + * Routines for dealing with the giant nvlist of fs-nvlists, etc. + */ +typedef struct send_data { + uint64_t parent_fromsnap_guid; + nvlist_t *parent_snaps; + nvlist_t *fss; + nvlist_t *snapprops; + const char *fromsnap; + const char *tosnap; + + /* + * The header nvlist is of the following format: + * { + * "tosnap" -> string + * "fromsnap" -> string (if incremental) + * "fss" -> { + * id -> { + * + * "name" -> string (full name; for debugging) + * "parentfromsnap" -> number (guid of fromsnap in parent) + * + * "props" -> { name -> value (only if set here) } + * "snaps" -> { name (lastname) -> number (guid) } + * "snapprops" -> { name (lastname) -> { name -> value } } + * + * "origin" -> number (guid) (if clone) + * "sent" -> boolean (not on-disk) + * } + * } + * } + * + */ +} send_data_t; + +static void send_iterate_prop(zfs_handle_t *zhp, nvlist_t *nv); + +static int +send_iterate_snap(zfs_handle_t *zhp, void *arg) +{ + send_data_t *sd = arg; + uint64_t guid = zhp->zfs_dmustats.dds_guid; + char *snapname; + nvlist_t *nv; + + snapname = strrchr(zhp->zfs_name, '@')+1; + + VERIFY(0 == nvlist_add_uint64(sd->parent_snaps, snapname, guid)); + /* + * NB: if there is no fromsnap here (it's a newly created fs in + * an incremental replication), we will substitute the tosnap. 
+ */ + if ((sd->fromsnap && strcmp(snapname, sd->fromsnap) == 0) || + (sd->parent_fromsnap_guid == 0 && sd->tosnap && + strcmp(snapname, sd->tosnap) == 0)) { + sd->parent_fromsnap_guid = guid; + } + + VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0)); + send_iterate_prop(zhp, nv); + VERIFY(0 == nvlist_add_nvlist(sd->snapprops, snapname, nv)); + nvlist_free(nv); + + zfs_close(zhp); + return (0); +} + +static void +send_iterate_prop(zfs_handle_t *zhp, nvlist_t *nv) +{ + nvpair_t *elem = NULL; + + while ((elem = nvlist_next_nvpair(zhp->zfs_props, elem)) != NULL) { + char *propname = nvpair_name(elem); + zfs_prop_t prop = zfs_name_to_prop(propname); + nvlist_t *propnv; + + if (!zfs_prop_user(propname) && zfs_prop_readonly(prop)) + continue; + + verify(nvpair_value_nvlist(elem, &propnv) == 0); + if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_RESERVATION) { + /* these guys are modifyable, but have no source */ + uint64_t value; + verify(nvlist_lookup_uint64(propnv, + ZPROP_VALUE, &value) == 0); + if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) + continue; + } else { + char *source; + if (nvlist_lookup_string(propnv, + ZPROP_SOURCE, &source) != 0) + continue; + if (strcmp(source, zhp->zfs_name) != 0) + continue; + } + + if (zfs_prop_user(propname) || + zfs_prop_get_type(prop) == PROP_TYPE_STRING) { + char *value; + verify(nvlist_lookup_string(propnv, + ZPROP_VALUE, &value) == 0); + VERIFY(0 == nvlist_add_string(nv, propname, value)); + } else { + uint64_t value; + verify(nvlist_lookup_uint64(propnv, + ZPROP_VALUE, &value) == 0); + VERIFY(0 == nvlist_add_uint64(nv, propname, value)); + } + } +} + +static int +send_iterate_fs(zfs_handle_t *zhp, void *arg) +{ + send_data_t *sd = arg; + nvlist_t *nvfs, *nv; + int rv; + uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid; + uint64_t guid = zhp->zfs_dmustats.dds_guid; + char guidstring[64]; + + VERIFY(0 == nvlist_alloc(&nvfs, NV_UNIQUE_NAME, 0)); + VERIFY(0 == nvlist_add_string(nvfs, "name", zhp->zfs_name)); + VERIFY(0 == nvlist_add_uint64(nvfs, "parentfromsnap", + sd->parent_fromsnap_guid)); + + if (zhp->zfs_dmustats.dds_origin[0]) { + zfs_handle_t *origin = zfs_open(zhp->zfs_hdl, + zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT); + if (origin == NULL) + return (-1); + VERIFY(0 == nvlist_add_uint64(nvfs, "origin", + origin->zfs_dmustats.dds_guid)); + } + + /* iterate over props */ + VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0)); + send_iterate_prop(zhp, nv); + VERIFY(0 == nvlist_add_nvlist(nvfs, "props", nv)); + nvlist_free(nv); + + /* iterate over snaps, and set sd->parent_fromsnap_guid */ + sd->parent_fromsnap_guid = 0; + VERIFY(0 == nvlist_alloc(&sd->parent_snaps, NV_UNIQUE_NAME, 0)); + VERIFY(0 == nvlist_alloc(&sd->snapprops, NV_UNIQUE_NAME, 0)); + (void) zfs_iter_snapshots(zhp, send_iterate_snap, sd); + VERIFY(0 == nvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps)); + VERIFY(0 == nvlist_add_nvlist(nvfs, "snapprops", sd->snapprops)); + nvlist_free(sd->parent_snaps); + nvlist_free(sd->snapprops); + + /* add this fs to nvlist */ + (void) snprintf(guidstring, sizeof (guidstring), + "0x%llx", (longlong_t)guid); + VERIFY(0 == nvlist_add_nvlist(sd->fss, guidstring, nvfs)); + nvlist_free(nvfs); + + /* iterate over children */ + rv = zfs_iter_filesystems(zhp, send_iterate_fs, sd); + + sd->parent_fromsnap_guid = parent_fromsnap_guid_save; + + zfs_close(zhp); + return (rv); +} + +static int +gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap, + const char *tosnap, nvlist_t **nvlp, avl_tree_t **avlp) +{ + zfs_handle_t *zhp; 
+ send_data_t sd = { 0 }; + int error; + + zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); + if (zhp == NULL) + return (EZFS_BADTYPE); + + VERIFY(0 == nvlist_alloc(&sd.fss, NV_UNIQUE_NAME, 0)); + sd.fromsnap = fromsnap; + sd.tosnap = tosnap; + + if ((error = send_iterate_fs(zhp, &sd)) != 0) { + nvlist_free(sd.fss); + if (avlp != NULL) + *avlp = NULL; + *nvlp = NULL; + return (error); + } + + if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) { + nvlist_free(sd.fss); + *nvlp = NULL; + return (EZFS_NOMEM); + } + + *nvlp = sd.fss; + return (0); +} + +/* + * Routines for dealing with the sorted snapshot functionality + */ +typedef struct zfs_node { + zfs_handle_t *zn_handle; + avl_node_t zn_avlnode; +} zfs_node_t; + +static int +zfs_sort_snaps(zfs_handle_t *zhp, void *data) +{ + avl_tree_t *avl = data; + zfs_node_t *node = zfs_alloc(zhp->zfs_hdl, sizeof (zfs_node_t)); + + node->zn_handle = zhp; + avl_add(avl, node); + return (0); +} + +/* ARGSUSED */ +static int +zfs_snapshot_compare(const void *larg, const void *rarg) +{ + zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle; + zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle; + uint64_t lcreate, rcreate; + + /* + * Sort them according to creation time. We use the hidden + * CREATETXG property to get an absolute ordering of snapshots. + */ + lcreate = zfs_prop_get_int(l, ZFS_PROP_CREATETXG); + rcreate = zfs_prop_get_int(r, ZFS_PROP_CREATETXG); + + if (lcreate < rcreate) + return (-1); + else if (lcreate > rcreate) + return (+1); + else + return (0); +} + +static int +zfs_iter_snapshots_sorted(zfs_handle_t *zhp, zfs_iter_f callback, void *data) +{ + int ret = 0; + zfs_node_t *node; + avl_tree_t avl; + void *cookie = NULL; + + avl_create(&avl, zfs_snapshot_compare, + sizeof (zfs_node_t), offsetof(zfs_node_t, zn_avlnode)); + + ret = zfs_iter_snapshots(zhp, zfs_sort_snaps, &avl); + + for (node = avl_first(&avl); node != NULL; node = AVL_NEXT(&avl, node)) + ret |= callback(node->zn_handle, data); + + while ((node = avl_destroy_nodes(&avl, &cookie)) != NULL) + free(node); + + avl_destroy(&avl); + + return (ret); +} + +/* + * Routines specific to "zfs send" + */ +typedef struct send_dump_data { + /* these are all just the short snapname (the part after the @) */ + const char *fromsnap; + const char *tosnap; + char lastsnap[ZFS_MAXNAMELEN]; + boolean_t seenfrom, seento, replicate, doall, fromorigin; + boolean_t verbose; + int outfd; + boolean_t err; + nvlist_t *fss; + avl_tree_t *fsavl; +} send_dump_data_t; + +/* + * Dumps a backup of the given snapshot (incremental from fromsnap if it's not + * NULL) to the file descriptor specified by outfd. 
+ */ +static int +dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, boolean_t fromorigin, + int outfd) +{ + zfs_cmd_t zc = { 0 }; + libzfs_handle_t *hdl = zhp->zfs_hdl; + + assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); + assert(fromsnap == NULL || fromsnap[0] == '\0' || !fromorigin); + + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + if (fromsnap) + (void) strlcpy(zc.zc_value, fromsnap, sizeof (zc.zc_value)); + zc.zc_cookie = outfd; + zc.zc_obj = fromorigin; + + if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SEND, &zc) != 0) { + char errbuf[1024]; + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "warning: cannot send '%s'"), zhp->zfs_name); + + switch (errno) { + + case EXDEV: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "not an earlier snapshot from the same fs")); + return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); + + case ENOENT: + if (zfs_dataset_exists(hdl, zc.zc_name, + ZFS_TYPE_SNAPSHOT)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "incremental source (@%s) does not exist"), + zc.zc_value); + } + return (zfs_error(hdl, EZFS_NOENT, errbuf)); + + case EDQUOT: + case EFBIG: + case EIO: + case ENOLINK: + case ENOSPC: + case ENOSTR: + case ENXIO: + case EPIPE: + case ERANGE: + case EFAULT: + case EROFS: + zfs_error_aux(hdl, strerror(errno)); + return (zfs_error(hdl, EZFS_BADBACKUP, errbuf)); + + default: + return (zfs_standard_error(hdl, errno, errbuf)); + } + } + + return (0); +} + +static int +dump_snapshot(zfs_handle_t *zhp, void *arg) +{ + send_dump_data_t *sdd = arg; + const char *thissnap; + int err; + + thissnap = strchr(zhp->zfs_name, '@') + 1; + + if (sdd->fromsnap && !sdd->seenfrom && + strcmp(sdd->fromsnap, thissnap) == 0) { + sdd->seenfrom = B_TRUE; + (void) strcpy(sdd->lastsnap, thissnap); + zfs_close(zhp); + return (0); + } + + if (sdd->seento || !sdd->seenfrom) { + zfs_close(zhp); + return (0); + } + + /* send it */ + if (sdd->verbose) { + (void) fprintf(stderr, "sending from @%s to %s\n", + sdd->lastsnap, zhp->zfs_name); + } + + err = dump_ioctl(zhp, sdd->lastsnap, + sdd->lastsnap[0] == '\0' && (sdd->fromorigin || sdd->replicate), + sdd->outfd); + + if (!sdd->seento && strcmp(sdd->tosnap, thissnap) == 0) + sdd->seento = B_TRUE; + + (void) strcpy(sdd->lastsnap, thissnap); + zfs_close(zhp); + return (err); +} + +static int +dump_filesystem(zfs_handle_t *zhp, void *arg) +{ + int rv = 0; + send_dump_data_t *sdd = arg; + boolean_t missingfrom = B_FALSE; + zfs_cmd_t zc = { 0 }; + + (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s", + zhp->zfs_name, sdd->tosnap); + if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) { + (void) fprintf(stderr, "WARNING: " + "could not send %s@%s: does not exist\n", + zhp->zfs_name, sdd->tosnap); + sdd->err = B_TRUE; + return (0); + } + + if (sdd->replicate && sdd->fromsnap) { + /* + * If this fs does not have fromsnap, and we're doing + * recursive, we need to send a full stream from the + * beginning (or an incremental from the origin if this + * is a clone). If we're doing non-recursive, then let + * them get the error. 
+ */ + (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s", + zhp->zfs_name, sdd->fromsnap); + if (ioctl(zhp->zfs_hdl->libzfs_fd, + ZFS_IOC_OBJSET_STATS, &zc) != 0) { + missingfrom = B_TRUE; + } + } + + if (sdd->doall) { + sdd->seenfrom = sdd->seento = sdd->lastsnap[0] = 0; + if (sdd->fromsnap == NULL || missingfrom) + sdd->seenfrom = B_TRUE; + + rv = zfs_iter_snapshots_sorted(zhp, dump_snapshot, arg); + if (!sdd->seenfrom) { + (void) fprintf(stderr, + "WARNING: could not send %s@%s:\n" + "incremental source (%s@%s) does not exist\n", + zhp->zfs_name, sdd->tosnap, + zhp->zfs_name, sdd->fromsnap); + sdd->err = B_TRUE; + } else if (!sdd->seento) { + (void) fprintf(stderr, + "WARNING: could not send %s@%s:\n" + "incremental source (%s@%s) " + "is not earlier than it\n", + zhp->zfs_name, sdd->tosnap, + zhp->zfs_name, sdd->fromsnap); + sdd->err = B_TRUE; + } + } else { + zfs_handle_t *snapzhp; + char snapname[ZFS_MAXNAMELEN]; + + (void) snprintf(snapname, sizeof (snapname), "%s@%s", + zfs_get_name(zhp), sdd->tosnap); + snapzhp = zfs_open(zhp->zfs_hdl, snapname, ZFS_TYPE_SNAPSHOT); + if (snapzhp == NULL) { + rv = -1; + } else { + rv = dump_ioctl(snapzhp, + missingfrom ? NULL : sdd->fromsnap, + sdd->fromorigin || missingfrom, + sdd->outfd); + sdd->seento = B_TRUE; + zfs_close(snapzhp); + } + } + + return (rv); +} + +static int +dump_filesystems(zfs_handle_t *rzhp, void *arg) +{ + send_dump_data_t *sdd = arg; + nvpair_t *fspair; + boolean_t needagain, progress; + + if (!sdd->replicate) + return (dump_filesystem(rzhp, sdd)); + +again: + needagain = progress = B_FALSE; + for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair; + fspair = nvlist_next_nvpair(sdd->fss, fspair)) { + nvlist_t *fslist; + char *fsname; + zfs_handle_t *zhp; + int err; + uint64_t origin_guid = 0; + nvlist_t *origin_nv; + + VERIFY(nvpair_value_nvlist(fspair, &fslist) == 0); + if (nvlist_lookup_boolean(fslist, "sent") == 0) + continue; + + VERIFY(nvlist_lookup_string(fslist, "name", &fsname) == 0); + (void) nvlist_lookup_uint64(fslist, "origin", &origin_guid); + + origin_nv = fsavl_find(sdd->fsavl, origin_guid, NULL); + if (origin_nv && + nvlist_lookup_boolean(origin_nv, "sent") == ENOENT) { + /* + * origin has not been sent yet; + * skip this clone. + */ + needagain = B_TRUE; + continue; + } + + zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET); + if (zhp == NULL) + return (-1); + err = dump_filesystem(zhp, sdd); + VERIFY(nvlist_add_boolean(fslist, "sent") == 0); + progress = B_TRUE; + zfs_close(zhp); + if (err) + return (err); + } + if (needagain) { + assert(progress); + goto again; + } + return (0); +} + +/* + * Dumps a backup of tosnap, incremental from fromsnap if it isn't NULL. + * If 'doall', dump all intermediate snaps. + * If 'replicate', dump special header and do recursively. 
+ */ +int +zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap, + boolean_t replicate, boolean_t doall, boolean_t fromorigin, + boolean_t verbose, int outfd) +{ + char errbuf[1024]; + send_dump_data_t sdd = { 0 }; + int err; + nvlist_t *fss = NULL; + avl_tree_t *fsavl = NULL; + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot send '%s'"), zhp->zfs_name); + + if (fromsnap && fromsnap[0] == '\0') { + zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, + "zero-length incremental source")); + return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf)); + } + + if (replicate || doall) { + dmu_replay_record_t drr = { 0 }; + char *packbuf = NULL; + size_t buflen = 0; + zio_cksum_t zc = { 0 }; + + assert(fromsnap || doall); + + if (replicate) { + nvlist_t *hdrnv; + + VERIFY(0 == nvlist_alloc(&hdrnv, NV_UNIQUE_NAME, 0)); + if (fromsnap) { + VERIFY(0 == nvlist_add_string(hdrnv, + "fromsnap", fromsnap)); + } + VERIFY(0 == nvlist_add_string(hdrnv, "tosnap", tosnap)); + + err = gather_nvlist(zhp->zfs_hdl, zhp->zfs_name, + fromsnap, tosnap, &fss, &fsavl); + if (err) + return (err); + VERIFY(0 == nvlist_add_nvlist(hdrnv, "fss", fss)); + err = nvlist_pack(hdrnv, &packbuf, &buflen, + NV_ENCODE_XDR, 0); + nvlist_free(hdrnv); + if (err) { + fsavl_destroy(fsavl); + nvlist_free(fss); + return (zfs_standard_error(zhp->zfs_hdl, + err, errbuf)); + } + } + + /* write first begin record */ + drr.drr_type = DRR_BEGIN; + drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC; + drr.drr_u.drr_begin.drr_version = DMU_BACKUP_HEADER_VERSION; + (void) snprintf(drr.drr_u.drr_begin.drr_toname, + sizeof (drr.drr_u.drr_begin.drr_toname), + "%s@%s", zhp->zfs_name, tosnap); + drr.drr_payloadlen = buflen; + fletcher_4_incremental_native(&drr, sizeof (drr), &zc); + err = write(outfd, &drr, sizeof (drr)); + + /* write header nvlist */ + if (err != -1) { + fletcher_4_incremental_native(packbuf, buflen, &zc); + err = write(outfd, packbuf, buflen); + } + free(packbuf); + if (err == -1) { + fsavl_destroy(fsavl); + nvlist_free(fss); + return (zfs_standard_error(zhp->zfs_hdl, + errno, errbuf)); + } + + /* write end record */ + if (err != -1) { + bzero(&drr, sizeof (drr)); + drr.drr_type = DRR_END; + drr.drr_u.drr_end.drr_checksum = zc; + err = write(outfd, &drr, sizeof (drr)); + if (err == -1) { + fsavl_destroy(fsavl); + nvlist_free(fss); + return (zfs_standard_error(zhp->zfs_hdl, + errno, errbuf)); + } + } + } + + /* dump each stream */ + sdd.fromsnap = fromsnap; + sdd.tosnap = tosnap; + sdd.outfd = outfd; + sdd.replicate = replicate; + sdd.doall = doall; + sdd.fromorigin = fromorigin; + sdd.fss = fss; + sdd.fsavl = fsavl; + sdd.verbose = verbose; + err = dump_filesystems(zhp, &sdd); + fsavl_destroy(fsavl); + nvlist_free(fss); + + if (replicate || doall) { + /* + * write final end record. NB: want to do this even if + * there was some error, because it might not be totally + * failed. 
+ */ + dmu_replay_record_t drr = { 0 }; + drr.drr_type = DRR_END; + if (write(outfd, &drr, sizeof (drr)) == -1) { + return (zfs_standard_error(zhp->zfs_hdl, + errno, errbuf)); + } + } + + return (err || sdd.err); +} + +/* + * Routines specific to "zfs recv" + */ + +static int +recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen, + boolean_t byteswap, zio_cksum_t *zc) +{ + char *cp = buf; + int rv; + int len = ilen; + + do { + rv = read(fd, cp, len); + cp += rv; + len -= rv; + } while (rv > 0); + + if (rv < 0 || len != 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "failed to read from stream")); + return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN, + "cannot receive"))); + } + + if (zc) { + if (byteswap) + fletcher_4_incremental_byteswap(buf, ilen, zc); + else + fletcher_4_incremental_native(buf, ilen, zc); + } + return (0); +} + +static int +recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp, + boolean_t byteswap, zio_cksum_t *zc) +{ + char *buf; + int err; + + buf = zfs_alloc(hdl, len); + if (buf == NULL) + return (ENOMEM); + + err = recv_read(hdl, fd, buf, len, byteswap, zc); + if (err != 0) { + free(buf); + return (err); + } + + err = nvlist_unpack(buf, len, nvp, 0); + free(buf); + if (err != 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " + "stream (malformed nvlist)")); + return (EINVAL); + } + return (0); +} + +static int +recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname, + int baselen, char *newname, recvflags_t flags) +{ + static int seq; + zfs_cmd_t zc = { 0 }; + int err; + prop_changelist_t *clp; + zfs_handle_t *zhp; + + zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET); + if (zhp == NULL) + return (-1); + clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, + flags.force ? MS_FORCE : 0); + zfs_close(zhp); + if (clp == NULL) + return (-1); + err = changelist_prefix(clp); + if (err) + return (err); + + if (tryname) { + (void) strcpy(newname, tryname); + + zc.zc_objset_type = DMU_OST_ZFS; + (void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name)); + (void) strlcpy(zc.zc_value, tryname, sizeof (zc.zc_value)); + + if (flags.verbose) { + (void) printf("attempting rename %s to %s\n", + zc.zc_name, zc.zc_value); + } + err = ioctl(hdl->libzfs_fd, ZFS_IOC_RENAME, &zc); + if (err == 0) + changelist_rename(clp, name, tryname); + } else { + err = ENOENT; + } + + if (err != 0 && strncmp(name+baselen, "recv-", 5) != 0) { + seq++; + + (void) strncpy(newname, name, baselen); + (void) snprintf(newname+baselen, ZFS_MAXNAMELEN-baselen, + "recv-%u-%u", getpid(), seq); + (void) strlcpy(zc.zc_value, newname, sizeof (zc.zc_value)); + + if (flags.verbose) { + (void) printf("failed - trying rename %s to %s\n", + zc.zc_name, zc.zc_value); + } + err = ioctl(hdl->libzfs_fd, ZFS_IOC_RENAME, &zc); + if (err == 0) + changelist_rename(clp, name, newname); + if (err && flags.verbose) { + (void) printf("failed (%u) - " + "will try again on next pass\n", errno); + } + err = EAGAIN; + } else if (flags.verbose) { + if (err == 0) + (void) printf("success\n"); + else + (void) printf("failed (%u)\n", errno); + } + + (void) changelist_postfix(clp); + changelist_free(clp); + + return (err); +} + +static int +recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen, + char *newname, recvflags_t flags) +{ + zfs_cmd_t zc = { 0 }; + int err = 0; + prop_changelist_t *clp; + zfs_handle_t *zhp; + + zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET); + if (zhp == NULL) + return (-1); + clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, + flags.force ? 
MS_FORCE : 0); + zfs_close(zhp); + if (clp == NULL) + return (-1); + err = changelist_prefix(clp); + if (err) + return (err); + + zc.zc_objset_type = DMU_OST_ZFS; + (void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name)); + + if (flags.verbose) + (void) printf("attempting destroy %s\n", zc.zc_name); + err = ioctl(hdl->libzfs_fd, ZFS_IOC_DESTROY, &zc); + + if (err == 0) { + if (flags.verbose) + (void) printf("success\n"); + changelist_remove(clp, zc.zc_name); + } + + (void) changelist_postfix(clp); + changelist_free(clp); + + if (err != 0) + err = recv_rename(hdl, name, NULL, baselen, newname, flags); + + return (err); +} + +typedef struct guid_to_name_data { + uint64_t guid; + char *name; +} guid_to_name_data_t; + +static int +guid_to_name_cb(zfs_handle_t *zhp, void *arg) +{ + guid_to_name_data_t *gtnd = arg; + int err; + + if (zhp->zfs_dmustats.dds_guid == gtnd->guid) { + (void) strcpy(gtnd->name, zhp->zfs_name); + return (EEXIST); + } + err = zfs_iter_children(zhp, guid_to_name_cb, gtnd); + zfs_close(zhp); + return (err); +} + +static int +guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid, + char *name) +{ + /* exhaustive search all local snapshots */ + guid_to_name_data_t gtnd; + int err = 0; + zfs_handle_t *zhp; + char *cp; + + gtnd.guid = guid; + gtnd.name = name; + + if (strchr(parent, '@') == NULL) { + zhp = make_dataset_handle(hdl, parent); + if (zhp != NULL) { + err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd); + zfs_close(zhp); + if (err == EEXIST) + return (0); + } + } + + cp = strchr(parent, '/'); + if (cp) + *cp = '\0'; + zhp = make_dataset_handle(hdl, parent); + if (cp) + *cp = '/'; + + if (zhp) { + err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd); + zfs_close(zhp); + } + + return (err == EEXIST ? 0 : ENOENT); + +} + +/* + * Return true if dataset guid1 is created before guid2.
+ */ +static int +created_before(libzfs_handle_t *hdl, avl_tree_t *avl, + uint64_t guid1, uint64_t guid2) +{ + nvlist_t *nvfs; + char *fsname, *snapname; + char buf[ZFS_MAXNAMELEN]; + int rv; + zfs_node_t zn1, zn2; + + if (guid2 == 0) + return (0); + if (guid1 == 0) + return (1); + + nvfs = fsavl_find(avl, guid1, &snapname); + VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname)); + (void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname); + zn1.zn_handle = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT); + if (zn1.zn_handle == NULL) + return (-1); + + nvfs = fsavl_find(avl, guid2, &snapname); + VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname)); + (void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname); + zn2.zn_handle = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT); + if (zn2.zn_handle == NULL) { + zfs_close(zn2.zn_handle); + return (-1); + } + + rv = (zfs_snapshot_compare(&zn1, &zn2) == -1); + + zfs_close(zn1.zn_handle); + zfs_close(zn2.zn_handle); + + return (rv); +} + +static int +recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs, + recvflags_t flags, nvlist_t *stream_nv, avl_tree_t *stream_avl) +{ + nvlist_t *local_nv; + avl_tree_t *local_avl; + nvpair_t *fselem, *nextfselem; + char *tosnap, *fromsnap; + char newname[ZFS_MAXNAMELEN]; + int error; + boolean_t needagain, progress; + + VERIFY(0 == nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap)); + VERIFY(0 == nvlist_lookup_string(stream_nv, "tosnap", &tosnap)); + + if (flags.dryrun) + return (0); + +again: + needagain = progress = B_FALSE; + + if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL, + &local_nv, &local_avl)) != 0) + return (error); + + /* + * Process deletes and renames + */ + for (fselem = nvlist_next_nvpair(local_nv, NULL); + fselem; fselem = nextfselem) { + nvlist_t *nvfs, *snaps; + nvlist_t *stream_nvfs = NULL; + nvpair_t *snapelem, *nextsnapelem; + uint64_t fromguid = 0; + uint64_t originguid = 0; + uint64_t stream_originguid = 0; + uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid; + char *fsname, *stream_fsname; + + nextfselem = nvlist_next_nvpair(local_nv, fselem); + + VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs)); + VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps)); + VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname)); + VERIFY(0 == nvlist_lookup_uint64(nvfs, "parentfromsnap", + &parent_fromsnap_guid)); + (void) nvlist_lookup_uint64(nvfs, "origin", &originguid); + + /* + * First find the stream's fs, so we can check for + * a different origin (due to "zfs promote") + */ + for (snapelem = nvlist_next_nvpair(snaps, NULL); + snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) { + uint64_t thisguid; + + VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid)); + stream_nvfs = fsavl_find(stream_avl, thisguid, NULL); + + if (stream_nvfs != NULL) + break; + } + + /* check for promote */ + (void) nvlist_lookup_uint64(stream_nvfs, "origin", + &stream_originguid); + if (stream_nvfs && originguid != stream_originguid) { + switch (created_before(hdl, local_avl, + stream_originguid, originguid)) { + case 1: { + /* promote it! 
*/ + zfs_cmd_t zc = { 0 }; + nvlist_t *origin_nvfs; + char *origin_fsname; + + if (flags.verbose) + (void) printf("promoting %s\n", fsname); + + origin_nvfs = fsavl_find(local_avl, originguid, + NULL); + VERIFY(0 == nvlist_lookup_string(origin_nvfs, + "name", &origin_fsname)); + (void) strlcpy(zc.zc_value, origin_fsname, + sizeof (zc.zc_value)); + (void) strlcpy(zc.zc_name, fsname, + sizeof (zc.zc_name)); + error = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc); + if (error == 0) + progress = B_TRUE; + break; + } + default: + break; + case -1: + fsavl_destroy(local_avl); + nvlist_free(local_nv); + return (-1); + } + /* + * We had/have the wrong origin, therefore our + * list of snapshots is wrong. Need to handle + * them on the next pass. + */ + needagain = B_TRUE; + continue; + } + + for (snapelem = nvlist_next_nvpair(snaps, NULL); + snapelem; snapelem = nextsnapelem) { + uint64_t thisguid; + char *stream_snapname; + nvlist_t *found, *props; + + nextsnapelem = nvlist_next_nvpair(snaps, snapelem); + + VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid)); + found = fsavl_find(stream_avl, thisguid, + &stream_snapname); + + /* check for delete */ + if (found == NULL) { + char name[ZFS_MAXNAMELEN]; + + if (!flags.force) + continue; + + (void) snprintf(name, sizeof (name), "%s@%s", + fsname, nvpair_name(snapelem)); + + error = recv_destroy(hdl, name, + strlen(fsname)+1, newname, flags); + if (error) + needagain = B_TRUE; + else + progress = B_TRUE; + continue; + } + + stream_nvfs = found; + + if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops", + &props) && 0 == nvlist_lookup_nvlist(props, + stream_snapname, &props)) { + zfs_cmd_t zc = { 0 }; + + zc.zc_cookie = B_TRUE; /* clear current props */ + (void) snprintf(zc.zc_name, sizeof (zc.zc_name), + "%s@%s", fsname, nvpair_name(snapelem)); + if (zcmd_write_src_nvlist(hdl, &zc, + props) == 0) { + (void) zfs_ioctl(hdl, + ZFS_IOC_SET_PROP, &zc); + zcmd_free_nvlists(&zc); + } + } + + /* check for different snapname */ + if (strcmp(nvpair_name(snapelem), + stream_snapname) != 0) { + char name[ZFS_MAXNAMELEN]; + char tryname[ZFS_MAXNAMELEN]; + + (void) snprintf(name, sizeof (name), "%s@%s", + fsname, nvpair_name(snapelem)); + (void) snprintf(tryname, sizeof (name), "%s@%s", + fsname, stream_snapname); + + error = recv_rename(hdl, name, tryname, + strlen(fsname)+1, newname, flags); + if (error) + needagain = B_TRUE; + else + progress = B_TRUE; + } + + if (strcmp(stream_snapname, fromsnap) == 0) + fromguid = thisguid; + } + + /* check for delete */ + if (stream_nvfs == NULL) { + if (!flags.force) + continue; + + error = recv_destroy(hdl, fsname, strlen(tofs)+1, + newname, flags); + if (error) + needagain = B_TRUE; + else + progress = B_TRUE; + continue; + } + + if (fromguid == 0 && flags.verbose) { + (void) printf("local fs %s does not have fromsnap " + "(%s in stream); must have been deleted locally; " + "ignoring\n", fsname, fromsnap); + continue; + } + + VERIFY(0 == nvlist_lookup_string(stream_nvfs, + "name", &stream_fsname)); + VERIFY(0 == nvlist_lookup_uint64(stream_nvfs, + "parentfromsnap", &stream_parent_fromsnap_guid)); + + /* check for rename */ + if ((stream_parent_fromsnap_guid != 0 && + stream_parent_fromsnap_guid != parent_fromsnap_guid) || + strcmp(strrchr(fsname, '/'), + strrchr(stream_fsname, '/')) != 0) { + nvlist_t *parent; + char tryname[ZFS_MAXNAMELEN]; + + parent = fsavl_find(local_avl, + stream_parent_fromsnap_guid, NULL); + /* + * NB: parent might not be found if we used the + * tosnap for stream_parent_fromsnap_guid, + * because the 
parent is a newly-created fs; + * we'll be able to rename it after we recv the + * new fs. + */ + if (parent != NULL) { + char *pname; + + VERIFY(0 == nvlist_lookup_string(parent, "name", + &pname)); + (void) snprintf(tryname, sizeof (tryname), + "%s%s", pname, strrchr(stream_fsname, '/')); + } else { + tryname[0] = '\0'; + if (flags.verbose) { + (void) printf("local fs %s new parent " + "not found\n", fsname); + } + } + + error = recv_rename(hdl, fsname, tryname, + strlen(tofs)+1, newname, flags); + if (error) + needagain = B_TRUE; + else + progress = B_TRUE; + } + } + + fsavl_destroy(local_avl); + nvlist_free(local_nv); + + if (needagain && progress) { + /* do another pass to fix up temporary names */ + if (flags.verbose) + (void) printf("another pass:\n"); + goto again; + } + + return (needagain); +} + +static int +zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname, + recvflags_t flags, dmu_replay_record_t *drr, zio_cksum_t *zc, + char **top_zfs) +{ + nvlist_t *stream_nv = NULL; + avl_tree_t *stream_avl = NULL; + char *fromsnap = NULL; + char tofs[ZFS_MAXNAMELEN]; + char errbuf[1024]; + dmu_replay_record_t drre; + int error; + boolean_t anyerr = B_FALSE; + boolean_t softerr = B_FALSE; + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot receive")); + + if (strchr(destname, '@')) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "can not specify snapshot name for multi-snapshot stream")); + return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); + } + + assert(drr->drr_type == DRR_BEGIN); + assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC); + assert(drr->drr_u.drr_begin.drr_version == DMU_BACKUP_HEADER_VERSION); + + /* + * Read in the nvlist from the stream. + */ + if (drr->drr_payloadlen != 0) { + if (!flags.isprefix) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "must use -d to receive replication " + "(send -R) stream")); + return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); + } + + error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen, + &stream_nv, flags.byteswap, zc); + if (error) { + error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); + goto out; + } + } + + /* + * Read in the end record and verify checksum. 
+ */ + if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre), + flags.byteswap, NULL))) + goto out; + if (flags.byteswap) { + drre.drr_type = BSWAP_32(drre.drr_type); + drre.drr_u.drr_end.drr_checksum.zc_word[0] = + BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]); + drre.drr_u.drr_end.drr_checksum.zc_word[1] = + BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]); + drre.drr_u.drr_end.drr_checksum.zc_word[2] = + BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]); + drre.drr_u.drr_end.drr_checksum.zc_word[3] = + BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]); + } + if (drre.drr_type != DRR_END) { + error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); + goto out; + } + if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "incorrect header checksum")); + error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); + goto out; + } + + (void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap); + + if (drr->drr_payloadlen != 0) { + nvlist_t *stream_fss; + + VERIFY(0 == nvlist_lookup_nvlist(stream_nv, "fss", + &stream_fss)); + if ((stream_avl = fsavl_create(stream_fss)) == NULL) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "couldn't allocate avl tree")); + error = zfs_error(hdl, EZFS_NOMEM, errbuf); + goto out; + } + + if (fromsnap != NULL) { + (void) strlcpy(tofs, destname, ZFS_MAXNAMELEN); + if (flags.isprefix) { + int i = strcspn(drr->drr_u.drr_begin.drr_toname, + "/@"); + /* zfs_receive_one() will create_parents() */ + (void) strlcat(tofs, + &drr->drr_u.drr_begin.drr_toname[i], + ZFS_MAXNAMELEN); + *strchr(tofs, '@') = '\0'; + } + softerr = recv_incremental_replication(hdl, tofs, + flags, stream_nv, stream_avl); + } + } + + + /* Finally, receive each contained stream */ + do { + /* + * we should figure out if it has a recoverable + * error, in which case do a recv_skip() and drive on. + * Note, if we fail due to already having this guid, + * zfs_receive_one() will take care of it (ie, + * recv_skip() and return 0). + */ + error = zfs_receive_impl(hdl, destname, flags, fd, + stream_avl, top_zfs); + if (error == ENODATA) { + error = 0; + break; + } + anyerr |= error; + } while (error == 0); + + if (drr->drr_payloadlen != 0 && fromsnap != NULL) { + /* + * Now that we have the fs's they sent us, try the + * renames again. + */ + softerr = recv_incremental_replication(hdl, tofs, flags, + stream_nv, stream_avl); + } + +out: + fsavl_destroy(stream_avl); + if (stream_nv) + nvlist_free(stream_nv); + if (softerr) + error = -2; + if (anyerr) + error = -1; + return (error); +} + +static int +recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap) +{ + dmu_replay_record_t *drr; + void *buf = malloc(1<<20); + + /* XXX would be great to use lseek if possible... */ + drr = buf; + + while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t), + byteswap, NULL) == 0) { + if (byteswap) + drr->drr_type = BSWAP_32(drr->drr_type); + + switch (drr->drr_type) { + case DRR_BEGIN: + /* NB: not to be used on v2 stream packages */ + assert(drr->drr_payloadlen == 0); + break; + + case DRR_END: + free(buf); + return (0); + + case DRR_OBJECT: + if (byteswap) { + drr->drr_u.drr_object.drr_bonuslen = + BSWAP_32(drr->drr_u.drr_object. 
+ drr_bonuslen); + } + (void) recv_read(hdl, fd, buf, + P2ROUNDUP(drr->drr_u.drr_object.drr_bonuslen, 8), + B_FALSE, NULL); + break; + + case DRR_WRITE: + if (byteswap) { + drr->drr_u.drr_write.drr_length = + BSWAP_64(drr->drr_u.drr_write.drr_length); + } + (void) recv_read(hdl, fd, buf, + drr->drr_u.drr_write.drr_length, B_FALSE, NULL); + break; + + case DRR_FREEOBJECTS: + case DRR_FREE: + break; + + default: + assert(!"invalid record type"); + } + } + + free(buf); + return (-1); +} + +/* + * Restores a backup of tosnap from the file descriptor specified by infd. + */ +static int +zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap, + recvflags_t flags, dmu_replay_record_t *drr, + dmu_replay_record_t *drr_noswap, avl_tree_t *stream_avl, + char **top_zfs) +{ + zfs_cmd_t zc = { 0 }; + time_t begin_time; + int ioctl_err, ioctl_errno, err, choplen; + char *cp; + struct drr_begin *drrb = &drr->drr_u.drr_begin; + char errbuf[1024]; + char chopprefix[ZFS_MAXNAMELEN]; + boolean_t newfs = B_FALSE; + boolean_t stream_wantsnewfs; + uint64_t parent_snapguid = 0; + prop_changelist_t *clp = NULL; + nvlist_t *snapprops_nvlist = NULL; + + begin_time = time(NULL); + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot receive")); + + if (stream_avl != NULL) { + char *snapname; + nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid, + &snapname); + nvlist_t *props; + int ret; + + (void) nvlist_lookup_uint64(fs, "parentfromsnap", + &parent_snapguid); + err = nvlist_lookup_nvlist(fs, "props", &props); + if (err) + VERIFY(0 == nvlist_alloc(&props, NV_UNIQUE_NAME, 0)); + + if (flags.canmountoff) { + VERIFY(0 == nvlist_add_uint64(props, + zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0)); + } + ret = zcmd_write_src_nvlist(hdl, &zc, props); + if (err) + nvlist_free(props); + + if (0 == nvlist_lookup_nvlist(fs, "snapprops", &props)) { + VERIFY(0 == nvlist_lookup_nvlist(props, + snapname, &snapprops_nvlist)); + } + + if (ret != 0) + return (-1); + } + + /* + * Determine how much of the snapshot name stored in the stream + * we are going to tack on to the name they specified on the + * command line, and how much we are going to chop off. + * + * If they specified a snapshot, chop the entire name stored in + * the stream. + */ + (void) strcpy(chopprefix, drrb->drr_toname); + if (flags.isprefix) { + /* + * They specified a fs with -d, we want to tack on + * everything but the pool name stored in the stream + */ + if (strchr(tosnap, '@')) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " + "argument - snapshot not allowed with -d")); + return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); + } + cp = strchr(chopprefix, '/'); + if (cp == NULL) + cp = strchr(chopprefix, '@'); + *cp = '\0'; + } else if (strchr(tosnap, '@') == NULL) { + /* + * If they specified a filesystem without -d, we want to + * tack on everything after the fs specified in the + * first name from the stream. + */ + cp = strchr(chopprefix, '@'); + *cp = '\0'; + } + choplen = strlen(chopprefix); + + /* + * Determine name of destination snapshot, store in zc_value. + */ + (void) strcpy(zc.zc_value, tosnap); + (void) strncat(zc.zc_value, drrb->drr_toname+choplen, + sizeof (zc.zc_value)); + if (!zfs_name_valid(zc.zc_value, ZFS_TYPE_SNAPSHOT)) { + zcmd_free_nvlists(&zc); + return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); + } + + /* + * Determine the name of the origin snapshot, store in zc_string. 
+ */ + if (drrb->drr_flags & DRR_FLAG_CLONE) { + if (guid_to_name(hdl, tosnap, + drrb->drr_fromguid, zc.zc_string) != 0) { + zcmd_free_nvlists(&zc); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "local origin for clone %s does not exist"), + zc.zc_value); + return (zfs_error(hdl, EZFS_NOENT, errbuf)); + } + if (flags.verbose) + (void) printf("found clone origin %s\n", zc.zc_string); + } + + stream_wantsnewfs = (drrb->drr_fromguid == NULL || + (drrb->drr_flags & DRR_FLAG_CLONE)); + + if (stream_wantsnewfs) { + /* + * if the parent fs does not exist, look for it based on + * the parent snap GUID + */ + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot receive new filesystem stream")); + + (void) strcpy(zc.zc_name, zc.zc_value); + cp = strrchr(zc.zc_name, '/'); + if (cp) + *cp = '\0'; + if (cp && + !zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) { + char suffix[ZFS_MAXNAMELEN]; + (void) strcpy(suffix, strrchr(zc.zc_value, '/')); + if (guid_to_name(hdl, tosnap, parent_snapguid, + zc.zc_value) == 0) { + *strchr(zc.zc_value, '@') = '\0'; + (void) strcat(zc.zc_value, suffix); + } + } + } else { + /* + * if the fs does not exist, look for it based on the + * fromsnap GUID + */ + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot receive incremental stream")); + + (void) strcpy(zc.zc_name, zc.zc_value); + *strchr(zc.zc_name, '@') = '\0'; + + if (!zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) { + char snap[ZFS_MAXNAMELEN]; + (void) strcpy(snap, strchr(zc.zc_value, '@')); + if (guid_to_name(hdl, tosnap, drrb->drr_fromguid, + zc.zc_value) == 0) { + *strchr(zc.zc_value, '@') = '\0'; + (void) strcat(zc.zc_value, snap); + } + } + } + + (void) strcpy(zc.zc_name, zc.zc_value); + *strchr(zc.zc_name, '@') = '\0'; + + if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) { + zfs_handle_t *zhp; + /* + * Destination fs exists. Therefore this should either + * be an incremental, or the stream specifies a new fs + * (full stream or clone) and they want us to blow it + * away (and have therefore specified -F and removed any + * snapshots). + */ + + if (stream_wantsnewfs) { + if (!flags.force) { + zcmd_free_nvlists(&zc); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "destination '%s' exists\n" + "must specify -F to overwrite it"), + zc.zc_name); + return (zfs_error(hdl, EZFS_EXISTS, errbuf)); + } + if (ioctl(hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT, + &zc) == 0) { + zcmd_free_nvlists(&zc); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "destination has snapshots (eg. 
%s)\n" + "must destroy them to overwrite it"), + zc.zc_name); + return (zfs_error(hdl, EZFS_EXISTS, errbuf)); + } + } + + if ((zhp = zfs_open(hdl, zc.zc_name, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) { + zcmd_free_nvlists(&zc); + return (-1); + } + + if (stream_wantsnewfs && + zhp->zfs_dmustats.dds_origin[0]) { + zcmd_free_nvlists(&zc); + zfs_close(zhp); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "destination '%s' is a clone\n" + "must destroy it to overwrite it"), + zc.zc_name); + return (zfs_error(hdl, EZFS_EXISTS, errbuf)); + } + + if (!flags.dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM && + stream_wantsnewfs) { + /* We can't do online recv in this case */ + clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, 0); + if (clp == NULL) { + zcmd_free_nvlists(&zc); + return (-1); + } + if (changelist_prefix(clp) != 0) { + changelist_free(clp); + zcmd_free_nvlists(&zc); + return (-1); + } + } + if (!flags.dryrun && zhp->zfs_type == ZFS_TYPE_VOLUME && + zvol_remove_link(hdl, zhp->zfs_name) != 0) { + zfs_close(zhp); + zcmd_free_nvlists(&zc); + return (-1); + } + zfs_close(zhp); + } else { + /* + * Destination filesystem does not exist. Therefore we better + * be creating a new filesystem (either from a full backup, or + * a clone). It would therefore be invalid if the user + * specified only the pool name (i.e. if the destination name + * contained no slash character). + */ + if (!stream_wantsnewfs || + (cp = strrchr(zc.zc_name, '/')) == NULL) { + zcmd_free_nvlists(&zc); + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "destination '%s' does not exist"), zc.zc_name); + return (zfs_error(hdl, EZFS_NOENT, errbuf)); + } + + /* + * Trim off the final dataset component so we perform the + * recvbackup ioctl to the filesystems's parent. + */ + *cp = '\0'; + + if (flags.isprefix && !flags.dryrun && + create_parents(hdl, zc.zc_value, strlen(tosnap)) != 0) { + zcmd_free_nvlists(&zc); + return (zfs_error(hdl, EZFS_BADRESTORE, errbuf)); + } + + newfs = B_TRUE; + } + + zc.zc_begin_record = drr_noswap->drr_u.drr_begin; + zc.zc_cookie = infd; + zc.zc_guid = flags.force; + if (flags.verbose) { + (void) printf("%s %s stream of %s into %s\n", + flags.dryrun ? "would receive" : "receiving", + drrb->drr_fromguid ? "incremental" : "full", + drrb->drr_toname, zc.zc_value); + (void) fflush(stdout); + } + + if (flags.dryrun) { + zcmd_free_nvlists(&zc); + return (recv_skip(hdl, infd, flags.byteswap)); + } + + err = ioctl_err = zfs_ioctl(hdl, ZFS_IOC_RECV, &zc); + ioctl_errno = errno; + zcmd_free_nvlists(&zc); + + if (err == 0 && snapprops_nvlist) { + zfs_cmd_t zc2 = { 0 }; + + (void) strcpy(zc2.zc_name, zc.zc_value); + if (zcmd_write_src_nvlist(hdl, &zc2, snapprops_nvlist) == 0) { + (void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc2); + zcmd_free_nvlists(&zc2); + } + } + + if (err && (ioctl_errno == ENOENT || ioctl_errno == ENODEV)) { + /* + * It may be that this snapshot already exists, + * in which case we want to consume & ignore it + * rather than failing. + */ + avl_tree_t *local_avl; + nvlist_t *local_nv, *fs; + char *cp = strchr(zc.zc_value, '@'); + + /* + * XXX Do this faster by just iterating over snaps in + * this fs. Also if zc_value does not exist, we will + * get a strange "does not exist" error message. 
+ */ + *cp = '\0'; + if (gather_nvlist(hdl, zc.zc_value, NULL, NULL, + &local_nv, &local_avl) == 0) { + *cp = '@'; + fs = fsavl_find(local_avl, drrb->drr_toguid, NULL); + fsavl_destroy(local_avl); + nvlist_free(local_nv); + + if (fs != NULL) { + if (flags.verbose) { + (void) printf("snap %s already exists; " + "ignoring\n", zc.zc_value); + } + ioctl_err = recv_skip(hdl, infd, + flags.byteswap); + } + } + *cp = '@'; + } + + + if (ioctl_err != 0) { + switch (ioctl_errno) { + case ENODEV: + cp = strchr(zc.zc_value, '@'); + *cp = '\0'; + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "most recent snapshot of %s does not\n" + "match incremental source"), zc.zc_value); + (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf); + *cp = '@'; + break; + case ETXTBSY: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "destination %s has been modified\n" + "since most recent snapshot"), zc.zc_name); + (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf); + break; + case EEXIST: + cp = strchr(zc.zc_value, '@'); + if (newfs) { + /* it's the containing fs that exists */ + *cp = '\0'; + } + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "destination already exists")); + (void) zfs_error_fmt(hdl, EZFS_EXISTS, + dgettext(TEXT_DOMAIN, "cannot restore to %s"), + zc.zc_value); + *cp = '@'; + break; + case EINVAL: + (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf); + break; + case ECKSUM: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "invalid stream (checksum mismatch)")); + (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf); + break; + default: + (void) zfs_standard_error(hdl, ioctl_errno, errbuf); + } + } + + /* + * Mount or recreate the /dev links for the target filesystem + * (if created, or if we tore them down to do an incremental + * restore), and the /dev links for the new snapshot (if + * created). Also mount any children of the target filesystem + * if we did an incremental receive. + */ + cp = strchr(zc.zc_value, '@'); + if (cp && (ioctl_err == 0 || !newfs)) { + zfs_handle_t *h; + + *cp = '\0'; + h = zfs_open(hdl, zc.zc_value, + ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); + if (h != NULL) { + if (h->zfs_type == ZFS_TYPE_VOLUME) { + *cp = '@'; + err = zvol_create_link(hdl, h->zfs_name); + if (err == 0 && ioctl_err == 0) + err = zvol_create_link(hdl, + zc.zc_value); + } else if (newfs) { + /* + * Track the first/top of hierarchy fs, + * for mounting and sharing later. 
+ */ + if (top_zfs && *top_zfs == NULL) + *top_zfs = zfs_strdup(hdl, zc.zc_value); + } + zfs_close(h); + } + *cp = '@'; + } + + if (clp) { + err |= changelist_postfix(clp); + changelist_free(clp); + } + + if (err || ioctl_err) + return (-1); + + if (flags.verbose) { + char buf1[64]; + char buf2[64]; + uint64_t bytes = zc.zc_cookie; + time_t delta = time(NULL) - begin_time; + if (delta == 0) + delta = 1; + zfs_nicenum(bytes, buf1, sizeof (buf1)); + zfs_nicenum(bytes/delta, buf2, sizeof (buf1)); + + (void) printf("received %sB stream in %lu seconds (%sB/sec)\n", + buf1, delta, buf2); + } + + return (0); +} + +static int +zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap, recvflags_t flags, + int infd, avl_tree_t *stream_avl, char **top_zfs) +{ + int err; + dmu_replay_record_t drr, drr_noswap; + struct drr_begin *drrb = &drr.drr_u.drr_begin; + char errbuf[1024]; + zio_cksum_t zcksum = { 0 }; + + (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, + "cannot receive")); + + if (flags.isprefix && + !zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs " + "(%s) does not exist"), tosnap); + return (zfs_error(hdl, EZFS_NOENT, errbuf)); + } + + /* read in the BEGIN record */ + if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE, + &zcksum))) + return (err); + + if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) { + /* It's the double end record at the end of a package */ + return (ENODATA); + } + + /* the kernel needs the non-byteswapped begin record */ + drr_noswap = drr; + + flags.byteswap = B_FALSE; + if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) { + /* + * We computed the checksum in the wrong byteorder in + * recv_read() above; do it again correctly. + */ + bzero(&zcksum, sizeof (zio_cksum_t)); + fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum); + flags.byteswap = B_TRUE; + + drr.drr_type = BSWAP_32(drr.drr_type); + drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen); + drrb->drr_magic = BSWAP_64(drrb->drr_magic); + drrb->drr_version = BSWAP_64(drrb->drr_version); + drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time); + drrb->drr_type = BSWAP_32(drrb->drr_type); + drrb->drr_flags = BSWAP_32(drrb->drr_flags); + drrb->drr_toguid = BSWAP_64(drrb->drr_toguid); + drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid); + } + + if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " + "stream (bad magic number)")); + return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); + } + + if (strchr(drrb->drr_toname, '@') == NULL) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " + "stream (bad snapshot name)")); + return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); + } + + if (drrb->drr_version == DMU_BACKUP_STREAM_VERSION) { + return (zfs_receive_one(hdl, infd, tosnap, flags, + &drr, &drr_noswap, stream_avl, top_zfs)); + } else if (drrb->drr_version == DMU_BACKUP_HEADER_VERSION) { + return (zfs_receive_package(hdl, infd, tosnap, flags, + &drr, &zcksum, top_zfs)); + } else { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "stream is unsupported version %llu"), + drrb->drr_version); + return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); + } +} + +/* + * Restores a backup of tosnap from the file descriptor specified by infd. + * Return 0 on total success, -2 if some things couldn't be + * destroyed/renamed/promoted, -1 if some things couldn't be received. + * (-1 will override -2). 
+ */ +int +zfs_receive(libzfs_handle_t *hdl, const char *tosnap, recvflags_t flags, + int infd, avl_tree_t *stream_avl) +{ + char *top_zfs = NULL; + int err; + + err = zfs_receive_impl(hdl, tosnap, flags, infd, stream_avl, &top_zfs); + + if (err == 0 && top_zfs) { + zfs_handle_t *zhp; + prop_changelist_t *clp; + + zhp = zfs_open(hdl, top_zfs, ZFS_TYPE_FILESYSTEM); + if (zhp != NULL) { + clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, + CL_GATHER_MOUNT_ALWAYS, 0); + zfs_close(zhp); + if (clp != NULL) { + /* mount and share received datasets */ + err = changelist_postfix(clp); + changelist_free(clp); + } + } + if (zhp == NULL || clp == NULL || err) + err = -1; + } + if (top_zfs) + free(top_zfs); + + return (err); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_status.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_status.c new file mode 100644 index 000000000000..c7eb04e74cac --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_status.c @@ -0,0 +1,317 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * This file contains the functions which analyze the status of a pool. This + * include both the status of an active pool, as well as the status exported + * pools. Returns one of the ZPOOL_STATUS_* defines describing the status of + * the pool. This status is independent (to a certain degree) from the state of + * the pool. A pool's state describes only whether or not it is capable of + * providing the necessary fault tolerance for data. The status describes the + * overall status of devices. A pool that is online can still have a device + * that is experiencing errors. + * + * Only a subset of the possible faults can be detected using 'zpool status', + * and not all possible errors correspond to a FMA message ID. The explanation + * is left up to the caller, depending on whether it is a live pool or an + * import. + */ + +#include +#include +#include +#include "libzfs_impl.h" + +/* + * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines + * in libzfs.h. Note that there are some status results which go past the end + * of this table, and hence have no associated message ID. 
+ */ +static char *zfs_msgid_table[] = { + "ZFS-8000-14", + "ZFS-8000-2Q", + "ZFS-8000-3C", + "ZFS-8000-4J", + "ZFS-8000-5E", + "ZFS-8000-6X", + "ZFS-8000-72", + "ZFS-8000-8A", + "ZFS-8000-9P", + "ZFS-8000-A5", + "ZFS-8000-EY", + "ZFS-8000-HC", + "ZFS-8000-JQ", + "ZFS-8000-K4", +}; + +#define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0])) + +/* ARGSUSED */ +static int +vdev_missing(uint64_t state, uint64_t aux, uint64_t errs) +{ + return (state == VDEV_STATE_CANT_OPEN && + aux == VDEV_AUX_OPEN_FAILED); +} + +/* ARGSUSED */ +static int +vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs) +{ + return (state == VDEV_STATE_FAULTED); +} + +/* ARGSUSED */ +static int +vdev_errors(uint64_t state, uint64_t aux, uint64_t errs) +{ + return (state == VDEV_STATE_DEGRADED || errs != 0); +} + +/* ARGSUSED */ +static int +vdev_broken(uint64_t state, uint64_t aux, uint64_t errs) +{ + return (state == VDEV_STATE_CANT_OPEN); +} + +/* ARGSUSED */ +static int +vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs) +{ + return (state == VDEV_STATE_OFFLINE); +} + +/* + * Detect if any leaf devices that have seen errors or could not be opened. + */ +static boolean_t +find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t)) +{ + nvlist_t **child; + vdev_stat_t *vs; + uint_t c, children; + char *type; + + /* + * Ignore problems within a 'replacing' vdev, since we're presumably in + * the process of repairing any such errors, and don't want to call them + * out again. We'll pick up the fact that a resilver is happening + * later. + */ + verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0); + if (strcmp(type, VDEV_TYPE_REPLACING) == 0) + return (B_FALSE); + + if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child, + &children) == 0) { + for (c = 0; c < children; c++) + if (find_vdev_problem(child[c], func)) + return (B_TRUE); + } else { + verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &c) == 0); + + if (func(vs->vs_state, vs->vs_aux, + vs->vs_read_errors + + vs->vs_write_errors + + vs->vs_checksum_errors)) + return (B_TRUE); + } + + return (B_FALSE); +} + +/* + * Active pool health status. + * + * To determine the status for a pool, we make several passes over the config, + * picking the most egregious error we find. In order of importance, we do the + * following: + * + * - Check for a complete and valid configuration + * - Look for any faulted or missing devices in a non-replicated config + * - Check for any data errors + * - Check for any faulted or missing devices in a replicated config + * - Look for any devices showing errors + * - Check for any resilvering devices + * + * There can obviously be multiple errors within a single pool, so this routine + * only picks the most damaging of all the current errors to report. + */ +static zpool_status_t +check_status(nvlist_t *config, boolean_t isimport) +{ + nvlist_t *nvroot; + vdev_stat_t *vs; + uint_t vsc; + uint64_t nerr; + uint64_t version; + uint64_t stateval; + uint64_t suspended; + uint64_t hostid = 0; + + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, + &version) == 0); + verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &vsc) == 0); + verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, + &stateval) == 0); + (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid); + + /* + * Pool last accessed by another system. 
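+ * A hostid recorded in the config that differs from this system's
+ * gethostid() while the pool state is still ACTIVE (i.e. it was never
+ * exported) means the pool was last imported elsewhere, so report
+ * ZPOOL_STATUS_HOSTID_MISMATCH before attempting any other diagnosis.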
+ */ + if (hostid != 0 && (unsigned long)hostid != gethostid() && + stateval == POOL_STATE_ACTIVE) + return (ZPOOL_STATUS_HOSTID_MISMATCH); + + /* + * Newer on-disk version. + */ + if (vs->vs_state == VDEV_STATE_CANT_OPEN && + vs->vs_aux == VDEV_AUX_VERSION_NEWER) + return (ZPOOL_STATUS_VERSION_NEWER); + + /* + * Check that the config is complete. + */ + if (vs->vs_state == VDEV_STATE_CANT_OPEN && + vs->vs_aux == VDEV_AUX_BAD_GUID_SUM) + return (ZPOOL_STATUS_BAD_GUID_SUM); + + /* + * Check whether the pool has suspended due to failed I/O. + */ + if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED, + &suspended) == 0) { + if (suspended == ZIO_FAILURE_MODE_CONTINUE) + return (ZPOOL_STATUS_IO_FAILURE_CONTINUE); + return (ZPOOL_STATUS_IO_FAILURE_WAIT); + } + + /* + * Could not read a log. + */ + if (vs->vs_state == VDEV_STATE_CANT_OPEN && + vs->vs_aux == VDEV_AUX_BAD_LOG) { + return (ZPOOL_STATUS_BAD_LOG); + } + + /* + * Bad devices in non-replicated config. + */ + if (vs->vs_state == VDEV_STATE_CANT_OPEN && + find_vdev_problem(nvroot, vdev_faulted)) + return (ZPOOL_STATUS_FAULTED_DEV_NR); + + if (vs->vs_state == VDEV_STATE_CANT_OPEN && + find_vdev_problem(nvroot, vdev_missing)) + return (ZPOOL_STATUS_MISSING_DEV_NR); + + if (vs->vs_state == VDEV_STATE_CANT_OPEN && + find_vdev_problem(nvroot, vdev_broken)) + return (ZPOOL_STATUS_CORRUPT_LABEL_NR); + + /* + * Corrupted pool metadata + */ + if (vs->vs_state == VDEV_STATE_CANT_OPEN && + vs->vs_aux == VDEV_AUX_CORRUPT_DATA) + return (ZPOOL_STATUS_CORRUPT_POOL); + + /* + * Persistent data errors. + */ + if (!isimport) { + if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, + &nerr) == 0 && nerr != 0) + return (ZPOOL_STATUS_CORRUPT_DATA); + } + + /* + * Missing devices in a replicated config. + */ + if (find_vdev_problem(nvroot, vdev_faulted)) + return (ZPOOL_STATUS_FAULTED_DEV_R); + if (find_vdev_problem(nvroot, vdev_missing)) + return (ZPOOL_STATUS_MISSING_DEV_R); + if (find_vdev_problem(nvroot, vdev_broken)) + return (ZPOOL_STATUS_CORRUPT_LABEL_R); + + /* + * Devices with errors + */ + if (!isimport && find_vdev_problem(nvroot, vdev_errors)) + return (ZPOOL_STATUS_FAILING_DEV); + + /* + * Offlined devices + */ + if (find_vdev_problem(nvroot, vdev_offlined)) + return (ZPOOL_STATUS_OFFLINE_DEV); + + /* + * Currently resilvering + */ + if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER) + return (ZPOOL_STATUS_RESILVERING); + + /* + * Outdated, but usable, version + */ + if (version < SPA_VERSION) + return (ZPOOL_STATUS_VERSION_OLDER); + + return (ZPOOL_STATUS_OK); +} + +zpool_status_t +zpool_get_status(zpool_handle_t *zhp, char **msgid) +{ + zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE); + + if (ret >= NMSGID) + *msgid = NULL; + else + *msgid = zfs_msgid_table[ret]; + + return (ret); +} + +zpool_status_t +zpool_import_status(nvlist_t *config, char **msgid) +{ + zpool_status_t ret = check_status(config, B_TRUE); + + if (ret >= NMSGID) + *msgid = NULL; + else + *msgid = zfs_msgid_table[ret]; + + return (ret); +} diff --git a/external/cddl/osnet/dist/lib/libzfs/common/libzfs_util.c b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_util.c new file mode 100644 index 000000000000..54de0f4b50a4 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzfs/common/libzfs_util.c @@ -0,0 +1,1403 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Internal utility routines for the ZFS library. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "libzfs_impl.h" +#include "zfs_prop.h" + +int +libzfs_errno(libzfs_handle_t *hdl) +{ + return (hdl->libzfs_error); +} + +const char * +libzfs_error_action(libzfs_handle_t *hdl) +{ + return (hdl->libzfs_action); +} + +const char * +libzfs_error_description(libzfs_handle_t *hdl) +{ + if (hdl->libzfs_desc[0] != '\0') + return (hdl->libzfs_desc); + + switch (hdl->libzfs_error) { + case EZFS_NOMEM: + return (dgettext(TEXT_DOMAIN, "out of memory")); + case EZFS_BADPROP: + return (dgettext(TEXT_DOMAIN, "invalid property value")); + case EZFS_PROPREADONLY: + return (dgettext(TEXT_DOMAIN, "read only property")); + case EZFS_PROPTYPE: + return (dgettext(TEXT_DOMAIN, "property doesn't apply to " + "datasets of this type")); + case EZFS_PROPNONINHERIT: + return (dgettext(TEXT_DOMAIN, "property cannot be inherited")); + case EZFS_PROPSPACE: + return (dgettext(TEXT_DOMAIN, "invalid quota or reservation")); + case EZFS_BADTYPE: + return (dgettext(TEXT_DOMAIN, "operation not applicable to " + "datasets of this type")); + case EZFS_BUSY: + return (dgettext(TEXT_DOMAIN, "pool or dataset is busy")); + case EZFS_EXISTS: + return (dgettext(TEXT_DOMAIN, "pool or dataset exists")); + case EZFS_NOENT: + return (dgettext(TEXT_DOMAIN, "no such pool or dataset")); + case EZFS_BADSTREAM: + return (dgettext(TEXT_DOMAIN, "invalid backup stream")); + case EZFS_DSREADONLY: + return (dgettext(TEXT_DOMAIN, "dataset is read only")); + case EZFS_VOLTOOBIG: + return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for " + "this system")); + case EZFS_VOLHASDATA: + return (dgettext(TEXT_DOMAIN, "volume has data")); + case EZFS_INVALIDNAME: + return (dgettext(TEXT_DOMAIN, "invalid name")); + case EZFS_BADRESTORE: + return (dgettext(TEXT_DOMAIN, "unable to restore to " + "destination")); + case EZFS_BADBACKUP: + return (dgettext(TEXT_DOMAIN, "backup failed")); + case EZFS_BADTARGET: + return (dgettext(TEXT_DOMAIN, "invalid target vdev")); + case EZFS_NODEVICE: + return (dgettext(TEXT_DOMAIN, "no such device in pool")); + case EZFS_BADDEV: + return (dgettext(TEXT_DOMAIN, "invalid device")); + case EZFS_NOREPLICAS: + return (dgettext(TEXT_DOMAIN, "no valid replicas")); + case EZFS_RESILVERING: + return (dgettext(TEXT_DOMAIN, "currently resilvering")); + case EZFS_BADVERSION: + return (dgettext(TEXT_DOMAIN, "unsupported version")); + case EZFS_POOLUNAVAIL: + return (dgettext(TEXT_DOMAIN, "pool is unavailable")); + case EZFS_DEVOVERFLOW: + return (dgettext(TEXT_DOMAIN, "too many devices in one vdev")); + case EZFS_BADPATH: + return (dgettext(TEXT_DOMAIN, "must be an absolute path")); + case EZFS_CROSSTARGET: 
+ return (dgettext(TEXT_DOMAIN, "operation crosses datasets or " + "pools")); + case EZFS_ZONED: + return (dgettext(TEXT_DOMAIN, "dataset in use by local zone")); + case EZFS_MOUNTFAILED: + return (dgettext(TEXT_DOMAIN, "mount failed")); + case EZFS_UMOUNTFAILED: + return (dgettext(TEXT_DOMAIN, "umount failed")); + case EZFS_UNSHARENFSFAILED: + return (dgettext(TEXT_DOMAIN, "unshare(1M) failed")); + case EZFS_SHARENFSFAILED: + return (dgettext(TEXT_DOMAIN, "share(1M) failed")); + case EZFS_UNSHARESMBFAILED: + return (dgettext(TEXT_DOMAIN, "smb remove share failed")); + case EZFS_SHARESMBFAILED: + return (dgettext(TEXT_DOMAIN, "smb add share failed")); + case EZFS_ISCSISVCUNAVAIL: + return (dgettext(TEXT_DOMAIN, + "iscsitgt service need to be enabled by " + "a privileged user")); + case EZFS_DEVLINKS: + return (dgettext(TEXT_DOMAIN, "failed to create /dev links")); + case EZFS_PERM: + return (dgettext(TEXT_DOMAIN, "permission denied")); + case EZFS_NOSPC: + return (dgettext(TEXT_DOMAIN, "out of space")); + case EZFS_IO: + return (dgettext(TEXT_DOMAIN, "I/O error")); + case EZFS_INTR: + return (dgettext(TEXT_DOMAIN, "signal received")); + case EZFS_ISSPARE: + return (dgettext(TEXT_DOMAIN, "device is reserved as a hot " + "spare")); + case EZFS_INVALCONFIG: + return (dgettext(TEXT_DOMAIN, "invalid vdev configuration")); + case EZFS_RECURSIVE: + return (dgettext(TEXT_DOMAIN, "recursive dataset dependency")); + case EZFS_NOHISTORY: + return (dgettext(TEXT_DOMAIN, "no history available")); + case EZFS_UNSHAREISCSIFAILED: + return (dgettext(TEXT_DOMAIN, + "iscsitgtd failed request to unshare")); + case EZFS_SHAREISCSIFAILED: + return (dgettext(TEXT_DOMAIN, + "iscsitgtd failed request to share")); + case EZFS_POOLPROPS: + return (dgettext(TEXT_DOMAIN, "failed to retrieve " + "pool properties")); + case EZFS_POOL_NOTSUP: + return (dgettext(TEXT_DOMAIN, "operation not supported " + "on this type of pool")); + case EZFS_POOL_INVALARG: + return (dgettext(TEXT_DOMAIN, "invalid argument for " + "this pool operation")); + case EZFS_NAMETOOLONG: + return (dgettext(TEXT_DOMAIN, "dataset name is too long")); + case EZFS_OPENFAILED: + return (dgettext(TEXT_DOMAIN, "open failed")); + case EZFS_NOCAP: + return (dgettext(TEXT_DOMAIN, + "disk capacity information could not be retrieved")); + case EZFS_LABELFAILED: + return (dgettext(TEXT_DOMAIN, "write of label failed")); + case EZFS_BADWHO: + return (dgettext(TEXT_DOMAIN, "invalid user/group")); + case EZFS_BADPERM: + return (dgettext(TEXT_DOMAIN, "invalid permission")); + case EZFS_BADPERMSET: + return (dgettext(TEXT_DOMAIN, "invalid permission set name")); + case EZFS_NODELEGATION: + return (dgettext(TEXT_DOMAIN, "delegated administration is " + "disabled on pool")); + case EZFS_PERMRDONLY: + return (dgettext(TEXT_DOMAIN, "snapshot permissions cannot be" + " modified")); + case EZFS_BADCACHE: + return (dgettext(TEXT_DOMAIN, "invalid or missing cache file")); + case EZFS_ISL2CACHE: + return (dgettext(TEXT_DOMAIN, "device is in use as a cache")); + case EZFS_VDEVNOTSUP: + return (dgettext(TEXT_DOMAIN, "vdev specification is not " + "supported")); + case EZFS_NOTSUP: + return (dgettext(TEXT_DOMAIN, "operation not supported " + "on this dataset")); + case EZFS_ACTIVE_SPARE: + return (dgettext(TEXT_DOMAIN, "pool has active shared spare " + "device")); + case EZFS_UNKNOWN: + return (dgettext(TEXT_DOMAIN, "unknown error")); + default: + assert(hdl->libzfs_error == 0); + return (dgettext(TEXT_DOMAIN, "no error")); + } +} + +/*PRINTFLIKE2*/ +void 
+zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + + (void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc), + fmt, ap); + hdl->libzfs_desc_active = 1; + + va_end(ap); +} + +static void +zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap) +{ + (void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action), + fmt, ap); + hdl->libzfs_error = error; + + if (hdl->libzfs_desc_active) + hdl->libzfs_desc_active = 0; + else + hdl->libzfs_desc[0] = '\0'; + + if (hdl->libzfs_printerr) { + if (error == EZFS_UNKNOWN) { + (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal " + "error: %s\n"), libzfs_error_description(hdl)); + abort(); + } + + (void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action, + libzfs_error_description(hdl)); + if (error == EZFS_NOMEM) + exit(1); + } +} + +int +zfs_error(libzfs_handle_t *hdl, int error, const char *msg) +{ + return (zfs_error_fmt(hdl, error, "%s", msg)); +} + +/*PRINTFLIKE3*/ +int +zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + + zfs_verror(hdl, error, fmt, ap); + + va_end(ap); + + return (-1); +} + +static int +zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt, + va_list ap) +{ + switch (error) { + case EPERM: + case EACCES: + zfs_verror(hdl, EZFS_PERM, fmt, ap); + return (-1); + + case ECANCELED: + zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap); + return (-1); + + case EIO: + zfs_verror(hdl, EZFS_IO, fmt, ap); + return (-1); + + case EINTR: + zfs_verror(hdl, EZFS_INTR, fmt, ap); + return (-1); + } + + return (0); +} + +int +zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg) +{ + return (zfs_standard_error_fmt(hdl, error, "%s", msg)); +} + +/*PRINTFLIKE3*/ +int +zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + + if (zfs_common_error(hdl, error, fmt, ap) != 0) { + va_end(ap); + return (-1); + } + + switch (error) { + case ENXIO: + case ENODEV: + zfs_verror(hdl, EZFS_IO, fmt, ap); + break; + + case ENOENT: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "dataset does not exist")); + zfs_verror(hdl, EZFS_NOENT, fmt, ap); + break; + + case ENOSPC: + case EDQUOT: + zfs_verror(hdl, EZFS_NOSPC, fmt, ap); + return (-1); + + case EEXIST: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "dataset already exists")); + zfs_verror(hdl, EZFS_EXISTS, fmt, ap); + break; + + case EBUSY: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "dataset is busy")); + zfs_verror(hdl, EZFS_BUSY, fmt, ap); + break; + case EROFS: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "snapshot permissions cannot be modified")); + zfs_verror(hdl, EZFS_PERMRDONLY, fmt, ap); + break; + case ENAMETOOLONG: + zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap); + break; + case ENOTSUP: + zfs_verror(hdl, EZFS_BADVERSION, fmt, ap); + break; + default: + zfs_error_aux(hdl, strerror(errno)); + zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap); + break; + } + + va_end(ap); + return (-1); +} + +int +zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg) +{ + return (zpool_standard_error_fmt(hdl, error, "%s", msg)); +} + +/*PRINTFLIKE3*/ +int +zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + + if (zfs_common_error(hdl, error, fmt, ap) != 0) { + va_end(ap); + return (-1); + } + + switch (error) { + case ENODEV: + zfs_verror(hdl, EZFS_NODEVICE, fmt, ap); + break; + + case ENOENT: + zfs_error_aux(hdl, + dgettext(TEXT_DOMAIN, "no such pool or dataset")); + zfs_verror(hdl, EZFS_NOENT, fmt, ap); + break; + + case EEXIST: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "pool already exists")); + zfs_verror(hdl, EZFS_EXISTS, fmt, ap); + break; + + case EBUSY: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy")); + zfs_verror(hdl, EZFS_BUSY, fmt, ap); + break; + + case ENXIO: + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "one or more devices is currently unavailable")); + zfs_verror(hdl, EZFS_BADDEV, fmt, ap); + break; + + case ENAMETOOLONG: + zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap); + break; + + case ENOTSUP: + zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap); + break; + + case EINVAL: + zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap); + break; + + case ENOSPC: + case EDQUOT: + zfs_verror(hdl, EZFS_NOSPC, fmt, ap); + return (-1); + + default: + zfs_error_aux(hdl, strerror(error)); + zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap); + } + + va_end(ap); + return (-1); +} + +/* + * Display an out of memory error message and abort the current program. + */ +int +no_memory(libzfs_handle_t *hdl) +{ + return (zfs_error(hdl, EZFS_NOMEM, "internal error")); +} + +/* + * A safe form of malloc() which will die if the allocation fails. + */ +void * +zfs_alloc(libzfs_handle_t *hdl, size_t size) +{ + void *data; + + if ((data = calloc(1, size)) == NULL) + (void) no_memory(hdl); + + return (data); +} + +/* + * A safe form of realloc(), which also zeroes newly allocated space. + */ +void * +zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize) +{ + void *ret; + + if ((ret = realloc(ptr, newsize)) == NULL) { + (void) no_memory(hdl); + free(ptr); + return (NULL); + } + + bzero((char *)ret + oldsize, (newsize - oldsize)); + return (ret); +} + +/* + * A safe form of strdup() which will die if the allocation fails. + */ +char * +zfs_strdup(libzfs_handle_t *hdl, const char *str) +{ + char *ret; + + if ((ret = strdup(str)) == NULL) + (void) no_memory(hdl); + + return (ret); +} + +/* + * Convert a number to an appropriately human-readable output. + */ +void +zfs_nicenum(uint64_t num, char *buf, size_t buflen) +{ + uint64_t n = num; + int index = 0; + char u; + + while (n >= 1024) { + n /= 1024; + index++; + } + + u = " KMGTPE"[index]; + + if (index == 0) { + (void) snprintf(buf, buflen, "%llu", n); + } else if ((num & ((1ULL << 10 * index) - 1)) == 0) { + /* + * If this is an even multiple of the base, always display + * without any decimal precision. + */ + (void) snprintf(buf, buflen, "%llu%c", n, u); + } else { + /* + * We want to choose a precision that reflects the best choice + * for fitting in 5 characters. This can get rather tricky when + * we have numbers that are very close to an order of magnitude. + * For example, when displaying 10239 (which is really 9.999K), + * we want only a single place of precision for 10.0K. We could + * develop some complex heuristics for this, but it's much + * easier just to try each combination in turn. 
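+ *
+ * For 10239 the loop below first tries two decimal places ("10.00K",
+ * six characters, too wide), then one decimal place ("10.0K", five
+ * characters), and stops there, so 10.0K is what gets printed.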
+ */ + int i; + for (i = 2; i >= 0; i--) { + if (snprintf(buf, buflen, "%.*f%c", i, + (double)num / (1ULL << 10 * index), u) <= 5) + break; + } + } +} + +void +libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr) +{ + hdl->libzfs_printerr = printerr; +} + +libzfs_handle_t * +libzfs_init(void) +{ + libzfs_handle_t *hdl; + + if ((hdl = calloc(sizeof (libzfs_handle_t), 1)) == NULL) { + return (NULL); + } + + if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR)) < 0) { + free(hdl); + return (NULL); + } + + if ((hdl->libzfs_mnttab = fopen(MNTTAB, "r")) == NULL) { + (void) close(hdl->libzfs_fd); + free(hdl); + return (NULL); + } + + hdl->libzfs_sharetab = fopen("/etc/dfs/sharetab", "r"); + + zfs_prop_init(); + zpool_prop_init(); + + return (hdl); +} + +void +libzfs_fini(libzfs_handle_t *hdl) +{ + (void) close(hdl->libzfs_fd); + if (hdl->libzfs_mnttab) + (void) fclose(hdl->libzfs_mnttab); + if (hdl->libzfs_sharetab) + (void) fclose(hdl->libzfs_sharetab); + zfs_uninit_libshare(hdl); + if (hdl->libzfs_log_str) + (void) free(hdl->libzfs_log_str); + zpool_free_handles(hdl); + namespace_clear(hdl); + free(hdl); +} + +libzfs_handle_t * +zpool_get_handle(zpool_handle_t *zhp) +{ + return (zhp->zpool_hdl); +} + +libzfs_handle_t * +zfs_get_handle(zfs_handle_t *zhp) +{ + return (zhp->zfs_hdl); +} + +zpool_handle_t * +zfs_get_pool_handle(const zfs_handle_t *zhp) +{ + return (zhp->zpool_hdl); +} + +/* + * Given a name, determine whether or not it's a valid path + * (starts with '/' or "./"). If so, walk the mnttab trying + * to match the device number. If not, treat the path as an + * fs/vol/snap name. + */ +zfs_handle_t * +zfs_path_to_zhandle(libzfs_handle_t *hdl, char *path, zfs_type_t argtype) +{ + struct stat64 statbuf; + struct extmnttab entry; + int ret; + + if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) { + /* + * It's not a valid path, assume it's a name of type 'argtype'. + */ + return (zfs_open(hdl, path, argtype)); + } + + if (stat64(path, &statbuf) != 0) { + (void) fprintf(stderr, "%s: %s\n", path, strerror(errno)); + return (NULL); + } + + rewind(hdl->libzfs_mnttab); + while ((ret = getextmntent(hdl->libzfs_mnttab, &entry, 0)) == 0) { + if (makedevice(entry.mnt_major, entry.mnt_minor) == + statbuf.st_dev) { + break; + } + } + if (ret != 0) { + return (NULL); + } + + if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) { + (void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"), + path); + return (NULL); + } + + return (zfs_open(hdl, entry.mnt_special, ZFS_TYPE_FILESYSTEM)); +} + +/* + * Initialize the zc_nvlist_dst member to prepare for receiving an nvlist from + * an ioctl(). + */ +int +zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len) +{ + if (len == 0) + len = 2048; + zc->zc_nvlist_dst_size = len; + if ((zc->zc_nvlist_dst = (uint64_t)(uintptr_t) + zfs_alloc(hdl, zc->zc_nvlist_dst_size)) == NULL) + return (-1); + + return (0); +} + +/* + * Called when an ioctl() which returns an nvlist fails with ENOMEM. This will + * expand the nvlist to the size specified in 'zc_nvlist_dst_size', which was + * filled in by the kernel to indicate the actual required size. + */ +int +zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc) +{ + free((void *)(uintptr_t)zc->zc_nvlist_dst); + if ((zc->zc_nvlist_dst = (uint64_t)(uintptr_t) + zfs_alloc(hdl, zc->zc_nvlist_dst_size)) + == NULL) + return (-1); + + return (0); +} + +/* + * Called to free the src and dst nvlists stored in the command structure. 
+ */ +void +zcmd_free_nvlists(zfs_cmd_t *zc) +{ + free((void *)(uintptr_t)zc->zc_nvlist_conf); + free((void *)(uintptr_t)zc->zc_nvlist_src); + free((void *)(uintptr_t)zc->zc_nvlist_dst); +} + +static int +zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen, + nvlist_t *nvl) +{ + char *packed; + size_t len; + + verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0); + + if ((packed = zfs_alloc(hdl, len)) == NULL) + return (-1); + + verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0); + + *outnv = (uint64_t)(uintptr_t)packed; + *outlen = len; + + return (0); +} + +int +zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl) +{ + return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf, + &zc->zc_nvlist_conf_size, nvl)); +} + +int +zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl) +{ + return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src, + &zc->zc_nvlist_src_size, nvl)); +} + +/* + * Unpacks an nvlist from the ZFS ioctl command structure. + */ +int +zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp) +{ + if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst, + zc->zc_nvlist_dst_size, nvlp, 0) != 0) + return (no_memory(hdl)); + + return (0); +} + +int +zfs_ioctl(libzfs_handle_t *hdl, int request, zfs_cmd_t *zc) +{ + int error; + + zc->zc_history = (uint64_t)(uintptr_t)hdl->libzfs_log_str; + error = ioctl(hdl->libzfs_fd, request, zc); + if (hdl->libzfs_log_str) { + free(hdl->libzfs_log_str); + hdl->libzfs_log_str = NULL; + } + zc->zc_history = 0; + + return (error); +} + +/* + * ================================================================ + * API shared by zfs and zpool property management + * ================================================================ + */ + +static void +zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type) +{ + zprop_list_t *pl = cbp->cb_proplist; + int i; + char *title; + size_t len; + + cbp->cb_first = B_FALSE; + if (cbp->cb_scripted) + return; + + /* + * Start with the length of the column headers. + */ + cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME")); + cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN, + "PROPERTY")); + cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN, + "VALUE")); + cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN, + "SOURCE")); + + /* + * Go through and calculate the widths for each column. For the + * 'source' column, we kludge it up by taking the worst-case scenario of + * inheriting from the longest name. This is acceptable because in the + * majority of cases 'SOURCE' is the last column displayed, and we don't + * use the width anyway. Note that the 'VALUE' column can be oversized, + * if the name of the property is much longer the any values we find. + */ + for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { + /* + * 'PROPERTY' column + */ + if (pl->pl_prop != ZPROP_INVAL) { + const char *propname = (type == ZFS_TYPE_POOL) ? 
+ zpool_prop_to_name(pl->pl_prop) : + zfs_prop_to_name(pl->pl_prop); + + len = strlen(propname); + if (len > cbp->cb_colwidths[GET_COL_PROPERTY]) + cbp->cb_colwidths[GET_COL_PROPERTY] = len; + } else { + len = strlen(pl->pl_user_prop); + if (len > cbp->cb_colwidths[GET_COL_PROPERTY]) + cbp->cb_colwidths[GET_COL_PROPERTY] = len; + } + + /* + * 'VALUE' column + */ + if ((pl->pl_prop != ZFS_PROP_NAME || !pl->pl_all) && + pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE]) + cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width; + + /* + * 'NAME' and 'SOURCE' columns + */ + if (pl->pl_prop == (type == ZFS_TYPE_POOL ? ZPOOL_PROP_NAME : + ZFS_PROP_NAME) && + pl->pl_width > cbp->cb_colwidths[GET_COL_NAME]) { + cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width; + cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width + + strlen(dgettext(TEXT_DOMAIN, "inherited from")); + } + } + + /* + * Now go through and print the headers. + */ + for (i = 0; i < 4; i++) { + switch (cbp->cb_columns[i]) { + case GET_COL_NAME: + title = dgettext(TEXT_DOMAIN, "NAME"); + break; + case GET_COL_PROPERTY: + title = dgettext(TEXT_DOMAIN, "PROPERTY"); + break; + case GET_COL_VALUE: + title = dgettext(TEXT_DOMAIN, "VALUE"); + break; + case GET_COL_SOURCE: + title = dgettext(TEXT_DOMAIN, "SOURCE"); + break; + default: + title = NULL; + } + + if (title != NULL) { + if (i == 3 || cbp->cb_columns[i + 1] == 0) + (void) printf("%s", title); + else + (void) printf("%-*s ", + cbp->cb_colwidths[cbp->cb_columns[i]], + title); + } + } + (void) printf("\n"); +} + +/* + * Display a single line of output, according to the settings in the callback + * structure. + */ +void +zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp, + const char *propname, const char *value, zprop_source_t sourcetype, + const char *source) +{ + int i; + const char *str; + char buf[128]; + + /* + * Ignore those source types that the user has chosen to ignore. + */ + if ((sourcetype & cbp->cb_sources) == 0) + return; + + if (cbp->cb_first) + zprop_print_headers(cbp, cbp->cb_type); + + for (i = 0; i < 4; i++) { + switch (cbp->cb_columns[i]) { + case GET_COL_NAME: + str = name; + break; + + case GET_COL_PROPERTY: + str = propname; + break; + + case GET_COL_VALUE: + str = value; + break; + + case GET_COL_SOURCE: + switch (sourcetype) { + case ZPROP_SRC_NONE: + str = "-"; + break; + + case ZPROP_SRC_DEFAULT: + str = "default"; + break; + + case ZPROP_SRC_LOCAL: + str = "local"; + break; + + case ZPROP_SRC_TEMPORARY: + str = "temporary"; + break; + + case ZPROP_SRC_INHERITED: + (void) snprintf(buf, sizeof (buf), + "inherited from %s", source); + str = buf; + break; + } + break; + + default: + continue; + } + + if (cbp->cb_columns[i + 1] == 0) + (void) printf("%s", str); + else if (cbp->cb_scripted) + (void) printf("%s\t", str); + else + (void) printf("%-*s ", + cbp->cb_colwidths[cbp->cb_columns[i]], + str); + + } + + (void) printf("\n"); +} + +/* + * Given a numeric suffix, convert the value into a number of bits that the + * resulting value must be shifted. + */ +static int +str2shift(libzfs_handle_t *hdl, const char *buf) +{ + const char *ends = "BKMGTPEZ"; + int i; + + if (buf[0] == '\0') + return (0); + for (i = 0; i < strlen(ends); i++) { + if (toupper(buf[0]) == ends[i]) + break; + } + if (i == strlen(ends)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "invalid numeric suffix '%s'"), buf); + return (-1); + } + + /* + * We want to allow trailing 'b' characters for 'GB' or 'Mb'. But don't + * allow 'BB' - that's just weird. 
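+ *
+ * So, for example, "K" and "KB" both yield a shift of 10, "M" and
+ * "Mb" yield 20, a bare "B" (or an empty suffix) yields 0, and "BB"
+ * is rejected as an invalid suffix.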
+ */ + if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0' && + toupper(buf[0]) != 'B')) + return (10*i); + + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "invalid numeric suffix '%s'"), buf); + return (-1); +} + +/* + * Convert a string of the form '100G' into a real number. Used when setting + * properties or creating a volume. 'buf' is used to place an extended error + * message for the caller to use. + */ +int +zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num) +{ + char *end; + int shift; + + *num = 0; + + /* Check to see if this looks like a number. */ + if ((value[0] < '0' || value[0] > '9') && value[0] != '.') { + if (hdl) + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "bad numeric value '%s'"), value); + return (-1); + } + + /* Rely on stroll() to process the numeric portion. */ + errno = 0; + *num = strtoll(value, &end, 10); + + /* + * Check for ERANGE, which indicates that the value is too large to fit + * in a 64-bit value. + */ + if (errno == ERANGE) { + if (hdl) + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "numeric value is too large")); + return (-1); + } + + /* + * If we have a decimal value, then do the computation with floating + * point arithmetic. Otherwise, use standard arithmetic. + */ + if (*end == '.') { + double fval = strtod(value, &end); + + if ((shift = str2shift(hdl, end)) == -1) + return (-1); + + fval *= pow(2, shift); + + if (fval > UINT64_MAX) { + if (hdl) + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "numeric value is too large")); + return (-1); + } + + *num = (uint64_t)fval; + } else { + if ((shift = str2shift(hdl, end)) == -1) + return (-1); + + /* Check for overflow */ + if (shift >= 64 || (*num << shift) >> shift != *num) { + if (hdl) + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "numeric value is too large")); + return (-1); + } + + *num <<= shift; + } + + return (0); +} + +/* + * Given a propname=value nvpair to set, parse any numeric properties + * (index, boolean, etc) if they are specified as strings and add the + * resulting nvpair to the returned nvlist. + * + * At the DSL layer, all properties are either 64-bit numbers or strings. + * We want the user to be able to ignore this fact and specify properties + * as native values (numbers, for example) or as strings (to simplify + * command line utilities). This also handles converting index types + * (compression, checksum, etc) from strings to their on-disk index. + */ +int +zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop, + zfs_type_t type, nvlist_t *ret, char **svalp, uint64_t *ivalp, + const char *errbuf) +{ + data_type_t datatype = nvpair_type(elem); + zprop_type_t proptype; + const char *propname; + char *value; + boolean_t isnone = B_FALSE; + + if (type == ZFS_TYPE_POOL) { + proptype = zpool_prop_get_type(prop); + propname = zpool_prop_to_name(prop); + } else { + proptype = zfs_prop_get_type(prop); + propname = zfs_prop_to_name(prop); + } + + /* + * Convert any properties to the internal DSL value types. 
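+ *
+ * For example, a quota given as the string "10G" is parsed by
+ * zfs_nicestrtonum() into 10737418240, "none" becomes the numeric
+ * value 0, and an index property such as a compression or checksum
+ * algorithm name is mapped to its on-disk index by
+ * zprop_string_to_index().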
+ */ + *svalp = NULL; + *ivalp = 0; + + switch (proptype) { + case PROP_TYPE_STRING: + if (datatype != DATA_TYPE_STRING) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' must be a string"), nvpair_name(elem)); + goto error; + } + (void) nvpair_value_string(elem, svalp); + if (strlen(*svalp) >= ZFS_MAXPROPLEN) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' is too long"), nvpair_name(elem)); + goto error; + } + break; + + case PROP_TYPE_NUMBER: + if (datatype == DATA_TYPE_STRING) { + (void) nvpair_value_string(elem, &value); + if (strcmp(value, "none") == 0) { + isnone = B_TRUE; + } else if (zfs_nicestrtonum(hdl, value, ivalp) + != 0) { + goto error; + } + } else if (datatype == DATA_TYPE_UINT64) { + (void) nvpair_value_uint64(elem, ivalp); + } else { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' must be a number"), nvpair_name(elem)); + goto error; + } + + /* + * Quota special: force 'none' and don't allow 0. + */ + if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone && + (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "use 'none' to disable quota/refquota")); + goto error; + } + break; + + case PROP_TYPE_INDEX: + if (datatype != DATA_TYPE_STRING) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' must be a string"), nvpair_name(elem)); + goto error; + } + + (void) nvpair_value_string(elem, &value); + + if (zprop_string_to_index(prop, value, ivalp, type) != 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "'%s' must be one of '%s'"), propname, + zprop_values(prop, type)); + goto error; + } + break; + + default: + abort(); + } + + /* + * Add the result to our return set of properties. + */ + if (*svalp != NULL) { + if (nvlist_add_string(ret, propname, *svalp) != 0) { + (void) no_memory(hdl); + return (-1); + } + } else { + if (nvlist_add_uint64(ret, propname, *ivalp) != 0) { + (void) no_memory(hdl); + return (-1); + } + } + + return (0); +error: + (void) zfs_error(hdl, EZFS_BADPROP, errbuf); + return (-1); +} + +static int +addlist(libzfs_handle_t *hdl, char *propname, zprop_list_t **listp, + zfs_type_t type) +{ + int prop; + zprop_list_t *entry; + + prop = zprop_name_to_prop(propname, type); + + if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type)) + prop = ZPROP_INVAL; + + /* + * When no property table entry can be found, return failure if + * this is a pool property or if this isn't a user-defined + * dataset property, + */ + if (prop == ZPROP_INVAL && (type == ZFS_TYPE_POOL || + !zfs_prop_user(propname))) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "invalid property '%s'"), propname); + return (zfs_error(hdl, EZFS_BADPROP, + dgettext(TEXT_DOMAIN, "bad property list"))); + } + + if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL) + return (-1); + + entry->pl_prop = prop; + if (prop == ZPROP_INVAL) { + if ((entry->pl_user_prop = zfs_strdup(hdl, propname)) == NULL) { + free(entry); + return (-1); + } + entry->pl_width = strlen(propname); + } else { + entry->pl_width = zprop_width(prop, &entry->pl_fixed, + type); + } + + *listp = entry; + + return (0); +} + +/* + * Given a comma-separated list of properties, construct a property list + * containing both user-defined and native properties. This function will + * return a NULL list if 'all' is specified, which can later be expanded + * by zprop_expand_list(). + */ +int +zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp, + zfs_type_t type) +{ + *listp = NULL; + + /* + * If 'all' is specified, return a NULL list. 
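+ * Otherwise each comma-separated name (e.g. "name,used,avail") gets
+ * its own zprop_list_t entry via addlist(), and the shorthand "space"
+ * expands to the fixed set of space-accounting properties listed
+ * further down.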
+ */ + if (strcmp(props, "all") == 0) + return (0); + + /* + * If no props were specified, return an error. + */ + if (props[0] == '\0') { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "no properties specified")); + return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN, + "bad property list"))); + } + + /* + * It would be nice to use getsubopt() here, but the inclusion of column + * aliases makes this more effort than it's worth. + */ + while (*props != '\0') { + size_t len; + char *p; + char c; + + if ((p = strchr(props, ',')) == NULL) { + len = strlen(props); + p = props + len; + } else { + len = p - props; + } + + /* + * Check for empty options. + */ + if (len == 0) { + zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, + "empty property name")); + return (zfs_error(hdl, EZFS_BADPROP, + dgettext(TEXT_DOMAIN, "bad property list"))); + } + + /* + * Check all regular property names. + */ + c = props[len]; + props[len] = '\0'; + + if (strcmp(props, "space") == 0) { + static char *spaceprops[] = { + "name", "avail", "used", "usedbysnapshots", + "usedbydataset", "usedbyrefreservation", + "usedbychildren", NULL + }; + int i; + + for (i = 0; spaceprops[i]; i++) { + if (addlist(hdl, spaceprops[i], listp, type)) + return (-1); + listp = &(*listp)->pl_next; + } + } else { + if (addlist(hdl, props, listp, type)) + return (-1); + listp = &(*listp)->pl_next; + } + + props = p; + if (c == ',') + props++; + } + + return (0); +} + +void +zprop_free_list(zprop_list_t *pl) +{ + zprop_list_t *next; + + while (pl != NULL) { + next = pl->pl_next; + free(pl->pl_user_prop); + free(pl); + pl = next; + } +} + +typedef struct expand_data { + zprop_list_t **last; + libzfs_handle_t *hdl; + zfs_type_t type; +} expand_data_t; + +int +zprop_expand_list_cb(int prop, void *cb) +{ + zprop_list_t *entry; + expand_data_t *edp = cb; + + if ((entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t))) == NULL) + return (ZPROP_INVAL); + + entry->pl_prop = prop; + entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type); + entry->pl_all = B_TRUE; + + *(edp->last) = entry; + edp->last = &entry->pl_next; + + return (ZPROP_CONT); +} + +int +zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type) +{ + zprop_list_t *entry; + zprop_list_t **last; + expand_data_t exp; + + if (*plp == NULL) { + /* + * If this is the very first time we've been called for an 'all' + * specification, expand the list to include all native + * properties. + */ + last = plp; + + exp.last = last; + exp.hdl = hdl; + exp.type = type; + + if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE, + B_FALSE, type) == ZPROP_INVAL) + return (-1); + + /* + * Add 'name' to the beginning of the list, which is handled + * specially. + */ + if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL) + return (-1); + + entry->pl_prop = (type == ZFS_TYPE_POOL) ? 
ZPOOL_PROP_NAME : + ZFS_PROP_NAME; + entry->pl_width = zprop_width(entry->pl_prop, + &entry->pl_fixed, type); + entry->pl_all = B_TRUE; + entry->pl_next = *plp; + *plp = entry; + } + return (0); +} + +int +zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered, + zfs_type_t type) +{ + return (zprop_iter_common(func, cb, show_all, ordered, type)); +} diff --git a/external/cddl/osnet/dist/lib/libzpool/common/taskq.c b/external/cddl/osnet/dist/lib/libzpool/common/taskq.c new file mode 100644 index 000000000000..93acdcf8e4e3 --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzpool/common/taskq.c @@ -0,0 +1,261 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include + +int taskq_now; +taskq_t *system_taskq; + +typedef struct task { + struct task *task_next; + struct task *task_prev; + task_func_t *task_func; + void *task_arg; +} task_t; + +#define TASKQ_ACTIVE 0x00010000 + +struct taskq { + kmutex_t tq_lock; + krwlock_t tq_threadlock; + kcondvar_t tq_dispatch_cv; + kcondvar_t tq_wait_cv; + thread_t *tq_threadlist; + int tq_flags; + int tq_active; + int tq_nthreads; + int tq_nalloc; + int tq_minalloc; + int tq_maxalloc; + task_t *tq_freelist; + task_t tq_task; +}; + +static task_t * +task_alloc(taskq_t *tq, int tqflags) +{ + task_t *t; + + if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) { + tq->tq_freelist = t->task_next; + } else { + mutex_exit(&tq->tq_lock); + if (tq->tq_nalloc >= tq->tq_maxalloc) { + if (!(tqflags & KM_SLEEP)) { + mutex_enter(&tq->tq_lock); + return (NULL); + } + /* + * We don't want to exceed tq_maxalloc, but we can't + * wait for other tasks to complete (and thus free up + * task structures) without risking deadlock with + * the caller. So, we just delay for one second + * to throttle the allocation rate. 
+ */ + delay(hz); + } + t = kmem_alloc(sizeof (task_t), tqflags); + mutex_enter(&tq->tq_lock); + if (t != NULL) + tq->tq_nalloc++; + } + return (t); +} + +static void +task_free(taskq_t *tq, task_t *t) +{ + if (tq->tq_nalloc <= tq->tq_minalloc) { + t->task_next = tq->tq_freelist; + tq->tq_freelist = t; + } else { + tq->tq_nalloc--; + mutex_exit(&tq->tq_lock); + kmem_free(t, sizeof (task_t)); + mutex_enter(&tq->tq_lock); + } +} + +taskqid_t +taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags) +{ + task_t *t; + + if (taskq_now) { + func(arg); + return (1); + } + + mutex_enter(&tq->tq_lock); + ASSERT(tq->tq_flags & TASKQ_ACTIVE); + if ((t = task_alloc(tq, tqflags)) == NULL) { + mutex_exit(&tq->tq_lock); + return (0); + } + t->task_next = &tq->tq_task; + t->task_prev = tq->tq_task.task_prev; + t->task_next->task_prev = t; + t->task_prev->task_next = t; + t->task_func = func; + t->task_arg = arg; + cv_signal(&tq->tq_dispatch_cv); + mutex_exit(&tq->tq_lock); + return (1); +} + +void +taskq_wait(taskq_t *tq) +{ + mutex_enter(&tq->tq_lock); + while (tq->tq_task.task_next != &tq->tq_task || tq->tq_active != 0) + cv_wait(&tq->tq_wait_cv, &tq->tq_lock); + mutex_exit(&tq->tq_lock); +} + +static void * +taskq_thread(void *arg) +{ + taskq_t *tq = arg; + task_t *t; + + mutex_enter(&tq->tq_lock); + while (tq->tq_flags & TASKQ_ACTIVE) { + if ((t = tq->tq_task.task_next) == &tq->tq_task) { + if (--tq->tq_active == 0) + cv_broadcast(&tq->tq_wait_cv); + cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock); + tq->tq_active++; + continue; + } + t->task_prev->task_next = t->task_next; + t->task_next->task_prev = t->task_prev; + mutex_exit(&tq->tq_lock); + + rw_enter(&tq->tq_threadlock, RW_READER); + t->task_func(t->task_arg); + rw_exit(&tq->tq_threadlock); + + mutex_enter(&tq->tq_lock); + task_free(tq, t); + } + tq->tq_nthreads--; + cv_broadcast(&tq->tq_wait_cv); + mutex_exit(&tq->tq_lock); + return (NULL); +} + +/*ARGSUSED*/ +taskq_t * +taskq_create(const char *name, int nthreads, pri_t pri, + int minalloc, int maxalloc, uint_t flags) +{ + taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP); + int t; + + rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL); + mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL); + cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL); + cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL); + tq->tq_flags = flags | TASKQ_ACTIVE; + tq->tq_active = nthreads; + tq->tq_nthreads = nthreads; + tq->tq_minalloc = minalloc; + tq->tq_maxalloc = maxalloc; + tq->tq_task.task_next = &tq->tq_task; + tq->tq_task.task_prev = &tq->tq_task; + tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP); + + if (flags & TASKQ_PREPOPULATE) { + mutex_enter(&tq->tq_lock); + while (minalloc-- > 0) + task_free(tq, task_alloc(tq, KM_SLEEP)); + mutex_exit(&tq->tq_lock); + } + + for (t = 0; t < nthreads; t++) + (void) thr_create(0, 0, taskq_thread, + tq, THR_BOUND, &tq->tq_threadlist[t]); + + return (tq); +} + +void +taskq_destroy(taskq_t *tq) +{ + int t; + int nthreads = tq->tq_nthreads; + + taskq_wait(tq); + + mutex_enter(&tq->tq_lock); + + tq->tq_flags &= ~TASKQ_ACTIVE; + cv_broadcast(&tq->tq_dispatch_cv); + + while (tq->tq_nthreads != 0) + cv_wait(&tq->tq_wait_cv, &tq->tq_lock); + + tq->tq_minalloc = 0; + while (tq->tq_nalloc != 0) { + ASSERT(tq->tq_freelist != NULL); + task_free(tq, task_alloc(tq, KM_SLEEP)); + } + + mutex_exit(&tq->tq_lock); + + for (t = 0; t < nthreads; t++) + (void) thr_join(tq->tq_threadlist[t], NULL, NULL); + + kmem_free(tq->tq_threadlist, nthreads * sizeof 
(thread_t)); + + rw_destroy(&tq->tq_threadlock); + mutex_destroy(&tq->tq_lock); + cv_destroy(&tq->tq_dispatch_cv); + cv_destroy(&tq->tq_wait_cv); + + kmem_free(tq, sizeof (taskq_t)); +} + +int +taskq_member(taskq_t *tq, void *t) +{ + int i; + + if (taskq_now) + return (1); + + for (i = 0; i < tq->tq_nthreads; i++) + if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t) + return (1); + + return (0); +} + +void +system_taskq_init(void) +{ + system_taskq = taskq_create("system_taskq", 64, minclsyspri, 4, 512, + TASKQ_DYNAMIC | TASKQ_PREPOPULATE); +} diff --git a/external/cddl/osnet/dist/lib/libzpool/common/util.c b/external/cddl/osnet/dist/lib/libzpool/common/util.c new file mode 100644 index 000000000000..781edb6e8abc --- /dev/null +++ b/external/cddl/osnet/dist/lib/libzpool/common/util.c @@ -0,0 +1,156 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Routines needed by more than one client of libzpool. 
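+ *
+ * nicenum() below rounds to the nearest power-of-1024 unit and keeps
+ * the result short: 1536 comes out as "1.50K", while an exact
+ * megabyte (1048576) comes out as "1M".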
+ */ + +void +nicenum(uint64_t num, char *buf) +{ + uint64_t n = num; + int index = 0; + char u; + + while (n >= 1024) { + n = (n + (1024 / 2)) / 1024; /* Round up or down */ + index++; + } + + u = " KMGTPE"[index]; + + if (index == 0) { + (void) sprintf(buf, "%llu", (u_longlong_t)n); + } else if (n < 10 && (num & (num - 1)) != 0) { + (void) sprintf(buf, "%.2f%c", + (double)num / (1ULL << 10 * index), u); + } else if (n < 100 && (num & (num - 1)) != 0) { + (void) sprintf(buf, "%.1f%c", + (double)num / (1ULL << 10 * index), u); + } else { + (void) sprintf(buf, "%llu%c", (u_longlong_t)n, u); + } +} + +static void +show_vdev_stats(const char *desc, const char *ctype, nvlist_t *nv, int indent) +{ + vdev_stat_t *vs; + vdev_stat_t v0 = { 0 }; + uint64_t sec; + uint64_t is_log = 0; + nvlist_t **child; + uint_t c, children; + char used[6], avail[6]; + char rops[6], wops[6], rbytes[6], wbytes[6], rerr[6], werr[6], cerr[6]; + char *prefix = ""; + + if (indent == 0 && desc != NULL) { + (void) printf(" " + " capacity operations bandwidth ---- errors ----\n"); + (void) printf("description " + "used avail read write read write read write cksum\n"); + } + + if (desc != NULL) { + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log); + + if (is_log) + prefix = "log "; + + if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &c) != 0) + vs = &v0; + + sec = MAX(1, vs->vs_timestamp / NANOSEC); + + nicenum(vs->vs_alloc, used); + nicenum(vs->vs_space - vs->vs_alloc, avail); + nicenum(vs->vs_ops[ZIO_TYPE_READ] / sec, rops); + nicenum(vs->vs_ops[ZIO_TYPE_WRITE] / sec, wops); + nicenum(vs->vs_bytes[ZIO_TYPE_READ] / sec, rbytes); + nicenum(vs->vs_bytes[ZIO_TYPE_WRITE] / sec, wbytes); + nicenum(vs->vs_read_errors, rerr); + nicenum(vs->vs_write_errors, werr); + nicenum(vs->vs_checksum_errors, cerr); + + (void) printf("%*s%s%*s%*s%*s %5s %5s %5s %5s %5s %5s %5s\n", + indent, "", + prefix, + indent + strlen(prefix) - 25 - (vs->vs_space ? 0 : 12), + desc, + vs->vs_space ? 6 : 0, vs->vs_space ? used : "", + vs->vs_space ? 6 : 0, vs->vs_space ? 
avail : "", + rops, wops, rbytes, wbytes, rerr, werr, cerr); + } + + if (nvlist_lookup_nvlist_array(nv, ctype, &child, &children) != 0) + return; + + for (c = 0; c < children; c++) { + nvlist_t *cnv = child[c]; + char *cname, *tname; + uint64_t np; + if (nvlist_lookup_string(cnv, ZPOOL_CONFIG_PATH, &cname) && + nvlist_lookup_string(cnv, ZPOOL_CONFIG_TYPE, &cname)) + cname = ""; + tname = calloc(1, strlen(cname) + 2); + (void) strcpy(tname, cname); + if (nvlist_lookup_uint64(cnv, ZPOOL_CONFIG_NPARITY, &np) == 0) + tname[strlen(tname)] = '0' + np; + show_vdev_stats(tname, ctype, cnv, indent + 2); + free(tname); + } +} + +void +show_pool_stats(spa_t *spa) +{ + nvlist_t *config, *nvroot; + char *name; + + VERIFY(spa_get_stats(spa_name(spa), &config, NULL, 0) == 0); + + VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, + &name) == 0); + + show_vdev_stats(name, ZPOOL_CONFIG_CHILDREN, nvroot, 0); + show_vdev_stats(NULL, ZPOOL_CONFIG_L2CACHE, nvroot, 0); + show_vdev_stats(NULL, ZPOOL_CONFIG_SPARES, nvroot, 0); + + nvlist_free(config); +} diff --git a/external/cddl/osnet/dist/onet-src.tar.bz2 b/external/cddl/osnet/dist/onet-src.tar.bz2 new file mode 100644 index 000000000000..b56f3b974d6a Binary files /dev/null and b/external/cddl/osnet/dist/onet-src.tar.bz2 differ diff --git a/external/cddl/osnet/dist/uts/common/fs/gfs.c b/external/cddl/osnet/dist/uts/common/fs/gfs.c new file mode 100644 index 000000000000..4d24df60f75b --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/gfs.c @@ -0,0 +1,1178 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* Portions Copyright 2007 Shivakumar GN */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +/* + * Generic pseudo-filesystem routines. + * + * There are significant similarities between the implementation of certain file + * system entry points across different filesystems. While one could attempt to + * "choke up on the bat" and incorporate common functionality into a VOP + * preamble or postamble, such an approach is limited in the benefit it can + * provide. In this file we instead define a toolkit of routines which can be + * called from a filesystem (with in-kernel pseudo-filesystems being the focus + * of the exercise) in a more component-like fashion. 
+ * + * There are three basic classes of routines: + * + * 1) Lowlevel support routines + * + * These routines are designed to play a support role for existing + * pseudo-filesystems (such as procfs). They simplify common tasks, + * without forcing the filesystem to hand over management to GFS. The + * routines covered are: + * + * gfs_readdir_init() + * gfs_readdir_emit() + * gfs_readdir_emitn() + * gfs_readdir_pred() + * gfs_readdir_fini() + * gfs_lookup_dot() + * + * 2) Complete GFS management + * + * These routines take a more active role in management of the + * pseudo-filesystem. They handle the relationship between vnode private + * data and VFS data, as well as the relationship between vnodes in the + * directory hierarchy. + * + * In order to use these interfaces, the first member of every private + * v_data must be a gfs_file_t or a gfs_dir_t. This hands over all control + * to GFS. + * + * gfs_file_create() + * gfs_dir_create() + * gfs_root_create() + * + * gfs_file_inactive() + * gfs_dir_inactive() + * gfs_dir_lookup() + * gfs_dir_readdir() + * + * gfs_vop_inactive() + * gfs_vop_lookup() + * gfs_vop_readdir() + * gfs_vop_map() + * + * 3) Single File pseudo-filesystems + * + * This routine creates a rooted file to be overlayed ontop of another + * file in the physical filespace. + * + * Note that the parent is NULL (actually the vfs), but there is nothing + * technically keeping such a file from utilizing the "Complete GFS + * management" set of routines. + * + * gfs_root_create_file() + */ + +/* + * gfs_make_opsvec: take an array of vnode type definitions and create + * their vnodeops_t structures + * + * This routine takes an array of gfs_opsvec_t's. It could + * alternatively take an array of gfs_opsvec_t*'s, which would allow + * vnode types to be completely defined in files external to the caller + * of gfs_make_opsvec(). As it stands, much more sharing takes place -- + * both the caller and the vnode type provider need to access gfsv_ops + * and gfsv_template, and the caller also needs to know gfsv_name. + */ +int +gfs_make_opsvec(gfs_opsvec_t *vec) +{ + int error, i; + + for (i = 0; ; i++) { + if (vec[i].gfsv_name == NULL) + return (0); + error = vn_make_ops(vec[i].gfsv_name, vec[i].gfsv_template, + vec[i].gfsv_ops); + if (error) + break; + } + + cmn_err(CE_WARN, "gfs_make_opsvec: bad vnode ops template for '%s'", + vec[i].gfsv_name); + for (i--; i >= 0; i--) { + vn_freevnodeops(*vec[i].gfsv_ops); + *vec[i].gfsv_ops = NULL; + } + return (error); +} + +/* + * Low level directory routines + * + * These routines provide some simple abstractions for reading directories. + * They are designed to be used by existing pseudo filesystems (namely procfs) + * that already have a complicated management infrastructure. + */ + +/* + * gfs_get_parent_ino: used to obtain a parent inode number and the + * inode number of the given vnode in preparation for calling gfs_readdir_init. 
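+ *
+ * Three cases are handled below: for the filesystem root the parent
+ * inode is reported as the directory's own inode, for an extended
+ * attribute directory the parent inode is obtained with
+ * VOP_GETATTR(AT_NODEID), and otherwise it is taken directly from the
+ * parent's gfs_file_t.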
+ */ +int +gfs_get_parent_ino(vnode_t *dvp, cred_t *cr, caller_context_t *ct, + ino64_t *pino, ino64_t *ino) +{ + vnode_t *parent; + gfs_dir_t *dp = dvp->v_data; + int error; + + *ino = dp->gfsd_file.gfs_ino; + parent = dp->gfsd_file.gfs_parent; + + if (parent == NULL) { + *pino = *ino; /* root of filesystem */ + } else if (dvp->v_flag & V_XATTRDIR) { + vattr_t va; + + va.va_mask = AT_NODEID; + error = VOP_GETATTR(parent, &va, 0, cr, ct); + if (error) + return (error); + *pino = va.va_nodeid; + } else { + *pino = ((gfs_file_t *)(parent->v_data))->gfs_ino; + } + + return (0); +} + +/* + * gfs_readdir_init: initiate a generic readdir + * st - a pointer to an uninitialized gfs_readdir_state_t structure + * name_max - the directory's maximum file name length + * ureclen - the exported file-space record length (1 for non-legacy FSs) + * uiop - the uiop passed to readdir + * parent - the parent directory's inode + * self - this directory's inode + * flags - flags from VOP_READDIR + * + * Returns 0 or a non-zero errno. + * + * Typical VOP_READDIR usage of gfs_readdir_*: + * + * if ((error = gfs_readdir_init(...)) != 0) + * return (error); + * eof = 0; + * while ((error = gfs_readdir_pred(..., &voffset)) != 0) { + * if (!consumer_entry_at(voffset)) + * voffset = consumer_next_entry(voffset); + * if (consumer_eof(voffset)) { + * eof = 1 + * break; + * } + * if ((error = gfs_readdir_emit(..., voffset, + * consumer_ino(voffset), consumer_name(voffset))) != 0) + * break; + * } + * return (gfs_readdir_fini(..., error, eofp, eof)); + * + * As you can see, a zero result from gfs_readdir_pred() or + * gfs_readdir_emit() indicates that processing should continue, + * whereas a non-zero result indicates that the loop should terminate. + * Most consumers need do nothing more than let gfs_readdir_fini() + * determine what the cause of failure was and return the appropriate + * value. 
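+ *
+ * Note that gfs_readdir_pred() reserves directory offsets 0 and 1 for
+ * the "." and ".." entries and hands back virtual offsets starting at
+ * 0; gfs_readdir_emit() places the entry for virtual offset voff at
+ * directory offset (voff + 2) * ureclen and then advances d_off and
+ * uio_loffset to the next record.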
+ */ +int +gfs_readdir_init(gfs_readdir_state_t *st, int name_max, int ureclen, + uio_t *uiop, ino64_t parent, ino64_t self, int flags) +{ + size_t dirent_size; + + if (uiop->uio_loffset < 0 || uiop->uio_resid <= 0 || + (uiop->uio_loffset % ureclen) != 0) + return (EINVAL); + + st->grd_ureclen = ureclen; + st->grd_oresid = uiop->uio_resid; + st->grd_namlen = name_max; + if (flags & V_RDDIR_ENTFLAGS) + dirent_size = EDIRENT_RECLEN(st->grd_namlen); + else + dirent_size = DIRENT64_RECLEN(st->grd_namlen); + st->grd_dirent = kmem_zalloc(dirent_size, KM_SLEEP); + st->grd_parent = parent; + st->grd_self = self; + st->grd_flags = flags; + + return (0); +} + +/* + * gfs_readdir_emit_int: internal routine to emit directory entry + * + * st - the current readdir state, which must have d_ino/ed_ino + * and d_name/ed_name set + * uiop - caller-supplied uio pointer + * next - the offset of the next entry + */ +static int +gfs_readdir_emit_int(gfs_readdir_state_t *st, uio_t *uiop, offset_t next) +{ + int reclen; + dirent64_t *dp; + edirent_t *edp; + + if (st->grd_flags & V_RDDIR_ENTFLAGS) { + edp = st->grd_dirent; + reclen = EDIRENT_RECLEN(strlen(edp->ed_name)); + } else { + dp = st->grd_dirent; + reclen = DIRENT64_RECLEN(strlen(dp->d_name)); + } + + if (reclen > uiop->uio_resid) { + /* + * Error if no entries were returned yet + */ + if (uiop->uio_resid == st->grd_oresid) + return (EINVAL); + return (-1); + } + + if (st->grd_flags & V_RDDIR_ENTFLAGS) { + edp->ed_off = next; + edp->ed_reclen = (ushort_t)reclen; + } else { + dp->d_off = next; + dp->d_reclen = (ushort_t)reclen; + } + + if (uiomove((caddr_t)st->grd_dirent, reclen, UIO_READ, uiop)) + return (EFAULT); + + uiop->uio_loffset = next; + + return (0); +} + +/* + * gfs_readdir_emit: emit a directory entry + * voff - the virtual offset (obtained from gfs_readdir_pred) + * ino - the entry's inode + * name - the entry's name + * eflags - value for ed_eflags (if processing edirent_t) + * + * Returns a 0 on success, a non-zero errno on failure, or -1 if the + * readdir loop should terminate. A non-zero result (either errno or + * -1) from this function is typically passed directly to + * gfs_readdir_fini(). + */ +int +gfs_readdir_emit(gfs_readdir_state_t *st, uio_t *uiop, offset_t voff, + ino64_t ino, const char *name, int eflags) +{ + offset_t off = (voff + 2) * st->grd_ureclen; + + if (st->grd_flags & V_RDDIR_ENTFLAGS) { + edirent_t *edp = st->grd_dirent; + + edp->ed_ino = ino; + (void) strncpy(edp->ed_name, name, st->grd_namlen); + edp->ed_eflags = eflags; + } else { + dirent64_t *dp = st->grd_dirent; + + dp->d_ino = ino; + (void) strncpy(dp->d_name, name, st->grd_namlen); + } + + /* + * Inter-entry offsets are invalid, so we assume a record size of + * grd_ureclen and explicitly set the offset appropriately. + */ + return (gfs_readdir_emit_int(st, uiop, off + st->grd_ureclen)); +} + +/* + * gfs_readdir_emitn: like gfs_readdir_emit(), but takes an integer + * instead of a string for the entry's name. + */ +int +gfs_readdir_emitn(gfs_readdir_state_t *st, uio_t *uiop, offset_t voff, + ino64_t ino, unsigned long num) +{ + char buf[40]; + + numtos(num, buf); + return (gfs_readdir_emit(st, uiop, voff, ino, buf, 0)); +} + +/* + * gfs_readdir_pred: readdir loop predicate + * voffp - a pointer in which the next virtual offset should be stored + * + * Returns a 0 on success, a non-zero errno on failure, or -1 if the + * readdir loop should terminate. 
A non-zero result (either errno or + * -1) from this function is typically passed directly to + * gfs_readdir_fini(). + */ +int +gfs_readdir_pred(gfs_readdir_state_t *st, uio_t *uiop, offset_t *voffp) +{ + offset_t off, voff; + int error; + +top: + if (uiop->uio_resid <= 0) + return (-1); + + off = uiop->uio_loffset / st->grd_ureclen; + voff = off - 2; + if (off == 0) { + if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_self, + ".", 0)) == 0) + goto top; + } else if (off == 1) { + if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_parent, + "..", 0)) == 0) + goto top; + } else { + *voffp = voff; + return (0); + } + + return (error); +} + +/* + * gfs_readdir_fini: generic readdir cleanup + * error - if positive, an error to return + * eofp - the eofp passed to readdir + * eof - the eof value + * + * Returns a 0 on success, a non-zero errno on failure. This result + * should be returned from readdir. + */ +int +gfs_readdir_fini(gfs_readdir_state_t *st, int error, int *eofp, int eof) +{ + size_t dirent_size; + + if (st->grd_flags & V_RDDIR_ENTFLAGS) + dirent_size = EDIRENT_RECLEN(st->grd_namlen); + else + dirent_size = DIRENT64_RECLEN(st->grd_namlen); + kmem_free(st->grd_dirent, dirent_size); + if (error > 0) + return (error); + if (eofp) + *eofp = eof; + return (0); +} + +/* + * gfs_lookup_dot + * + * Performs a basic check for "." and ".." directory entries. + */ +int +gfs_lookup_dot(vnode_t **vpp, vnode_t *dvp, vnode_t *pvp, const char *nm) +{ + if (*nm == '\0' || strcmp(nm, ".") == 0) { + VN_HOLD(dvp); + *vpp = dvp; + return (0); + } else if (strcmp(nm, "..") == 0) { + if (pvp == NULL) { + ASSERT(dvp->v_flag & VROOT); + VN_HOLD(dvp); + *vpp = dvp; + } else { + VN_HOLD(pvp); + *vpp = pvp; + } + return (0); + } + + return (-1); +} + +/* + * gfs_file_create(): create a new GFS file + * + * size - size of private data structure (v_data) + * pvp - parent vnode (GFS directory) + * ops - vnode operations vector + * + * In order to use this interface, the parent vnode must have been created by + * gfs_dir_create(), and the private data stored in v_data must have a + * 'gfs_file_t' as its first field. + * + * Given these constraints, this routine will automatically: + * + * - Allocate v_data for the vnode + * - Initialize necessary fields in the vnode + * - Hold the parent + */ +vnode_t * +gfs_file_create(size_t size, vnode_t *pvp, vnodeops_t *ops) +{ + gfs_file_t *fp; + vnode_t *vp; + + /* + * Allocate vnode and internal data structure + */ + fp = kmem_zalloc(size, KM_SLEEP); + vp = vn_alloc(KM_SLEEP); + + /* + * Set up various pointers + */ + fp->gfs_vnode = vp; + fp->gfs_parent = pvp; + vp->v_data = fp; + fp->gfs_size = size; + fp->gfs_type = GFS_FILE; + + /* + * Initialize vnode and hold parent. + */ + vn_setops(vp, ops); + if (pvp) { + VN_SET_VFS_TYPE_DEV(vp, pvp->v_vfsp, VREG, 0); + VN_HOLD(pvp); + } + + return (vp); +} + +/* + * gfs_dir_create: creates a new directory in the parent + * + * size - size of private data structure (v_data) + * pvp - parent vnode (GFS directory) + * ops - vnode operations vector + * entries - NULL-terminated list of static entries (if any) + * maxlen - maximum length of a directory entry + * readdir_cb - readdir callback (see gfs_dir_readdir) + * inode_cb - inode callback (see gfs_dir_readdir) + * lookup_cb - lookup callback (see gfs_dir_lookup) + * + * In order to use this function, the first member of the private vnode + * structure (v_data) must be a gfs_dir_t. 
For each directory, there are + * static entries, defined when the structure is initialized, and dynamic + * entries, retrieved through callbacks. + * + * If a directory has static entries, then it must supply a inode callback, + * which will compute the inode number based on the parent and the index. + * For a directory with dynamic entries, the caller must supply a readdir + * callback and a lookup callback. If a static lookup fails, we fall back to + * the supplied lookup callback, if any. + * + * This function also performs the same initialization as gfs_file_create(). + */ +vnode_t * +gfs_dir_create(size_t struct_size, vnode_t *pvp, vnodeops_t *ops, + gfs_dirent_t *entries, gfs_inode_cb inode_cb, int maxlen, + gfs_readdir_cb readdir_cb, gfs_lookup_cb lookup_cb) +{ + vnode_t *vp; + gfs_dir_t *dp; + gfs_dirent_t *de; + + vp = gfs_file_create(struct_size, pvp, ops); + vp->v_type = VDIR; + + dp = vp->v_data; + dp->gfsd_file.gfs_type = GFS_DIR; + dp->gfsd_maxlen = maxlen; + + if (entries != NULL) { + for (de = entries; de->gfse_name != NULL; de++) + dp->gfsd_nstatic++; + + dp->gfsd_static = kmem_alloc( + dp->gfsd_nstatic * sizeof (gfs_dirent_t), KM_SLEEP); + bcopy(entries, dp->gfsd_static, + dp->gfsd_nstatic * sizeof (gfs_dirent_t)); + } + + dp->gfsd_readdir = readdir_cb; + dp->gfsd_lookup = lookup_cb; + dp->gfsd_inode = inode_cb; + + mutex_init(&dp->gfsd_lock, NULL, MUTEX_DEFAULT, NULL); + + return (vp); +} + +/* + * gfs_root_create(): create a root vnode for a GFS filesystem + * + * Similar to gfs_dir_create(), this creates a root vnode for a filesystem. The + * only difference is that it takes a vfs_t instead of a vnode_t as its parent. + */ +vnode_t * +gfs_root_create(size_t size, vfs_t *vfsp, vnodeops_t *ops, ino64_t ino, + gfs_dirent_t *entries, gfs_inode_cb inode_cb, int maxlen, + gfs_readdir_cb readdir_cb, gfs_lookup_cb lookup_cb) +{ + vnode_t *vp = gfs_dir_create(size, NULL, ops, entries, inode_cb, + maxlen, readdir_cb, lookup_cb); + + /* Manually set the inode */ + ((gfs_file_t *)vp->v_data)->gfs_ino = ino; + + VFS_HOLD(vfsp); + VN_SET_VFS_TYPE_DEV(vp, vfsp, VDIR, 0); + vp->v_flag |= VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT; + + return (vp); +} + +/* + * gfs_root_create_file(): create a root vnode for a GFS file as a filesystem + * + * Similar to gfs_root_create(), this creates a root vnode for a file to + * be the pseudo-filesystem. + */ +vnode_t * +gfs_root_create_file(size_t size, vfs_t *vfsp, vnodeops_t *ops, ino64_t ino) +{ + vnode_t *vp = gfs_file_create(size, NULL, ops); + + ((gfs_file_t *)vp->v_data)->gfs_ino = ino; + + VFS_HOLD(vfsp); + VN_SET_VFS_TYPE_DEV(vp, vfsp, VREG, 0); + vp->v_flag |= VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT; + + return (vp); +} + +/* + * gfs_file_inactive() + * + * Called from the VOP_INACTIVE() routine. If necessary, this routine will + * remove the given vnode from the parent directory and clean up any references + * in the VFS layer. + * + * If the vnode was not removed (due to a race with vget), then NULL is + * returned. Otherwise, a pointer to the private data is returned. + */ +void * +gfs_file_inactive(vnode_t *vp) +{ + int i; + gfs_dirent_t *ge = NULL; + gfs_file_t *fp = vp->v_data; + gfs_dir_t *dp = NULL; + void *data; + + if (fp->gfs_parent == NULL || (vp->v_flag & V_XATTRDIR)) + goto found; + + dp = fp->gfs_parent->v_data; + + /* + * First, see if this vnode is cached in the parent. + */ + gfs_dir_lock(dp); + + /* + * Find it in the set of static entries. 
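+	 * A match means the parent directory holds a cached reference
+	 * to this vnode in its static entry table (gfse_vnode); if the
+	 * vnode is really being freed below, that cached pointer is
+	 * cleared.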
+ */ + for (i = 0; i < dp->gfsd_nstatic; i++) { + ge = &dp->gfsd_static[i]; + + if (ge->gfse_vnode == vp) + goto found; + } + + /* + * If 'ge' is NULL, then it is a dynamic entry. + */ + ge = NULL; + +found: + if (vp->v_flag & V_XATTRDIR) { + mutex_enter(&fp->gfs_parent->v_lock); + } + mutex_enter(&vp->v_lock); + if (vp->v_count == 1) { + /* + * Really remove this vnode + */ + data = vp->v_data; + if (ge != NULL) { + /* + * If this was a statically cached entry, simply set the + * cached vnode to NULL. + */ + ge->gfse_vnode = NULL; + } + if (vp->v_flag & V_XATTRDIR) { + fp->gfs_parent->v_xattrdir = NULL; + mutex_exit(&fp->gfs_parent->v_lock); + } + mutex_exit(&vp->v_lock); + + /* + * Free vnode and release parent + */ + if (fp->gfs_parent) { + if (dp) { + gfs_dir_unlock(dp); + } + VN_RELE(fp->gfs_parent); + } else { + ASSERT(vp->v_vfsp != NULL); + VFS_RELE(vp->v_vfsp); + } + vn_free(vp); + } else { + vp->v_count--; + data = NULL; + mutex_exit(&vp->v_lock); + if (vp->v_flag & V_XATTRDIR) { + mutex_exit(&fp->gfs_parent->v_lock); + } + if (dp) + gfs_dir_unlock(dp); + } + + return (data); +} + +/* + * gfs_dir_inactive() + * + * Same as above, but for directories. + */ +void * +gfs_dir_inactive(vnode_t *vp) +{ + gfs_dir_t *dp; + + ASSERT(vp->v_type == VDIR); + + if ((dp = gfs_file_inactive(vp)) != NULL) { + mutex_destroy(&dp->gfsd_lock); + if (dp->gfsd_nstatic) + kmem_free(dp->gfsd_static, + dp->gfsd_nstatic * sizeof (gfs_dirent_t)); + } + + return (dp); +} + +/* + * gfs_dir_lookup_dynamic() + * + * This routine looks up the provided name amongst the dynamic entries + * in the gfs directory and returns the corresponding vnode, if found. + * + * The gfs directory is expected to be locked by the caller prior to + * calling this function. The directory will be unlocked during the + * execution of this function, but will be locked upon return from the + * function. This function returns 0 on success, non-zero on error. + * + * The dynamic lookups are performed by invoking the lookup + * callback, which is passed to this function as the first argument. + * The arguments to the callback are: + * + * int gfs_lookup_cb(vnode_t *pvp, const char *nm, vnode_t **vpp, cred_t *cr, + * int flags, int *deflgs, pathname_t *rpnp); + * + * pvp - parent vnode + * nm - name of entry + * vpp - pointer to resulting vnode + * cr - pointer to cred + * flags - flags value from lookup request + * ignored here; currently only used to request + * insensitive lookups + * direntflgs - output parameter, directory entry flags + * ignored here; currently only used to indicate a lookup + * has more than one possible match when case is not considered + * realpnp - output parameter, real pathname + * ignored here; when lookup was performed case-insensitively, + * this field contains the "real" name of the file. + * + * Returns 0 on success, non-zero on error. + */ +static int +gfs_dir_lookup_dynamic(gfs_lookup_cb callback, gfs_dir_t *dp, + const char *nm, vnode_t *dvp, vnode_t **vpp, cred_t *cr, int flags, + int *direntflags, pathname_t *realpnp) +{ + gfs_file_t *fp; + ino64_t ino; + int ret; + + ASSERT(GFS_DIR_LOCKED(dp)); + + /* + * Drop the directory lock, as the lookup routine + * will need to allocate memory, or otherwise deadlock on this + * directory. + */ + gfs_dir_unlock(dp); + ret = callback(dvp, nm, vpp, &ino, cr, flags, direntflags, realpnp); + gfs_dir_lock(dp); + + /* + * The callback for extended attributes returns a vnode + * with v_data from an underlying fs. 
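+	 * In that case v_data is not a gfs_file_t, so the index and
+	 * inode number are only stamped on the result when the parent
+	 * is not an extended-attribute directory.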
+ */ + if (ret == 0 && !IS_XATTRDIR(dvp)) { + fp = (gfs_file_t *)((*vpp)->v_data); + fp->gfs_index = -1; + fp->gfs_ino = ino; + } + + return (ret); +} + +/* + * gfs_dir_lookup_static() + * + * This routine looks up the provided name amongst the static entries + * in the gfs directory and returns the corresponding vnode, if found. + * The first argument to the function is a pointer to the comparison + * function this function should use to decide if names are a match. + * + * If a match is found, and GFS_CACHE_VNODE is set and the vnode + * exists, we simply return the existing vnode. Otherwise, we call + * the static entry's callback routine, caching the result if + * necessary. If the idx pointer argument is non-NULL, we use it to + * return the index of the matching static entry. + * + * The gfs directory is expected to be locked by the caller prior to calling + * this function. The directory may be unlocked during the execution of + * this function, but will be locked upon return from the function. + * + * This function returns 0 if a match is found, ENOENT if not. + */ +static int +gfs_dir_lookup_static(int (*compare)(const char *, const char *), + gfs_dir_t *dp, const char *nm, vnode_t *dvp, int *idx, + vnode_t **vpp, pathname_t *rpnp) +{ + gfs_dirent_t *ge; + vnode_t *vp = NULL; + int i; + + ASSERT(GFS_DIR_LOCKED(dp)); + + /* + * Search static entries. + */ + for (i = 0; i < dp->gfsd_nstatic; i++) { + ge = &dp->gfsd_static[i]; + + if (compare(ge->gfse_name, nm) == 0) { + if (rpnp) + (void) strlcpy(rpnp->pn_buf, ge->gfse_name, + rpnp->pn_bufsize); + + if (ge->gfse_vnode) { + ASSERT(ge->gfse_flags & GFS_CACHE_VNODE); + vp = ge->gfse_vnode; + VN_HOLD(vp); + break; + } + + /* + * We drop the directory lock, as the constructor will + * need to do KM_SLEEP allocations. If we return from + * the constructor only to find that a parallel + * operation has completed, and GFS_CACHE_VNODE is set + * for this entry, we discard the result in favor of + * the cached vnode. + */ + gfs_dir_unlock(dp); + vp = ge->gfse_ctor(dvp); + gfs_dir_lock(dp); + + ((gfs_file_t *)vp->v_data)->gfs_index = i; + + /* Set the inode according to the callback. */ + ((gfs_file_t *)vp->v_data)->gfs_ino = + dp->gfsd_inode(dvp, i); + + if (ge->gfse_flags & GFS_CACHE_VNODE) { + if (ge->gfse_vnode == NULL) { + ge->gfse_vnode = vp; + } else { + /* + * A parallel constructor beat us to it; + * return existing vnode. We have to be + * careful because we can't release the + * current vnode while holding the + * directory lock; its inactive routine + * will try to lock this directory. + */ + vnode_t *oldvp = vp; + vp = ge->gfse_vnode; + VN_HOLD(vp); + + gfs_dir_unlock(dp); + VN_RELE(oldvp); + gfs_dir_lock(dp); + } + } + break; + } + } + + if (vp == NULL) + return (ENOENT); + else if (idx) + *idx = i; + *vpp = vp; + return (0); +} + +/* + * gfs_dir_lookup() + * + * Looks up the given name in the directory and returns the corresponding + * vnode, if found. + * + * First, we search statically defined entries, if any, with a call to + * gfs_dir_lookup_static(). If no static entry is found, and we have + * a callback function we try a dynamic lookup via gfs_dir_lookup_dynamic(). + * + * This function returns 0 on success, non-zero on error. 
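+ *
+ * Purely as an illustration (hypothetical names; real consumers live in
+ * other files), a directory with two fixed entries would typically hand
+ * a NULL-terminated table to gfs_dir_create(), e.g.:
+ *
+ *	static gfs_dirent_t my_entries[] = {
+ *		{ "snapshot", my_snapdir_ctor, GFS_CACHE_VNODE },
+ *		{ "shares", my_shares_ctor, GFS_CACHE_VNODE },
+ *		{ NULL }
+ *	};
+ *
+ * Lookups of those names are then satisfied by gfs_dir_lookup_static()
+ * above, and anything else falls through to the dynamic lookup callback.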
+ */ +int +gfs_dir_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp, cred_t *cr, + int flags, int *direntflags, pathname_t *realpnp) +{ + gfs_dir_t *dp = dvp->v_data; + boolean_t casecheck; + vnode_t *dynvp = NULL; + vnode_t *vp = NULL; + int (*compare)(const char *, const char *); + int error, idx; + + ASSERT(dvp->v_type == VDIR); + + if (gfs_lookup_dot(vpp, dvp, dp->gfsd_file.gfs_parent, nm) == 0) + return (0); + + casecheck = (flags & FIGNORECASE) != 0 && direntflags != NULL; + if (vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) || + (flags & FIGNORECASE)) + compare = strcasecmp; + else + compare = strcmp; + + gfs_dir_lock(dp); + + error = gfs_dir_lookup_static(compare, dp, nm, dvp, &idx, &vp, realpnp); + + if (vp && casecheck) { + gfs_dirent_t *ge; + int i; + + for (i = idx + 1; i < dp->gfsd_nstatic; i++) { + ge = &dp->gfsd_static[i]; + + if (strcasecmp(ge->gfse_name, nm) == 0) { + *direntflags |= ED_CASE_CONFLICT; + goto out; + } + } + } + + if ((error || casecheck) && dp->gfsd_lookup) + error = gfs_dir_lookup_dynamic(dp->gfsd_lookup, dp, nm, dvp, + &dynvp, cr, flags, direntflags, vp ? NULL : realpnp); + + if (vp && dynvp) { + /* static and dynamic entries are case-insensitive conflict */ + ASSERT(casecheck); + *direntflags |= ED_CASE_CONFLICT; + VN_RELE(dynvp); + } else if (vp == NULL) { + vp = dynvp; + } else if (error == ENOENT) { + error = 0; + } else if (error) { + VN_RELE(vp); + vp = NULL; + } + +out: + gfs_dir_unlock(dp); + + *vpp = vp; + return (error); +} + +/* + * gfs_dir_readdir: does a readdir() on the given directory + * + * dvp - directory vnode + * uiop - uio structure + * eofp - eof pointer + * data - arbitrary data passed to readdir callback + * + * This routine does all the readdir() dirty work. Even so, the caller must + * supply two callbacks in order to get full compatibility. + * + * If the directory contains static entries, an inode callback must be + * specified. This avoids having to create every vnode and call VOP_GETATTR() + * when reading the directory. This function has the following arguments: + * + * ino_t gfs_inode_cb(vnode_t *vp, int index); + * + * vp - vnode for the directory + * index - index in original gfs_dirent_t array + * + * Returns the inode number for the given entry. + * + * For directories with dynamic entries, a readdir callback must be provided. + * This is significantly more complex, thanks to the particulars of + * VOP_READDIR(). + * + * int gfs_readdir_cb(vnode_t *vp, void *dp, int *eofp, + * offset_t *off, offset_t *nextoff, void *data, int flags) + * + * vp - directory vnode + * dp - directory entry, sized according to maxlen given to + * gfs_dir_create(). callback must fill in d_name and + * d_ino (if a dirent64_t), or ed_name, ed_ino, and ed_eflags + * (if an edirent_t). edirent_t is used if V_RDDIR_ENTFLAGS + * is set in 'flags'. + * eofp - callback must set to 1 when EOF has been reached + * off - on entry, the last offset read from the directory. Callback + * must set to the offset of the current entry, typically left + * untouched. + * nextoff - callback must set to offset of next entry. Typically + * (off + 1) + * data - caller-supplied data + * flags - VOP_READDIR flags + * + * Return 0 on success, or error on failure. 
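+ *
+ * As a sketch only (hypothetical helpers my_count()/my_inode(), and
+ * edirent_t handling omitted for brevity), a minimal dynamic readdir
+ * callback exposing entries named "0" .. "N-1" might look like:
+ *
+ *	static int
+ *	my_readdir_cb(vnode_t *vp, void *dp, int *eofp,
+ *	    offset_t *offp, offset_t *nextp, void *data, int flags)
+ *	{
+ *		dirent64_t *dep = dp;
+ *
+ *		if (*offp >= my_count(vp)) {
+ *			*eofp = 1;
+ *			return (0);
+ *		}
+ *		numtos(*offp, dep->d_name);
+ *		dep->d_ino = my_inode(vp, *offp);
+ *		*nextp = *offp + 1;
+ *		return (0);
+ *	}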
+ */ +int +gfs_dir_readdir(vnode_t *dvp, uio_t *uiop, int *eofp, void *data, cred_t *cr, + caller_context_t *ct, int flags) +{ + gfs_readdir_state_t gstate; + int error, eof = 0; + ino64_t ino, pino; + offset_t off, next; + gfs_dir_t *dp = dvp->v_data; + + error = gfs_get_parent_ino(dvp, cr, ct, &pino, &ino); + if (error) + return (error); + + if ((error = gfs_readdir_init(&gstate, dp->gfsd_maxlen, 1, uiop, + pino, ino, flags)) != 0) + return (error); + + while ((error = gfs_readdir_pred(&gstate, uiop, &off)) == 0 && + !eof) { + + if (off >= 0 && off < dp->gfsd_nstatic) { + ino = dp->gfsd_inode(dvp, off); + + if ((error = gfs_readdir_emit(&gstate, uiop, + off, ino, dp->gfsd_static[off].gfse_name, 0)) + != 0) + break; + + } else if (dp->gfsd_readdir) { + off -= dp->gfsd_nstatic; + + if ((error = dp->gfsd_readdir(dvp, + gstate.grd_dirent, &eof, &off, &next, + data, flags)) != 0 || eof) + break; + + off += dp->gfsd_nstatic + 2; + next += dp->gfsd_nstatic + 2; + + if ((error = gfs_readdir_emit_int(&gstate, uiop, + next)) != 0) + break; + } else { + /* + * Offset is beyond the end of the static entries, and + * we have no dynamic entries. Set EOF. + */ + eof = 1; + } + } + + return (gfs_readdir_fini(&gstate, error, eofp, eof)); +} + + +/* + * gfs_vop_lookup: VOP_LOOKUP() entry point + * + * For use directly in vnode ops table. Given a GFS directory, calls + * gfs_dir_lookup() as necessary. + */ +/* ARGSUSED */ +int +gfs_vop_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp, + int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, + int *direntflags, pathname_t *realpnp) +{ + return (gfs_dir_lookup(dvp, nm, vpp, cr, flags, direntflags, realpnp)); +} + +/* + * gfs_vop_readdir: VOP_READDIR() entry point + * + * For use directly in vnode ops table. Given a GFS directory, calls + * gfs_dir_readdir() as necessary. + */ +/* ARGSUSED */ +int +gfs_vop_readdir(vnode_t *vp, uio_t *uiop, cred_t *cr, int *eofp, + caller_context_t *ct, int flags) +{ + return (gfs_dir_readdir(vp, uiop, eofp, NULL, cr, ct, flags)); +} + + +/* + * gfs_vop_map: VOP_MAP() entry point + * + * Convenient routine for handling pseudo-files that wish to allow mmap() calls. + * This function only works for readonly files, and uses the read function for + * the vnode to fill in the data. The mapped data is immediately faulted in and + * filled with the necessary data during this call; there are no getpage() or + * putpage() routines. + */ +/* ARGSUSED */ +int +gfs_vop_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp, + size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cred, + caller_context_t *ct) +{ + int rv; + ssize_t resid = len; + + /* + * Check for bad parameters + */ +#ifdef _ILP32 + if (len > MAXOFF_T) + return (ENOMEM); +#endif + if (vp->v_flag & VNOMAP) + return (ENOTSUP); + if (off > MAXOFF_T) + return (EFBIG); + if ((long)off < 0 || (long)(off + len) < 0) + return (EINVAL); + if (vp->v_type != VREG) + return (ENODEV); + if ((prot & (PROT_EXEC | PROT_WRITE)) != 0) + return (EACCES); + + /* + * Find appropriate address if needed, otherwise clear address range. 
+ */ + as_rangelock(as); + rv = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags); + if (rv != 0) { + as_rangeunlock(as); + return (rv); + } + + /* + * Create mapping + */ + rv = as_map(as, *addrp, len, segvn_create, zfod_argsp); + as_rangeunlock(as); + if (rv != 0) + return (rv); + + /* + * Fill with data from read() + */ + rv = vn_rdwr(UIO_READ, vp, *addrp, len, off, UIO_USERSPACE, + 0, (rlim64_t)0, cred, &resid); + + if (rv == 0 && resid != 0) + rv = ENXIO; + + if (rv != 0) { + as_rangelock(as); + (void) as_unmap(as, *addrp, len); + as_rangeunlock(as); + } + + return (rv); +} + +/* + * gfs_vop_inactive: VOP_INACTIVE() entry point + * + * Given a vnode that is a GFS file or directory, call gfs_file_inactive() or + * gfs_dir_inactive() as necessary, and kmem_free()s associated private data. + */ +/* ARGSUSED */ +void +gfs_vop_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) +{ + gfs_file_t *fp = vp->v_data; + void *data; + + if (fp->gfs_type == GFS_DIR) + data = gfs_dir_inactive(vp); + else + data = gfs_file_inactive(vp); + + if (data != NULL) + kmem_free(data, fp->gfs_size); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/arc.c b/external/cddl/osnet/dist/uts/common/fs/zfs/arc.c new file mode 100644 index 000000000000..e6bba841ace5 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/arc.c @@ -0,0 +1,4480 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * DVA-based Adjustable Replacement Cache + * + * While much of the theory of operation used here is + * based on the self-tuning, low overhead replacement cache + * presented by Megiddo and Modha at FAST 2003, there are some + * significant differences: + * + * 1. The Megiddo and Modha model assumes any page is evictable. + * Pages in its cache cannot be "locked" into memory. This makes + * the eviction algorithm simple: evict the last page in the list. + * This also make the performance characteristics easy to reason + * about. Our cache is not so simple. At any given moment, some + * subset of the blocks in the cache are un-evictable because we + * have handed out a reference to them. Blocks are only evictable + * when there are no external references active. This makes + * eviction far more problematic: we choose to evict the evictable + * blocks that are the "lowest" in the list. + * + * There are times when it is not possible to evict the requested + * space. In these circumstances we are unable to adjust the cache + * size. To prevent the cache growing unbounded at these times we + * implement a "cache throttle" that slows the flow of new data + * into the cache until we can make space available. + * + * 2. 
The Megiddo and Modha model assumes a fixed cache size.
+ * Pages are evicted when the cache is full and there is a cache
+ * miss. Our model has a variable sized cache. It grows with
+ * high use, but also tries to react to memory pressure from the
+ * operating system: decreasing its size when system memory is
+ * tight.
+ *
+ * 3. The Megiddo and Modha model assumes a fixed page size. All
+ * elements of the cache are therefore exactly the same size. So
+ * when adjusting the cache size following a cache miss, it's simply
+ * a matter of choosing a single page to evict. In our model, we
+ * have variable sized cache blocks (ranging from 512 bytes to
+ * 128K bytes). We therefore choose a set of blocks to evict to make
+ * space for a cache miss that approximates as closely as possible
+ * the space used by the new block.
+ *
+ * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
+ * by N. Megiddo & D. Modha, FAST 2003
+ */
+
+/*
+ * The locking model:
+ *
+ * A new reference to a cache buffer can be obtained in two
+ * ways: 1) via a hash table lookup using the DVA as a key,
+ * or 2) via one of the ARC lists. The arc_read() interface
+ * uses method 1, while the internal arc algorithms for
+ * adjusting the cache use method 2. We therefore provide two
+ * types of locks: 1) the hash table lock array, and 2) the
+ * arc list locks.
+ *
+ * Buffers do not have their own mutexes, rather they rely on the
+ * hash table mutexes for the bulk of their protection (i.e. most
+ * fields in the arc_buf_hdr_t are protected by these mutexes).
+ *
+ * buf_hash_find() returns the appropriate mutex (held) when it
+ * locates the requested buffer in the hash table. It returns
+ * NULL for the mutex if the buffer was not in the table.
+ * (A short usage sketch appears at the end of this comment block.)
+ *
+ * buf_hash_remove() expects the appropriate hash mutex to be
+ * already held before it is invoked.
+ *
+ * Each arc state also has a mutex which is used to protect the
+ * buffer list associated with the state. When attempting to
+ * obtain a hash table lock while holding an arc list lock you
+ * must use mutex_tryenter() to avoid deadlock. Also note that
+ * the active state mutex must be held before the ghost state mutex.
+ *
+ * Arc buffers may have an associated eviction callback function.
+ * This function will be invoked prior to removing the buffer (e.g.
+ * in arc_do_user_evicts()). Note however that the data associated
+ * with the buffer may be evicted prior to the callback. The callback
+ * must be made with *no locks held* (to prevent deadlock). Additionally,
+ * the users of callbacks must ensure that their private data is
+ * protected from simultaneous callbacks from arc_buf_evict()
+ * and arc_do_user_evicts().
+ *
+ * Note that the majority of the performance stats are manipulated
+ * with atomic operations.
+ * + * The L2ARC uses the l2arc_buflist_mtx global mutex for the following: + * + * - L2ARC buflist creation + * - L2ARC buflist eviction + * - L2ARC write completion, which walks L2ARC buflists + * - ARC header destruction, as it removes from L2ARC buflists + * - ARC header release, as it removes from L2ARC buflists + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef _KERNEL +#include +#include +#include +#include +#endif +#include +#include + +static kmutex_t arc_reclaim_thr_lock; +static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */ +static uint8_t arc_thread_exit; + +extern int zfs_write_limit_shift; +extern uint64_t zfs_write_limit_max; +extern kmutex_t zfs_write_limit_lock; + +#define ARC_REDUCE_DNLC_PERCENT 3 +uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT; + +typedef enum arc_reclaim_strategy { + ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */ + ARC_RECLAIM_CONS /* Conservative reclaim strategy */ +} arc_reclaim_strategy_t; + +/* number of seconds before growing cache again */ +static int arc_grow_retry = 60; + +/* + * minimum lifespan of a prefetch block in clock ticks + * (initialized in arc_init()) + */ +static int arc_min_prefetch_lifespan; + +static int arc_dead; + +/* + * The arc has filled available memory and has now warmed up. + */ +static boolean_t arc_warm; + +/* + * These tunables are for performance analysis. + */ +uint64_t zfs_arc_max; +uint64_t zfs_arc_min; +uint64_t zfs_arc_meta_limit = 0; +int zfs_mdcomp_disable = 0; + +/* + * Note that buffers can be in one of 6 states: + * ARC_anon - anonymous (discussed below) + * ARC_mru - recently used, currently cached + * ARC_mru_ghost - recentely used, no longer in cache + * ARC_mfu - frequently used, currently cached + * ARC_mfu_ghost - frequently used, no longer in cache + * ARC_l2c_only - exists in L2ARC but not other states + * When there are no active references to the buffer, they are + * are linked onto a list in one of these arc states. These are + * the only buffers that can be evicted or deleted. Within each + * state there are multiple lists, one for meta-data and one for + * non-meta-data. Meta-data (indirect blocks, blocks of dnodes, + * etc.) is tracked separately so that it can be managed more + * explicitly: favored over data, limited explicitly. + * + * Anonymous buffers are buffers that are not associated with + * a DVA. These are buffers that hold dirty block copies + * before they are written to stable storage. By definition, + * they are "ref'd" and are considered part of arc_mru + * that cannot be freed. Generally, they will aquire a DVA + * as they are written and migrate onto the arc_mru list. + * + * The ARC_l2c_only state is for buffers that are in the second + * level ARC but no longer in any of the ARC_m* lists. The second + * level ARC itself may also contain buffers that are in any of + * the ARC_m* states - meaning that a buffer can exist in two + * places. The reason for the ARC_l2c_only state is to keep the + * buffer header in the hash table, so that reads that hit the + * second level ARC benefit from these fast lookups. 
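+ *
+ * To make the hash-lock convention concrete, a lookup typically follows
+ * this pattern (an illustrative sketch, not a quote of any one caller):
+ *
+ *	kmutex_t *hash_lock;
+ *	arc_buf_hdr_t *hdr;
+ *
+ *	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
+ *	if (hdr != NULL) {
+ *		... hdr is protected by hash_lock here ...
+ *		mutex_exit(hash_lock);
+ *	}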
+ */ + +typedef struct arc_state { + list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */ + uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */ + uint64_t arcs_size; /* total amount of data in this state */ + kmutex_t arcs_mtx; +} arc_state_t; + +/* The 6 states: */ +static arc_state_t ARC_anon; +static arc_state_t ARC_mru; +static arc_state_t ARC_mru_ghost; +static arc_state_t ARC_mfu; +static arc_state_t ARC_mfu_ghost; +static arc_state_t ARC_l2c_only; + +typedef struct arc_stats { + kstat_named_t arcstat_hits; + kstat_named_t arcstat_misses; + kstat_named_t arcstat_demand_data_hits; + kstat_named_t arcstat_demand_data_misses; + kstat_named_t arcstat_demand_metadata_hits; + kstat_named_t arcstat_demand_metadata_misses; + kstat_named_t arcstat_prefetch_data_hits; + kstat_named_t arcstat_prefetch_data_misses; + kstat_named_t arcstat_prefetch_metadata_hits; + kstat_named_t arcstat_prefetch_metadata_misses; + kstat_named_t arcstat_mru_hits; + kstat_named_t arcstat_mru_ghost_hits; + kstat_named_t arcstat_mfu_hits; + kstat_named_t arcstat_mfu_ghost_hits; + kstat_named_t arcstat_deleted; + kstat_named_t arcstat_recycle_miss; + kstat_named_t arcstat_mutex_miss; + kstat_named_t arcstat_evict_skip; + kstat_named_t arcstat_hash_elements; + kstat_named_t arcstat_hash_elements_max; + kstat_named_t arcstat_hash_collisions; + kstat_named_t arcstat_hash_chains; + kstat_named_t arcstat_hash_chain_max; + kstat_named_t arcstat_p; + kstat_named_t arcstat_c; + kstat_named_t arcstat_c_min; + kstat_named_t arcstat_c_max; + kstat_named_t arcstat_size; + kstat_named_t arcstat_hdr_size; + kstat_named_t arcstat_l2_hits; + kstat_named_t arcstat_l2_misses; + kstat_named_t arcstat_l2_feeds; + kstat_named_t arcstat_l2_rw_clash; + kstat_named_t arcstat_l2_writes_sent; + kstat_named_t arcstat_l2_writes_done; + kstat_named_t arcstat_l2_writes_error; + kstat_named_t arcstat_l2_writes_hdr_miss; + kstat_named_t arcstat_l2_evict_lock_retry; + kstat_named_t arcstat_l2_evict_reading; + kstat_named_t arcstat_l2_free_on_write; + kstat_named_t arcstat_l2_abort_lowmem; + kstat_named_t arcstat_l2_cksum_bad; + kstat_named_t arcstat_l2_io_error; + kstat_named_t arcstat_l2_size; + kstat_named_t arcstat_l2_hdr_size; + kstat_named_t arcstat_memory_throttle_count; +} arc_stats_t; + +static arc_stats_t arc_stats = { + { "hits", KSTAT_DATA_UINT64 }, + { "misses", KSTAT_DATA_UINT64 }, + { "demand_data_hits", KSTAT_DATA_UINT64 }, + { "demand_data_misses", KSTAT_DATA_UINT64 }, + { "demand_metadata_hits", KSTAT_DATA_UINT64 }, + { "demand_metadata_misses", KSTAT_DATA_UINT64 }, + { "prefetch_data_hits", KSTAT_DATA_UINT64 }, + { "prefetch_data_misses", KSTAT_DATA_UINT64 }, + { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, + { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, + { "mru_hits", KSTAT_DATA_UINT64 }, + { "mru_ghost_hits", KSTAT_DATA_UINT64 }, + { "mfu_hits", KSTAT_DATA_UINT64 }, + { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, + { "deleted", KSTAT_DATA_UINT64 }, + { "recycle_miss", KSTAT_DATA_UINT64 }, + { "mutex_miss", KSTAT_DATA_UINT64 }, + { "evict_skip", KSTAT_DATA_UINT64 }, + { "hash_elements", KSTAT_DATA_UINT64 }, + { "hash_elements_max", KSTAT_DATA_UINT64 }, + { "hash_collisions", KSTAT_DATA_UINT64 }, + { "hash_chains", KSTAT_DATA_UINT64 }, + { "hash_chain_max", KSTAT_DATA_UINT64 }, + { "p", KSTAT_DATA_UINT64 }, + { "c", KSTAT_DATA_UINT64 }, + { "c_min", KSTAT_DATA_UINT64 }, + { "c_max", KSTAT_DATA_UINT64 }, + { "size", KSTAT_DATA_UINT64 }, + { "hdr_size", KSTAT_DATA_UINT64 }, + { "l2_hits", 
KSTAT_DATA_UINT64 }, + { "l2_misses", KSTAT_DATA_UINT64 }, + { "l2_feeds", KSTAT_DATA_UINT64 }, + { "l2_rw_clash", KSTAT_DATA_UINT64 }, + { "l2_writes_sent", KSTAT_DATA_UINT64 }, + { "l2_writes_done", KSTAT_DATA_UINT64 }, + { "l2_writes_error", KSTAT_DATA_UINT64 }, + { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, + { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, + { "l2_evict_reading", KSTAT_DATA_UINT64 }, + { "l2_free_on_write", KSTAT_DATA_UINT64 }, + { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, + { "l2_cksum_bad", KSTAT_DATA_UINT64 }, + { "l2_io_error", KSTAT_DATA_UINT64 }, + { "l2_size", KSTAT_DATA_UINT64 }, + { "l2_hdr_size", KSTAT_DATA_UINT64 }, + { "memory_throttle_count", KSTAT_DATA_UINT64 } +}; + +#define ARCSTAT(stat) (arc_stats.stat.value.ui64) + +#define ARCSTAT_INCR(stat, val) \ + atomic_add_64(&arc_stats.stat.value.ui64, (val)); + +#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) +#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) + +#define ARCSTAT_MAX(stat, val) { \ + uint64_t m; \ + while ((val) > (m = arc_stats.stat.value.ui64) && \ + (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ + continue; \ +} + +#define ARCSTAT_MAXSTAT(stat) \ + ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) + +/* + * We define a macro to allow ARC hits/misses to be easily broken down by + * two separate conditions, giving a total of four different subtypes for + * each of hits and misses (so eight statistics total). + */ +#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ + if (cond1) { \ + if (cond2) { \ + ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ + } else { \ + ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ + } \ + } else { \ + if (cond2) { \ + ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ + } else { \ + ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ + } \ + } + +kstat_t *arc_ksp; +static arc_state_t *arc_anon; +static arc_state_t *arc_mru; +static arc_state_t *arc_mru_ghost; +static arc_state_t *arc_mfu; +static arc_state_t *arc_mfu_ghost; +static arc_state_t *arc_l2c_only; + +/* + * There are several ARC variables that are critical to export as kstats -- + * but we don't want to have to grovel around in the kstat whenever we wish to + * manipulate them. For these variables, we therefore define them to be in + * terms of the statistic variable. This assures that we are not introducing + * the possibility of inconsistency by having shadow copies of the variables, + * while still allowing the code to be readable. 
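+ *
+ * For example, arc_size below expands (via ARCSTAT) to
+ * arc_stats.arcstat_size.value.ui64, so every read or update of arc_size
+ * operates directly on the exported kstat value.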
+ */ +#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ +#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ +#define arc_c ARCSTAT(arcstat_c) /* target size of cache */ +#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ +#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ + +static int arc_no_grow; /* Don't try to grow cache size */ +static uint64_t arc_tempreserve; +static uint64_t arc_meta_used; +static uint64_t arc_meta_limit; +static uint64_t arc_meta_max = 0; + +typedef struct l2arc_buf_hdr l2arc_buf_hdr_t; + +typedef struct arc_callback arc_callback_t; + +struct arc_callback { + void *acb_private; + arc_done_func_t *acb_done; + arc_buf_t *acb_buf; + zio_t *acb_zio_dummy; + arc_callback_t *acb_next; +}; + +typedef struct arc_write_callback arc_write_callback_t; + +struct arc_write_callback { + void *awcb_private; + arc_done_func_t *awcb_ready; + arc_done_func_t *awcb_done; + arc_buf_t *awcb_buf; +}; + +struct arc_buf_hdr { + /* protected by hash lock */ + dva_t b_dva; + uint64_t b_birth; + uint64_t b_cksum0; + + kmutex_t b_freeze_lock; + zio_cksum_t *b_freeze_cksum; + + arc_buf_hdr_t *b_hash_next; + arc_buf_t *b_buf; + uint32_t b_flags; + uint32_t b_datacnt; + + arc_callback_t *b_acb; + kcondvar_t b_cv; + + /* immutable */ + arc_buf_contents_t b_type; + uint64_t b_size; + spa_t *b_spa; + + /* protected by arc state mutex */ + arc_state_t *b_state; + list_node_t b_arc_node; + + /* updated atomically */ + clock_t b_arc_access; + + /* self protecting */ + refcount_t b_refcnt; + + l2arc_buf_hdr_t *b_l2hdr; + list_node_t b_l2node; +}; + +static arc_buf_t *arc_eviction_list; +static kmutex_t arc_eviction_mtx; +static arc_buf_hdr_t arc_eviction_hdr; +static void arc_get_data_buf(arc_buf_t *buf); +static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); +static int arc_evict_needed(arc_buf_contents_t type); +static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes); + +#define GHOST_STATE(state) \ + ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ + (state) == arc_l2c_only) + +/* + * Private ARC flags. These flags are private ARC only flags that will show up + * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can + * be passed in as arc_flags in things like arc_read. However, these flags + * should never be passed and should only be set by ARC code. When adding new + * public flags, make sure not to smash the private ones. 
+ */ + +#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ +#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ +#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ +#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ +#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ +#define ARC_INDIRECT (1 << 14) /* this is an indirect block */ +#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ +#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */ +#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */ +#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */ +#define ARC_STORED (1 << 19) /* has been store()d to */ + +#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) +#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) +#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) +#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) +#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) +#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) +#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE) +#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \ + (hdr)->b_l2hdr != NULL) +#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) +#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) +#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) + +/* + * Other sizes + */ + +#define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) +#define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) + +/* + * Hash table routines + */ + +#define HT_LOCK_PAD 64 + +struct ht_lock { + kmutex_t ht_lock; +#ifdef _KERNEL + unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; +#endif +}; + +#define BUF_LOCKS 256 +typedef struct buf_hash_table { + uint64_t ht_mask; + arc_buf_hdr_t **ht_table; + struct ht_lock ht_locks[BUF_LOCKS]; +} buf_hash_table_t; + +static buf_hash_table_t buf_hash_table; + +#define BUF_HASH_INDEX(spa, dva, birth) \ + (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) +#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) +#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) +#define HDR_LOCK(buf) \ + (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) + +uint64_t zfs_crc64_table[256]; + +/* + * Level 2 ARC + */ + +#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ +#define L2ARC_HEADROOM 4 /* num of writes */ +#define L2ARC_FEED_SECS 1 /* caching interval */ + +#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) +#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) + +/* + * L2ARC Performance Tunables + */ +uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ +uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ +uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ +uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ +boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ + +/* + * L2ARC Internals + */ +typedef struct l2arc_dev { + vdev_t *l2ad_vdev; /* vdev */ + spa_t *l2ad_spa; /* spa */ + uint64_t l2ad_hand; /* next write location */ + uint64_t l2ad_write; /* desired write size, bytes */ + uint64_t l2ad_boost; /* warmup write boost, bytes */ + uint64_t l2ad_start; /* first addr on device */ + uint64_t l2ad_end; /* last addr on device */ + uint64_t l2ad_evict; /* last addr eviction reached */ + boolean_t l2ad_first; /* 
first sweep through */ + list_t *l2ad_buflist; /* buffer list */ + list_node_t l2ad_node; /* device list node */ +} l2arc_dev_t; + +static list_t L2ARC_dev_list; /* device list */ +static list_t *l2arc_dev_list; /* device list pointer */ +static kmutex_t l2arc_dev_mtx; /* device list mutex */ +static l2arc_dev_t *l2arc_dev_last; /* last device used */ +static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ +static list_t L2ARC_free_on_write; /* free after write buf list */ +static list_t *l2arc_free_on_write; /* free after write list ptr */ +static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ +static uint64_t l2arc_ndev; /* number of devices */ + +typedef struct l2arc_read_callback { + arc_buf_t *l2rcb_buf; /* read buffer */ + spa_t *l2rcb_spa; /* spa */ + blkptr_t l2rcb_bp; /* original blkptr */ + zbookmark_t l2rcb_zb; /* original bookmark */ + int l2rcb_flags; /* original flags */ +} l2arc_read_callback_t; + +typedef struct l2arc_write_callback { + l2arc_dev_t *l2wcb_dev; /* device info */ + arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ +} l2arc_write_callback_t; + +struct l2arc_buf_hdr { + /* protected by arc_buf_hdr mutex */ + l2arc_dev_t *b_dev; /* L2ARC device */ + daddr_t b_daddr; /* disk address, offset byte */ +}; + +typedef struct l2arc_data_free { + /* protected by l2arc_free_on_write_mtx */ + void *l2df_data; + size_t l2df_size; + void (*l2df_func)(void *, size_t); + list_node_t l2df_list_node; +} l2arc_data_free_t; + +static kmutex_t l2arc_feed_thr_lock; +static kcondvar_t l2arc_feed_thr_cv; +static uint8_t l2arc_thread_exit; + +static void l2arc_read_done(zio_t *zio); +static void l2arc_hdr_stat_add(void); +static void l2arc_hdr_stat_remove(void); + +static uint64_t +buf_hash(spa_t *spa, const dva_t *dva, uint64_t birth) +{ + uintptr_t spav = (uintptr_t)spa; + uint8_t *vdva = (uint8_t *)dva; + uint64_t crc = -1ULL; + int i; + + ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); + + for (i = 0; i < sizeof (dva_t); i++) + crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; + + crc ^= (spav>>8) ^ birth; + + return (crc); +} + +#define BUF_EMPTY(buf) \ + ((buf)->b_dva.dva_word[0] == 0 && \ + (buf)->b_dva.dva_word[1] == 0 && \ + (buf)->b_birth == 0) + +#define BUF_EQUAL(spa, dva, birth, buf) \ + ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ + ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ + ((buf)->b_birth == birth) && ((buf)->b_spa == spa) + +static arc_buf_hdr_t * +buf_hash_find(spa_t *spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp) +{ + uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); + kmutex_t *hash_lock = BUF_HASH_LOCK(idx); + arc_buf_hdr_t *buf; + + mutex_enter(hash_lock); + for (buf = buf_hash_table.ht_table[idx]; buf != NULL; + buf = buf->b_hash_next) { + if (BUF_EQUAL(spa, dva, birth, buf)) { + *lockp = hash_lock; + return (buf); + } + } + mutex_exit(hash_lock); + *lockp = NULL; + return (NULL); +} + +/* + * Insert an entry into the hash table. If there is already an element + * equal to elem in the hash table, then the already existing element + * will be returned and the new element will not be inserted. + * Otherwise returns NULL. 
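+ *
+ * In either case the bucket's hash lock is entered and handed back
+ * through *lockp, so a typical caller looks like (sketch only):
+ *
+ *	exists = buf_hash_insert(hdr, &hash_lock);
+ *	if (exists != NULL) {
+ *		... an equal header beat us into the table ...
+ *	}
+ *	mutex_exit(hash_lock);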
+ */ +static arc_buf_hdr_t * +buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) +{ + uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); + kmutex_t *hash_lock = BUF_HASH_LOCK(idx); + arc_buf_hdr_t *fbuf; + uint32_t i; + + ASSERT(!HDR_IN_HASH_TABLE(buf)); + *lockp = hash_lock; + mutex_enter(hash_lock); + for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; + fbuf = fbuf->b_hash_next, i++) { + if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) + return (fbuf); + } + + buf->b_hash_next = buf_hash_table.ht_table[idx]; + buf_hash_table.ht_table[idx] = buf; + buf->b_flags |= ARC_IN_HASH_TABLE; + + /* collect some hash table performance data */ + if (i > 0) { + ARCSTAT_BUMP(arcstat_hash_collisions); + if (i == 1) + ARCSTAT_BUMP(arcstat_hash_chains); + + ARCSTAT_MAX(arcstat_hash_chain_max, i); + } + + ARCSTAT_BUMP(arcstat_hash_elements); + ARCSTAT_MAXSTAT(arcstat_hash_elements); + + return (NULL); +} + +static void +buf_hash_remove(arc_buf_hdr_t *buf) +{ + arc_buf_hdr_t *fbuf, **bufp; + uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); + + ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); + ASSERT(HDR_IN_HASH_TABLE(buf)); + + bufp = &buf_hash_table.ht_table[idx]; + while ((fbuf = *bufp) != buf) { + ASSERT(fbuf != NULL); + bufp = &fbuf->b_hash_next; + } + *bufp = buf->b_hash_next; + buf->b_hash_next = NULL; + buf->b_flags &= ~ARC_IN_HASH_TABLE; + + /* collect some hash table performance data */ + ARCSTAT_BUMPDOWN(arcstat_hash_elements); + + if (buf_hash_table.ht_table[idx] && + buf_hash_table.ht_table[idx]->b_hash_next == NULL) + ARCSTAT_BUMPDOWN(arcstat_hash_chains); +} + +/* + * Global data structures and functions for the buf kmem cache. + */ +static kmem_cache_t *hdr_cache; +static kmem_cache_t *buf_cache; + +static void +buf_fini(void) +{ + int i; + + kmem_free(buf_hash_table.ht_table, + (buf_hash_table.ht_mask + 1) * sizeof (void *)); + for (i = 0; i < BUF_LOCKS; i++) + mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); + kmem_cache_destroy(hdr_cache); + kmem_cache_destroy(buf_cache); +} + +/* + * Constructor callback - called when the cache is empty + * and a new buf is requested. + */ +/* ARGSUSED */ +static int +hdr_cons(void *vbuf, void *unused, int kmflag) +{ + arc_buf_hdr_t *buf = vbuf; + + bzero(buf, sizeof (arc_buf_hdr_t)); + refcount_create(&buf->b_refcnt); + cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); + mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); + + ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); + return (0); +} + +/* ARGSUSED */ +static int +buf_cons(void *vbuf, void *unused, int kmflag) +{ + arc_buf_t *buf = vbuf; + + bzero(buf, sizeof (arc_buf_t)); + rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL); + return (0); +} + +/* + * Destructor callback - called when a cached buf is + * no longer required. + */ +/* ARGSUSED */ +static void +hdr_dest(void *vbuf, void *unused) +{ + arc_buf_hdr_t *buf = vbuf; + + refcount_destroy(&buf->b_refcnt); + cv_destroy(&buf->b_cv); + mutex_destroy(&buf->b_freeze_lock); + + ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); +} + +/* ARGSUSED */ +static void +buf_dest(void *vbuf, void *unused) +{ + arc_buf_t *buf = vbuf; + + rw_destroy(&buf->b_lock); +} + +/* + * Reclaim callback -- invoked when memory is low. + */ +/* ARGSUSED */ +static void +hdr_recl(void *unused) +{ + dprintf("hdr_recl called\n"); + /* + * umem calls the reclaim func when we destroy the buf cache, + * which is after we do arc_fini(). 
+ */ + if (!arc_dead) + cv_signal(&arc_reclaim_thr_cv); +} + +static void +buf_init(void) +{ + uint64_t *ct; + uint64_t hsize = 1ULL << 12; + int i, j; + + /* + * The hash table is big enough to fill all of physical memory + * with an average 64K block size. The table will take up + * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). + */ + while (hsize * 65536 < physmem * PAGESIZE) + hsize <<= 1; +retry: + buf_hash_table.ht_mask = hsize - 1; + buf_hash_table.ht_table = + kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); + if (buf_hash_table.ht_table == NULL) { + ASSERT(hsize > (1ULL << 8)); + hsize >>= 1; + goto retry; + } + + hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), + 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); + buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), + 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); + + for (i = 0; i < 256; i++) + for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) + *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); + + for (i = 0; i < BUF_LOCKS; i++) { + mutex_init(&buf_hash_table.ht_locks[i].ht_lock, + NULL, MUTEX_DEFAULT, NULL); + } +} + +#define ARC_MINTIME (hz>>4) /* 62 ms */ + +static void +arc_cksum_verify(arc_buf_t *buf) +{ + zio_cksum_t zc; + + if (!(zfs_flags & ZFS_DEBUG_MODIFY)) + return; + + mutex_enter(&buf->b_hdr->b_freeze_lock); + if (buf->b_hdr->b_freeze_cksum == NULL || + (buf->b_hdr->b_flags & ARC_IO_ERROR)) { + mutex_exit(&buf->b_hdr->b_freeze_lock); + return; + } + fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); + if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) + panic("buffer modified while frozen!"); + mutex_exit(&buf->b_hdr->b_freeze_lock); +} + +static int +arc_cksum_equal(arc_buf_t *buf) +{ + zio_cksum_t zc; + int equal; + + mutex_enter(&buf->b_hdr->b_freeze_lock); + fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); + equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); + mutex_exit(&buf->b_hdr->b_freeze_lock); + + return (equal); +} + +static void +arc_cksum_compute(arc_buf_t *buf, boolean_t force) +{ + if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) + return; + + mutex_enter(&buf->b_hdr->b_freeze_lock); + if (buf->b_hdr->b_freeze_cksum != NULL) { + mutex_exit(&buf->b_hdr->b_freeze_lock); + return; + } + buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); + fletcher_2_native(buf->b_data, buf->b_hdr->b_size, + buf->b_hdr->b_freeze_cksum); + mutex_exit(&buf->b_hdr->b_freeze_lock); +} + +void +arc_buf_thaw(arc_buf_t *buf) +{ + if (zfs_flags & ZFS_DEBUG_MODIFY) { + if (buf->b_hdr->b_state != arc_anon) + panic("modifying non-anon buffer!"); + if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) + panic("modifying buffer while i/o in progress!"); + arc_cksum_verify(buf); + } + + mutex_enter(&buf->b_hdr->b_freeze_lock); + if (buf->b_hdr->b_freeze_cksum != NULL) { + kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); + buf->b_hdr->b_freeze_cksum = NULL; + } + mutex_exit(&buf->b_hdr->b_freeze_lock); +} + +void +arc_buf_freeze(arc_buf_t *buf) +{ + if (!(zfs_flags & ZFS_DEBUG_MODIFY)) + return; + + ASSERT(buf->b_hdr->b_freeze_cksum != NULL || + buf->b_hdr->b_state == arc_anon); + arc_cksum_compute(buf, B_FALSE); +} + +static void +add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) +{ + ASSERT(MUTEX_HELD(hash_lock)); + + if ((refcount_add(&ab->b_refcnt, tag) == 1) && + (ab->b_state != arc_anon)) { + uint64_t delta = ab->b_size * ab->b_datacnt; + list_t *list = &ab->b_state->arcs_list[ab->b_type]; + uint64_t *size 
= &ab->b_state->arcs_lsize[ab->b_type]; + + ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); + mutex_enter(&ab->b_state->arcs_mtx); + ASSERT(list_link_active(&ab->b_arc_node)); + list_remove(list, ab); + if (GHOST_STATE(ab->b_state)) { + ASSERT3U(ab->b_datacnt, ==, 0); + ASSERT3P(ab->b_buf, ==, NULL); + delta = ab->b_size; + } + ASSERT(delta > 0); + ASSERT3U(*size, >=, delta); + atomic_add_64(size, -delta); + mutex_exit(&ab->b_state->arcs_mtx); + /* remove the prefetch flag if we get a reference */ + if (ab->b_flags & ARC_PREFETCH) + ab->b_flags &= ~ARC_PREFETCH; + } +} + +static int +remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) +{ + int cnt; + arc_state_t *state = ab->b_state; + + ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); + ASSERT(!GHOST_STATE(state)); + + if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && + (state != arc_anon)) { + uint64_t *size = &state->arcs_lsize[ab->b_type]; + + ASSERT(!MUTEX_HELD(&state->arcs_mtx)); + mutex_enter(&state->arcs_mtx); + ASSERT(!list_link_active(&ab->b_arc_node)); + list_insert_head(&state->arcs_list[ab->b_type], ab); + ASSERT(ab->b_datacnt > 0); + atomic_add_64(size, ab->b_size * ab->b_datacnt); + mutex_exit(&state->arcs_mtx); + } + return (cnt); +} + +/* + * Move the supplied buffer to the indicated state. The mutex + * for the buffer must be held by the caller. + */ +static void +arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) +{ + arc_state_t *old_state = ab->b_state; + int64_t refcnt = refcount_count(&ab->b_refcnt); + uint64_t from_delta, to_delta; + + ASSERT(MUTEX_HELD(hash_lock)); + ASSERT(new_state != old_state); + ASSERT(refcnt == 0 || ab->b_datacnt > 0); + ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); + + from_delta = to_delta = ab->b_datacnt * ab->b_size; + + /* + * If this buffer is evictable, transfer it from the + * old state list to the new state list. + */ + if (refcnt == 0) { + if (old_state != arc_anon) { + int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); + uint64_t *size = &old_state->arcs_lsize[ab->b_type]; + + if (use_mutex) + mutex_enter(&old_state->arcs_mtx); + + ASSERT(list_link_active(&ab->b_arc_node)); + list_remove(&old_state->arcs_list[ab->b_type], ab); + + /* + * If prefetching out of the ghost cache, + * we will have a non-null datacnt. 
+ */ + if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { + /* ghost elements have a ghost size */ + ASSERT(ab->b_buf == NULL); + from_delta = ab->b_size; + } + ASSERT3U(*size, >=, from_delta); + atomic_add_64(size, -from_delta); + + if (use_mutex) + mutex_exit(&old_state->arcs_mtx); + } + if (new_state != arc_anon) { + int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); + uint64_t *size = &new_state->arcs_lsize[ab->b_type]; + + if (use_mutex) + mutex_enter(&new_state->arcs_mtx); + + list_insert_head(&new_state->arcs_list[ab->b_type], ab); + + /* ghost elements have a ghost size */ + if (GHOST_STATE(new_state)) { + ASSERT(ab->b_datacnt == 0); + ASSERT(ab->b_buf == NULL); + to_delta = ab->b_size; + } + atomic_add_64(size, to_delta); + + if (use_mutex) + mutex_exit(&new_state->arcs_mtx); + } + } + + ASSERT(!BUF_EMPTY(ab)); + if (new_state == arc_anon) { + buf_hash_remove(ab); + } + + /* adjust state sizes */ + if (to_delta) + atomic_add_64(&new_state->arcs_size, to_delta); + if (from_delta) { + ASSERT3U(old_state->arcs_size, >=, from_delta); + atomic_add_64(&old_state->arcs_size, -from_delta); + } + ab->b_state = new_state; + + /* adjust l2arc hdr stats */ + if (new_state == arc_l2c_only) + l2arc_hdr_stat_add(); + else if (old_state == arc_l2c_only) + l2arc_hdr_stat_remove(); +} + +void +arc_space_consume(uint64_t space) +{ + atomic_add_64(&arc_meta_used, space); + atomic_add_64(&arc_size, space); +} + +void +arc_space_return(uint64_t space) +{ + ASSERT(arc_meta_used >= space); + if (arc_meta_max < arc_meta_used) + arc_meta_max = arc_meta_used; + atomic_add_64(&arc_meta_used, -space); + ASSERT(arc_size >= space); + atomic_add_64(&arc_size, -space); +} + +void * +arc_data_buf_alloc(uint64_t size) +{ + if (arc_evict_needed(ARC_BUFC_DATA)) + cv_signal(&arc_reclaim_thr_cv); + atomic_add_64(&arc_size, size); + return (zio_data_buf_alloc(size)); +} + +void +arc_data_buf_free(void *buf, uint64_t size) +{ + zio_data_buf_free(buf, size); + ASSERT(arc_size >= size); + atomic_add_64(&arc_size, -size); +} + +arc_buf_t * +arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) +{ + arc_buf_hdr_t *hdr; + arc_buf_t *buf; + + ASSERT3U(size, >, 0); + hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); + ASSERT(BUF_EMPTY(hdr)); + hdr->b_size = size; + hdr->b_type = type; + hdr->b_spa = spa; + hdr->b_state = arc_anon; + hdr->b_arc_access = 0; + buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); + buf->b_hdr = hdr; + buf->b_data = NULL; + buf->b_efunc = NULL; + buf->b_private = NULL; + buf->b_next = NULL; + hdr->b_buf = buf; + arc_get_data_buf(buf); + hdr->b_datacnt = 1; + hdr->b_flags = 0; + ASSERT(refcount_is_zero(&hdr->b_refcnt)); + (void) refcount_add(&hdr->b_refcnt, tag); + + return (buf); +} + +static arc_buf_t * +arc_buf_clone(arc_buf_t *from) +{ + arc_buf_t *buf; + arc_buf_hdr_t *hdr = from->b_hdr; + uint64_t size = hdr->b_size; + + buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); + buf->b_hdr = hdr; + buf->b_data = NULL; + buf->b_efunc = NULL; + buf->b_private = NULL; + buf->b_next = hdr->b_buf; + hdr->b_buf = buf; + arc_get_data_buf(buf); + bcopy(from->b_data, buf->b_data, size); + hdr->b_datacnt += 1; + return (buf); +} + +void +arc_buf_add_ref(arc_buf_t *buf, void* tag) +{ + arc_buf_hdr_t *hdr; + kmutex_t *hash_lock; + + /* + * Check to see if this buffer is evicted. Callers + * must verify b_data != NULL to know if the add_ref + * was successful. 
+ */ + rw_enter(&buf->b_lock, RW_READER); + if (buf->b_data == NULL) { + rw_exit(&buf->b_lock); + return; + } + hdr = buf->b_hdr; + ASSERT(hdr != NULL); + hash_lock = HDR_LOCK(hdr); + mutex_enter(hash_lock); + rw_exit(&buf->b_lock); + + ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); + add_reference(hdr, hash_lock, tag); + arc_access(hdr, hash_lock); + mutex_exit(hash_lock); + ARCSTAT_BUMP(arcstat_hits); + ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), + demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, + data, metadata, hits); +} + +/* + * Free the arc data buffer. If it is an l2arc write in progress, + * the buffer is placed on l2arc_free_on_write to be freed later. + */ +static void +arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t), + void *data, size_t size) +{ + if (HDR_L2_WRITING(hdr)) { + l2arc_data_free_t *df; + df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); + df->l2df_data = data; + df->l2df_size = size; + df->l2df_func = free_func; + mutex_enter(&l2arc_free_on_write_mtx); + list_insert_head(l2arc_free_on_write, df); + mutex_exit(&l2arc_free_on_write_mtx); + ARCSTAT_BUMP(arcstat_l2_free_on_write); + } else { + free_func(data, size); + } +} + +static void +arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) +{ + arc_buf_t **bufp; + + /* free up data associated with the buf */ + if (buf->b_data) { + arc_state_t *state = buf->b_hdr->b_state; + uint64_t size = buf->b_hdr->b_size; + arc_buf_contents_t type = buf->b_hdr->b_type; + + arc_cksum_verify(buf); + if (!recycle) { + if (type == ARC_BUFC_METADATA) { + arc_buf_data_free(buf->b_hdr, zio_buf_free, + buf->b_data, size); + arc_space_return(size); + } else { + ASSERT(type == ARC_BUFC_DATA); + arc_buf_data_free(buf->b_hdr, + zio_data_buf_free, buf->b_data, size); + atomic_add_64(&arc_size, -size); + } + } + if (list_link_active(&buf->b_hdr->b_arc_node)) { + uint64_t *cnt = &state->arcs_lsize[type]; + + ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); + ASSERT(state != arc_anon); + + ASSERT3U(*cnt, >=, size); + atomic_add_64(cnt, -size); + } + ASSERT3U(state->arcs_size, >=, size); + atomic_add_64(&state->arcs_size, -size); + buf->b_data = NULL; + ASSERT(buf->b_hdr->b_datacnt > 0); + buf->b_hdr->b_datacnt -= 1; + } + + /* only remove the buf if requested */ + if (!all) + return; + + /* remove the buf from the hdr list */ + for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) + continue; + *bufp = buf->b_next; + + ASSERT(buf->b_efunc == NULL); + + /* clean up the buf */ + buf->b_hdr = NULL; + kmem_cache_free(buf_cache, buf); +} + +static void +arc_hdr_destroy(arc_buf_hdr_t *hdr) +{ + ASSERT(refcount_is_zero(&hdr->b_refcnt)); + ASSERT3P(hdr->b_state, ==, arc_anon); + ASSERT(!HDR_IO_IN_PROGRESS(hdr)); + ASSERT(!(hdr->b_flags & ARC_STORED)); + + if (hdr->b_l2hdr != NULL) { + if (!MUTEX_HELD(&l2arc_buflist_mtx)) { + /* + * To prevent arc_free() and l2arc_evict() from + * attempting to free the same buffer at the same time, + * a FREE_IN_PROGRESS flag is given to arc_free() to + * give it priority. l2arc_evict() can't destroy this + * header while we are waiting on l2arc_buflist_mtx. + * + * The hdr may be removed from l2ad_buflist before we + * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 
+ */ + mutex_enter(&l2arc_buflist_mtx); + if (hdr->b_l2hdr != NULL) { + list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, + hdr); + } + mutex_exit(&l2arc_buflist_mtx); + } else { + list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); + } + ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); + kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t)); + if (hdr->b_state == arc_l2c_only) + l2arc_hdr_stat_remove(); + hdr->b_l2hdr = NULL; + } + + if (!BUF_EMPTY(hdr)) { + ASSERT(!HDR_IN_HASH_TABLE(hdr)); + bzero(&hdr->b_dva, sizeof (dva_t)); + hdr->b_birth = 0; + hdr->b_cksum0 = 0; + } + while (hdr->b_buf) { + arc_buf_t *buf = hdr->b_buf; + + if (buf->b_efunc) { + mutex_enter(&arc_eviction_mtx); + rw_enter(&buf->b_lock, RW_WRITER); + ASSERT(buf->b_hdr != NULL); + arc_buf_destroy(hdr->b_buf, FALSE, FALSE); + hdr->b_buf = buf->b_next; + buf->b_hdr = &arc_eviction_hdr; + buf->b_next = arc_eviction_list; + arc_eviction_list = buf; + rw_exit(&buf->b_lock); + mutex_exit(&arc_eviction_mtx); + } else { + arc_buf_destroy(hdr->b_buf, FALSE, TRUE); + } + } + if (hdr->b_freeze_cksum != NULL) { + kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); + hdr->b_freeze_cksum = NULL; + } + + ASSERT(!list_link_active(&hdr->b_arc_node)); + ASSERT3P(hdr->b_hash_next, ==, NULL); + ASSERT3P(hdr->b_acb, ==, NULL); + kmem_cache_free(hdr_cache, hdr); +} + +void +arc_buf_free(arc_buf_t *buf, void *tag) +{ + arc_buf_hdr_t *hdr = buf->b_hdr; + int hashed = hdr->b_state != arc_anon; + + ASSERT(buf->b_efunc == NULL); + ASSERT(buf->b_data != NULL); + + if (hashed) { + kmutex_t *hash_lock = HDR_LOCK(hdr); + + mutex_enter(hash_lock); + (void) remove_reference(hdr, hash_lock, tag); + if (hdr->b_datacnt > 1) + arc_buf_destroy(buf, FALSE, TRUE); + else + hdr->b_flags |= ARC_BUF_AVAILABLE; + mutex_exit(hash_lock); + } else if (HDR_IO_IN_PROGRESS(hdr)) { + int destroy_hdr; + /* + * We are in the middle of an async write. Don't destroy + * this buffer unless the write completes before we finish + * decrementing the reference count. + */ + mutex_enter(&arc_eviction_mtx); + (void) remove_reference(hdr, NULL, tag); + ASSERT(refcount_is_zero(&hdr->b_refcnt)); + destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); + mutex_exit(&arc_eviction_mtx); + if (destroy_hdr) + arc_hdr_destroy(hdr); + } else { + if (remove_reference(hdr, NULL, tag) > 0) { + ASSERT(HDR_IO_ERROR(hdr)); + arc_buf_destroy(buf, FALSE, TRUE); + } else { + arc_hdr_destroy(hdr); + } + } +} + +int +arc_buf_remove_ref(arc_buf_t *buf, void* tag) +{ + arc_buf_hdr_t *hdr = buf->b_hdr; + kmutex_t *hash_lock = HDR_LOCK(hdr); + int no_callback = (buf->b_efunc == NULL); + + if (hdr->b_state == arc_anon) { + arc_buf_free(buf, tag); + return (no_callback); + } + + mutex_enter(hash_lock); + ASSERT(hdr->b_state != arc_anon); + ASSERT(buf->b_data != NULL); + + (void) remove_reference(hdr, hash_lock, tag); + if (hdr->b_datacnt > 1) { + if (no_callback) + arc_buf_destroy(buf, FALSE, TRUE); + } else if (no_callback) { + ASSERT(hdr->b_buf == buf && buf->b_next == NULL); + hdr->b_flags |= ARC_BUF_AVAILABLE; + } + ASSERT(no_callback || hdr->b_datacnt > 1 || + refcount_is_zero(&hdr->b_refcnt)); + mutex_exit(hash_lock); + return (no_callback); +} + +int +arc_buf_size(arc_buf_t *buf) +{ + return (buf->b_hdr->b_size); +} + +/* + * Evict buffers from list until we've removed the specified number of + * bytes. Move the removed buffers to the appropriate evict state. + * If the recycle flag is set, then attempt to "recycle" a buffer: + * - look for a buffer to evict that is `bytes' long. 
+ * - return the data block from this buffer rather than freeing it. + * This flag is used by callers that are trying to make space for a + * new buffer in a full arc cache. + * + * This function makes a "best effort". It skips over any buffers + * it can't get a hash_lock on, and so may not catch all candidates. + * It may also return without evicting as much space as requested. + */ +static void * +arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle, + arc_buf_contents_t type) +{ + arc_state_t *evicted_state; + uint64_t bytes_evicted = 0, skipped = 0, missed = 0; + arc_buf_hdr_t *ab, *ab_prev = NULL; + list_t *list = &state->arcs_list[type]; + kmutex_t *hash_lock; + boolean_t have_lock; + void *stolen = NULL; + + ASSERT(state == arc_mru || state == arc_mfu); + + evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; + + mutex_enter(&state->arcs_mtx); + mutex_enter(&evicted_state->arcs_mtx); + + for (ab = list_tail(list); ab; ab = ab_prev) { + ab_prev = list_prev(list, ab); + /* prefetch buffers have a minimum lifespan */ + if (HDR_IO_IN_PROGRESS(ab) || + (spa && ab->b_spa != spa) || + (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && + lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { + skipped++; + continue; + } + /* "lookahead" for better eviction candidate */ + if (recycle && ab->b_size != bytes && + ab_prev && ab_prev->b_size == bytes) + continue; + hash_lock = HDR_LOCK(ab); + have_lock = MUTEX_HELD(hash_lock); + if (have_lock || mutex_tryenter(hash_lock)) { + ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); + ASSERT(ab->b_datacnt > 0); + while (ab->b_buf) { + arc_buf_t *buf = ab->b_buf; + if (!rw_tryenter(&buf->b_lock, RW_WRITER)) { + missed += 1; + break; + } + if (buf->b_data) { + bytes_evicted += ab->b_size; + if (recycle && ab->b_type == type && + ab->b_size == bytes && + !HDR_L2_WRITING(ab)) { + stolen = buf->b_data; + recycle = FALSE; + } + } + if (buf->b_efunc) { + mutex_enter(&arc_eviction_mtx); + arc_buf_destroy(buf, + buf->b_data == stolen, FALSE); + ab->b_buf = buf->b_next; + buf->b_hdr = &arc_eviction_hdr; + buf->b_next = arc_eviction_list; + arc_eviction_list = buf; + mutex_exit(&arc_eviction_mtx); + rw_exit(&buf->b_lock); + } else { + rw_exit(&buf->b_lock); + arc_buf_destroy(buf, + buf->b_data == stolen, TRUE); + } + } + if (ab->b_datacnt == 0) { + arc_change_state(evicted_state, ab, hash_lock); + ASSERT(HDR_IN_HASH_TABLE(ab)); + ab->b_flags |= ARC_IN_HASH_TABLE; + ab->b_flags &= ~ARC_BUF_AVAILABLE; + DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); + } + if (!have_lock) + mutex_exit(hash_lock); + if (bytes >= 0 && bytes_evicted >= bytes) + break; + } else { + missed += 1; + } + } + + mutex_exit(&evicted_state->arcs_mtx); + mutex_exit(&state->arcs_mtx); + + if (bytes_evicted < bytes) + dprintf("only evicted %lld bytes from %x", + (longlong_t)bytes_evicted, state); + + if (skipped) + ARCSTAT_INCR(arcstat_evict_skip, skipped); + + if (missed) + ARCSTAT_INCR(arcstat_mutex_miss, missed); + + /* + * We have just evicted some data into the ghost state, make + * sure we also adjust the ghost state size if necessary.
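The recycle path above hands the victim's data block straight back to the caller when a buffer of exactly the requested size is found, instead of freeing it and allocating a fresh one. A simplified sketch of that walk over a toy LRU list (all toy_* names are made up; no locking, ghost lists or flags):

#include <stddef.h>
#include <stdlib.h>

struct toy_buf {
	struct toy_buf	*prev;	/* toward the head (most recently used) */
	size_t		size;
	void		*data;
};

/*
 * Walk from the LRU tail toward the head, freeing victims until "bytes"
 * have been reclaimed.  If recycle is set, keep the first data block
 * whose size matches "want" exactly and hand it back to the caller.
 */
static void *
toy_evict(struct toy_buf *tail, size_t bytes, size_t want, int recycle)
{
	struct toy_buf *ab, *ab_prev;
	size_t evicted = 0;
	void *stolen = NULL;

	for (ab = tail; ab != NULL && evicted < bytes; ab = ab_prev) {
		ab_prev = ab->prev;
		evicted += ab->size;
		if (recycle && stolen == NULL && ab->size == want)
			stolen = ab->data;	/* recycle instead of free */
		else
			free(ab->data);
		ab->data = NULL;
	}
	return (stolen);
}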
+ */ + if (arc_no_grow && + arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { + int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + + arc_mru_ghost->arcs_size - arc_c; + + if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { + int64_t todelete = + MIN(arc_mru_ghost->arcs_lsize[type], mru_over); + arc_evict_ghost(arc_mru_ghost, NULL, todelete); + } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { + int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], + arc_mru_ghost->arcs_size + + arc_mfu_ghost->arcs_size - arc_c); + arc_evict_ghost(arc_mfu_ghost, NULL, todelete); + } + } + + return (stolen); +} + +/* + * Remove buffers from list until we've removed the specified number of + * bytes. Destroy the buffers that are removed. + */ +static void +arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) +{ + arc_buf_hdr_t *ab, *ab_prev; + list_t *list = &state->arcs_list[ARC_BUFC_DATA]; + kmutex_t *hash_lock; + uint64_t bytes_deleted = 0; + uint64_t bufs_skipped = 0; + + ASSERT(GHOST_STATE(state)); +top: + mutex_enter(&state->arcs_mtx); + for (ab = list_tail(list); ab; ab = ab_prev) { + ab_prev = list_prev(list, ab); + if (spa && ab->b_spa != spa) + continue; + hash_lock = HDR_LOCK(ab); + if (mutex_tryenter(hash_lock)) { + ASSERT(!HDR_IO_IN_PROGRESS(ab)); + ASSERT(ab->b_buf == NULL); + ARCSTAT_BUMP(arcstat_deleted); + bytes_deleted += ab->b_size; + + if (ab->b_l2hdr != NULL) { + /* + * This buffer is cached on the 2nd Level ARC; + * don't destroy the header. + */ + arc_change_state(arc_l2c_only, ab, hash_lock); + mutex_exit(hash_lock); + } else { + arc_change_state(arc_anon, ab, hash_lock); + mutex_exit(hash_lock); + arc_hdr_destroy(ab); + } + + DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); + if (bytes >= 0 && bytes_deleted >= bytes) + break; + } else { + if (bytes < 0) { + mutex_exit(&state->arcs_mtx); + mutex_enter(hash_lock); + mutex_exit(hash_lock); + goto top; + } + bufs_skipped += 1; + } + } + mutex_exit(&state->arcs_mtx); + + if (list == &state->arcs_list[ARC_BUFC_DATA] && + (bytes < 0 || bytes_deleted < bytes)) { + list = &state->arcs_list[ARC_BUFC_METADATA]; + goto top; + } + + if (bufs_skipped) { + ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); + ASSERT(bytes >= 0); + } + + if (bytes_deleted < bytes) + dprintf("only deleted %lld bytes from %p", + (longlong_t)bytes_deleted, state); +} + +static void +arc_adjust(void) +{ + int64_t top_sz, mru_over, arc_over, todelete; + + top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used; + + if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { + int64_t toevict = + MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); + (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA); + top_sz = arc_anon->arcs_size + arc_mru->arcs_size; + } + + if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { + int64_t toevict = + MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); + (void) arc_evict(arc_mru, NULL, toevict, FALSE, + ARC_BUFC_METADATA); + top_sz = arc_anon->arcs_size + arc_mru->arcs_size; + } + + mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; + + if (mru_over > 0) { + if (arc_mru_ghost->arcs_size > 0) { + todelete = MIN(arc_mru_ghost->arcs_size, mru_over); + arc_evict_ghost(arc_mru_ghost, NULL, todelete); + } + } + + if ((arc_over = arc_size - arc_c) > 0) { + int64_t tbl_over; + + if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { + int64_t toevict = + MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); + (void) arc_evict(arc_mfu, NULL, toevict, FALSE, + 
ARC_BUFC_DATA); + arc_over = arc_size - arc_c; + } + + if (arc_over > 0 && + arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { + int64_t toevict = + MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], + arc_over); + (void) arc_evict(arc_mfu, NULL, toevict, FALSE, + ARC_BUFC_METADATA); + } + + tbl_over = arc_size + arc_mru_ghost->arcs_size + + arc_mfu_ghost->arcs_size - arc_c * 2; + + if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) { + todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); + arc_evict_ghost(arc_mfu_ghost, NULL, todelete); + } + } +} + +static void +arc_do_user_evicts(void) +{ + mutex_enter(&arc_eviction_mtx); + while (arc_eviction_list != NULL) { + arc_buf_t *buf = arc_eviction_list; + arc_eviction_list = buf->b_next; + rw_enter(&buf->b_lock, RW_WRITER); + buf->b_hdr = NULL; + rw_exit(&buf->b_lock); + mutex_exit(&arc_eviction_mtx); + + if (buf->b_efunc != NULL) + VERIFY(buf->b_efunc(buf) == 0); + + buf->b_efunc = NULL; + buf->b_private = NULL; + kmem_cache_free(buf_cache, buf); + mutex_enter(&arc_eviction_mtx); + } + mutex_exit(&arc_eviction_mtx); +} + +/* + * Flush all *evictable* data from the cache for the given spa. + * NOTE: this will not touch "active" (i.e. referenced) data. + */ +void +arc_flush(spa_t *spa) +{ + while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { + (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); + if (spa) + break; + } + while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { + (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); + if (spa) + break; + } + while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { + (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); + if (spa) + break; + } + while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { + (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); + if (spa) + break; + } + + arc_evict_ghost(arc_mru_ghost, spa, -1); + arc_evict_ghost(arc_mfu_ghost, spa, -1); + + mutex_enter(&arc_reclaim_thr_lock); + arc_do_user_evicts(); + mutex_exit(&arc_reclaim_thr_lock); + ASSERT(spa || arc_eviction_list == NULL); +} + +int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ + +void +arc_shrink(void) +{ + if (arc_c > arc_c_min) { + uint64_t to_free; + +#ifdef _KERNEL + to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); +#else + to_free = arc_c >> arc_shrink_shift; +#endif + if (arc_c > arc_c_min + to_free) + atomic_add_64(&arc_c, -to_free); + else + arc_c = arc_c_min; + + atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); + if (arc_c > arc_size) + arc_c = MAX(arc_size, arc_c_min); + if (arc_p > arc_c) + arc_p = (arc_c >> 1); + ASSERT(arc_c >= arc_c_min); + ASSERT((int64_t)arc_p >= 0); + } + + if (arc_size > arc_c) + arc_adjust(); +} + +static int +arc_reclaim_needed(void) +{ + uint64_t extra; + +#ifdef _KERNEL + + if (needfree) + return (1); + + /* + * take 'desfree' extra pages, so we reclaim sooner, rather than later + */ + extra = desfree; + + /* + * check that we're out of range of the pageout scanner. It starts to + * schedule paging if freemem is less than lotsfree and needfree. + * lotsfree is the high-water mark for pageout, and needfree is the + * number of needed free pages. We add extra pages here to make sure + * the scanner doesn't start up while we're freeing memory. + */ + if (freemem < lotsfree + needfree + extra) + return (1); + + /* + * check to make sure that swapfs has enough space so that anon + * reservations can still succeed. 
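arc_shrink() above gives back 1/(2^arc_shrink_shift) of the target, 1/32nd by default, clamping at arc_c_min and pulling the MRU target arc_p down by the same fraction. The arithmetic in isolation, with made-up sizes (a sketch only; the real routine also clamps arc_c against arc_size):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t arc_c = 4096ULL << 20;		/* 4 GB current target */
	uint64_t arc_c_min = 512ULL << 20;	/* 512 MB floor */
	uint64_t arc_p = arc_c / 2;		/* MRU target */
	int arc_shrink_shift = 5;		/* reclaim 1/32nd per call */

	uint64_t to_free = arc_c >> arc_shrink_shift;	/* 128 MB here */

	if (arc_c > arc_c_min + to_free)
		arc_c -= to_free;
	else
		arc_c = arc_c_min;
	arc_p -= arc_p >> arc_shrink_shift;
	if (arc_p > arc_c)
		arc_p = arc_c >> 1;

	printf("arc_c=%llu MB arc_p=%llu MB\n",
	    (unsigned long long)(arc_c >> 20),
	    (unsigned long long)(arc_p >> 20));
	return (0);
}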
anon_resvmem() checks that the + * availrmem is greater than swapfs_minfree, and the number of reserved + * swap pages. We also add a bit of extra here just to prevent + * circumstances from getting really dire. + */ + if (availrmem < swapfs_minfree + swapfs_reserve + extra) + return (1); + +#if defined(__i386) + /* + * If we're on an i386 platform, it's possible that we'll exhaust the + * kernel heap space before we ever run out of available physical + * memory. Most checks of the size of the heap_area compare against + * tune.t_minarmem, which is the minimum available real memory that we + * can have in the system. However, this is generally fixed at 25 pages + * which is so low that it's useless. In this comparison, we seek to + * calculate the total heap-size, and reclaim if more than 3/4ths of the + * heap is allocated. (Or, in the calculation, if less than 1/4th is + * free) + */ + if (btop(vmem_size(heap_arena, VMEM_FREE)) < + (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) + return (1); +#endif + +#else + if (spa_get_random(100) == 0) + return (1); +#endif + return (0); +} + +static void +arc_kmem_reap_now(arc_reclaim_strategy_t strat) +{ + size_t i; + kmem_cache_t *prev_cache = NULL; + kmem_cache_t *prev_data_cache = NULL; + extern kmem_cache_t *zio_buf_cache[]; + extern kmem_cache_t *zio_data_buf_cache[]; + +#ifdef _KERNEL + if (arc_meta_used >= arc_meta_limit) { + /* + * We are exceeding our meta-data cache limit. + * Purge some DNLC entries to release holds on meta-data. + */ + dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); + } +#if defined(__i386) + /* + * Reclaim unused memory from all kmem caches. + */ + kmem_reap(); +#endif +#endif + + /* + * An aggressive reclamation will shrink the cache size as well as + * reap free buffers from the arc kmem caches. 
+ */ + if (strat == ARC_RECLAIM_AGGR) + arc_shrink(); + + for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { + if (zio_buf_cache[i] != prev_cache) { + prev_cache = zio_buf_cache[i]; + kmem_cache_reap_now(zio_buf_cache[i]); + } + if (zio_data_buf_cache[i] != prev_data_cache) { + prev_data_cache = zio_data_buf_cache[i]; + kmem_cache_reap_now(zio_data_buf_cache[i]); + } + } + kmem_cache_reap_now(buf_cache); + kmem_cache_reap_now(hdr_cache); +} + +static void +arc_reclaim_thread(void) +{ + clock_t growtime = 0; + arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; + callb_cpr_t cpr; + + CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); + + mutex_enter(&arc_reclaim_thr_lock); + while (arc_thread_exit == 0) { + if (arc_reclaim_needed()) { + + if (arc_no_grow) { + if (last_reclaim == ARC_RECLAIM_CONS) { + last_reclaim = ARC_RECLAIM_AGGR; + } else { + last_reclaim = ARC_RECLAIM_CONS; + } + } else { + arc_no_grow = TRUE; + last_reclaim = ARC_RECLAIM_AGGR; + membar_producer(); + } + + /* reset the growth delay for every reclaim */ + growtime = lbolt + (arc_grow_retry * hz); + + arc_kmem_reap_now(last_reclaim); + arc_warm = B_TRUE; + + } else if (arc_no_grow && lbolt >= growtime) { + arc_no_grow = FALSE; + } + + if (2 * arc_c < arc_size + + arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) + arc_adjust(); + + if (arc_eviction_list != NULL) + arc_do_user_evicts(); + + /* block until needed, or one second, whichever is shorter */ + CALLB_CPR_SAFE_BEGIN(&cpr); + (void) cv_timedwait(&arc_reclaim_thr_cv, + &arc_reclaim_thr_lock, (lbolt + hz)); + CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); + } + + arc_thread_exit = 0; + cv_broadcast(&arc_reclaim_thr_cv); + CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ + thread_exit(); +} + +/* + * Adapt arc info given the number of bytes we are trying to add and + * the state that we are coming from. This function is only called + * when we are adding new content to the cache. + */ +static void +arc_adapt(int bytes, arc_state_t *state) +{ + int mult; + + if (state == arc_l2c_only) + return; + + ASSERT(bytes > 0); + /* + * Adapt the target size of the MRU list: + * - if we just hit in the MRU ghost list, then increase + * the target size of the MRU list. + * - if we just hit in the MFU ghost list, then increase + * the target size of the MFU list by decreasing the + * target size of the MRU list. + */ + if (state == arc_mru_ghost) { + mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? + 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); + + arc_p = MIN(arc_c, arc_p + bytes * mult); + } else if (state == arc_mfu_ghost) { + mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? + 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); + + arc_p = MAX(0, (int64_t)arc_p - bytes * mult); + } + ASSERT((int64_t)arc_p >= 0); + + if (arc_reclaim_needed()) { + cv_signal(&arc_reclaim_thr_cv); + return; + } + + if (arc_no_grow) + return; + + if (arc_c >= arc_c_max) + return; + + /* + * If we're within (2 * maxblocksize) bytes of the target + * cache size, increment the target cache size + */ + if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { + atomic_add_64(&arc_c, (int64_t)bytes); + if (arc_c > arc_c_max) + arc_c = arc_c_max; + else if (state == arc_anon) + atomic_add_64(&arc_p, (int64_t)bytes); + if (arc_p > arc_c) + arc_p = arc_c; + } + ASSERT((int64_t)arc_p >= 0); +} + +/* + * Check if the cache has reached its limits and eviction is required + * prior to insert.
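The adaptation in arc_adapt() above is the core of the ARC's self-tuning: a hit in the MRU ghost list grows arc_p (the MRU target), a hit in the MFU ghost list shrinks it, and the step is scaled by the ratio of the two ghost sizes. A stripped-down sketch of just that adjustment (toy_* names are invented; as on the real hit paths, the ghost list that was hit is assumed non-empty):

#include <stdint.h>

#define	TOY_MIN(a, b)	((a) < (b) ? (a) : (b))

/*
 * hit_mru_ghost != 0: the miss would have been a hit with a larger MRU
 * side, so grow p.  Otherwise it would have been an MFU hit, so shrink
 * p.  The step is "bytes" scaled by the ratio of the ghost sizes.
 */
static uint64_t
toy_adapt(uint64_t p, uint64_t c, uint64_t bytes,
    uint64_t mru_ghost_size, uint64_t mfu_ghost_size, int hit_mru_ghost)
{
	uint64_t mult;

	if (hit_mru_ghost) {
		mult = (mru_ghost_size >= mfu_ghost_size) ? 1 :
		    mfu_ghost_size / mru_ghost_size;
		p = TOY_MIN(c, p + bytes * mult);
	} else {
		mult = (mfu_ghost_size >= mru_ghost_size) ? 1 :
		    mru_ghost_size / mfu_ghost_size;
		p = (p > bytes * mult) ? p - bytes * mult : 0;
	}
	return (p);
}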
+ */ +static int +arc_evict_needed(arc_buf_contents_t type) +{ + if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) + return (1); + +#ifdef _KERNEL + /* + * If zio data pages are being allocated out of a separate heap segment, + * then enforce that the size of available vmem for this area remains + * above about 1/32nd free. + */ + if (type == ARC_BUFC_DATA && zio_arena != NULL && + vmem_size(zio_arena, VMEM_FREE) < + (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) + return (1); +#endif + + if (arc_reclaim_needed()) + return (1); + + return (arc_size > arc_c); +} + +/* + * The buffer, supplied as the first argument, needs a data block. + * So, if we are at cache max, determine which cache should be victimized. + * We have the following cases: + * + * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> + * In this situation if we're out of space, but the resident size of the MFU is + * under the limit, victimize the MFU cache to satisfy this insertion request. + * + * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> + * Here, we've used up all of the available space for the MRU, so we need to + * evict from our own cache instead. Evict from the set of resident MRU + * entries. + * + * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> + * c minus p represents the MFU space in the cache, since p is the size of the + * cache that is dedicated to the MRU. In this situation there's still space on + * the MFU side, so the MRU side needs to be victimized. + * + * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> + * MFU's resident set is consuming more space than it has been allotted. In + * this situation, we must victimize our own cache, the MFU, for this insertion. + */ +static void +arc_get_data_buf(arc_buf_t *buf) +{ + arc_state_t *state = buf->b_hdr->b_state; + uint64_t size = buf->b_hdr->b_size; + arc_buf_contents_t type = buf->b_hdr->b_type; + + arc_adapt(size, state); + + /* + * We have not yet reached cache maximum size, + * just allocate a new buffer. + */ + if (!arc_evict_needed(type)) { + if (type == ARC_BUFC_METADATA) { + buf->b_data = zio_buf_alloc(size); + arc_space_consume(size); + } else { + ASSERT(type == ARC_BUFC_DATA); + buf->b_data = zio_data_buf_alloc(size); + atomic_add_64(&arc_size, size); + } + goto out; + } + + /* + * If we are prefetching from the mfu ghost list, this buffer + * will end up on the mru list; so steal space from there. + */ + if (state == arc_mfu_ghost) + state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; + else if (state == arc_mru_ghost) + state = arc_mru; + + if (state == arc_mru || state == arc_anon) { + uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; + state = (arc_mfu->arcs_lsize[type] > 0 && + arc_p > mru_used) ? arc_mfu : arc_mru; + } else { + /* MFU cases */ + uint64_t mfu_space = arc_c - arc_p; + state = (arc_mru->arcs_lsize[type] > 0 && + mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; + } + if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { + if (type == ARC_BUFC_METADATA) { + buf->b_data = zio_buf_alloc(size); + arc_space_consume(size); + } else { + ASSERT(type == ARC_BUFC_DATA); + buf->b_data = zio_data_buf_alloc(size); + atomic_add_64(&arc_size, size); + } + ARCSTAT_BUMP(arcstat_recycle_miss); + } + ASSERT(buf->b_data != NULL); +out: + /* + * Update the state size. Note that ghost states have a + * "ghost size" and so don't need to be updated. 
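The four cases above reduce to one comparison per insert type: an MRU insert victimizes the MFU side only while anon+MRU is still under arc_p, and an MFU insert victimizes the MRU side only while the resident MFU set is still under c - p. A compact sketch of that decision (toy_* names invented, p <= c assumed; the real code additionally checks that the chosen list has evictable bytes of the right content type):

#include <stdint.h>

enum toy_state { TOY_MRU, TOY_MFU };

static enum toy_state
toy_victim(enum toy_state insert_for, uint64_t p, uint64_t c,
    uint64_t anon_size, uint64_t mru_size, uint64_t mfu_size)
{
	if (insert_for == TOY_MRU)	/* cases 1 and 2 */
		return ((anon_size + mru_size < p) ? TOY_MFU : TOY_MRU);
	/* cases 3 and 4 */
	return ((mfu_size < c - p) ? TOY_MRU : TOY_MFU);
}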
+ */ + if (!GHOST_STATE(buf->b_hdr->b_state)) { + arc_buf_hdr_t *hdr = buf->b_hdr; + + atomic_add_64(&hdr->b_state->arcs_size, size); + if (list_link_active(&hdr->b_arc_node)) { + ASSERT(refcount_is_zero(&hdr->b_refcnt)); + atomic_add_64(&hdr->b_state->arcs_lsize[type], size); + } + /* + * If we are growing the cache, and we are adding anonymous + * data, and we have outgrown arc_p, update arc_p + */ + if (arc_size < arc_c && hdr->b_state == arc_anon && + arc_anon->arcs_size + arc_mru->arcs_size > arc_p) + arc_p = MIN(arc_c, arc_p + size); + } +} + +/* + * This routine is called whenever a buffer is accessed. + * NOTE: the hash lock is dropped in this function. + */ +static void +arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) +{ + ASSERT(MUTEX_HELD(hash_lock)); + + if (buf->b_state == arc_anon) { + /* + * This buffer is not in the cache, and does not + * appear in our "ghost" list. Add the new buffer + * to the MRU state. + */ + + ASSERT(buf->b_arc_access == 0); + buf->b_arc_access = lbolt; + DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); + arc_change_state(arc_mru, buf, hash_lock); + + } else if (buf->b_state == arc_mru) { + /* + * If this buffer is here because of a prefetch, then either: + * - clear the flag if this is a "referencing" read + * (any subsequent access will bump this into the MFU state). + * or + * - move the buffer to the head of the list if this is + * another prefetch (to make it less likely to be evicted). + */ + if ((buf->b_flags & ARC_PREFETCH) != 0) { + if (refcount_count(&buf->b_refcnt) == 0) { + ASSERT(list_link_active(&buf->b_arc_node)); + } else { + buf->b_flags &= ~ARC_PREFETCH; + ARCSTAT_BUMP(arcstat_mru_hits); + } + buf->b_arc_access = lbolt; + return; + } + + /* + * This buffer has been "accessed" only once so far, + * but it is still in the cache. Move it to the MFU + * state. + */ + if (lbolt > buf->b_arc_access + ARC_MINTIME) { + /* + * More than 125ms have passed since we + * instantiated this buffer. Move it to the + * most frequently used state. + */ + buf->b_arc_access = lbolt; + DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); + arc_change_state(arc_mfu, buf, hash_lock); + } + ARCSTAT_BUMP(arcstat_mru_hits); + } else if (buf->b_state == arc_mru_ghost) { + arc_state_t *new_state; + /* + * This buffer has been "accessed" recently, but + * was evicted from the cache. Move it to the + * MFU state. + */ + + if (buf->b_flags & ARC_PREFETCH) { + new_state = arc_mru; + if (refcount_count(&buf->b_refcnt) > 0) + buf->b_flags &= ~ARC_PREFETCH; + DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); + } else { + new_state = arc_mfu; + DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); + } + + buf->b_arc_access = lbolt; + arc_change_state(new_state, buf, hash_lock); + + ARCSTAT_BUMP(arcstat_mru_ghost_hits); + } else if (buf->b_state == arc_mfu) { + /* + * This buffer has been accessed more than once and is + * still in the cache. Keep it in the MFU state. + * + * NOTE: an add_reference() that occurred when we did + * the arc_read() will have kicked this off the list. + * If it was a prefetch, we will explicitly move it to + * the head of the list now. + */ + if ((buf->b_flags & ARC_PREFETCH) != 0) { + ASSERT(refcount_count(&buf->b_refcnt) == 0); + ASSERT(list_link_active(&buf->b_arc_node)); + } + ARCSTAT_BUMP(arcstat_mfu_hits); + buf->b_arc_access = lbolt; + } else if (buf->b_state == arc_mfu_ghost) { + arc_state_t *new_state = arc_mfu; + /* + * This buffer has been accessed more than once but has + * been evicted from the cache. 
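Ignoring locking, prefetch bookkeeping and statistics, arc_access() below is a small state machine: anonymous buffers go to MRU, an MRU buffer is promoted to MFU once it is touched again more than ARC_MINTIME after its first access, and ghost hits go back to MFU (or to MRU for prefetches). A toy transition function (the states and tick value are simplified assumptions for illustration):

#include <stdint.h>

enum toy_arc_state {
	TOY_ANON, TOY_MRU, TOY_MRU_GHOST, TOY_MFU, TOY_MFU_GHOST
};

#define	TOY_MINTIME	6	/* roughly ARC_MINTIME in ticks, assuming hz=100 */

static enum toy_arc_state
toy_access(enum toy_arc_state cur, uint64_t last_access, uint64_t now,
    int prefetch)
{
	switch (cur) {
	case TOY_ANON:		/* first insert goes to the MRU side */
		return (TOY_MRU);
	case TOY_MRU:		/* promote only after a minimum lifetime */
		return (now > last_access + TOY_MINTIME ? TOY_MFU : TOY_MRU);
	case TOY_MRU_GHOST:	/* recently evicted: skip straight to MFU */
		return (prefetch ? TOY_MRU : TOY_MFU);
	case TOY_MFU_GHOST:	/* frequently used before eviction */
		return (prefetch ? TOY_MRU : TOY_MFU);
	default:		/* MFU (and l2c_only) stay on the MFU side */
		return (TOY_MFU);
	}
}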
Move it back to the + * MFU state. + */ + + if (buf->b_flags & ARC_PREFETCH) { + /* + * This is a prefetch access... + * move this block back to the MRU state. + */ + ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); + new_state = arc_mru; + } + + buf->b_arc_access = lbolt; + DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); + arc_change_state(new_state, buf, hash_lock); + + ARCSTAT_BUMP(arcstat_mfu_ghost_hits); + } else if (buf->b_state == arc_l2c_only) { + /* + * This buffer is on the 2nd Level ARC. + */ + + buf->b_arc_access = lbolt; + DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); + arc_change_state(arc_mfu, buf, hash_lock); + } else { + ASSERT(!"invalid arc state"); + } +} + +/* a generic arc_done_func_t which you can use */ +/* ARGSUSED */ +void +arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) +{ + bcopy(buf->b_data, arg, buf->b_hdr->b_size); + VERIFY(arc_buf_remove_ref(buf, arg) == 1); +} + +/* a generic arc_done_func_t */ +void +arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) +{ + arc_buf_t **bufp = arg; + if (zio && zio->io_error) { + VERIFY(arc_buf_remove_ref(buf, arg) == 1); + *bufp = NULL; + } else { + *bufp = buf; + } +} + +static void +arc_read_done(zio_t *zio) +{ + arc_buf_hdr_t *hdr, *found; + arc_buf_t *buf; + arc_buf_t *abuf; /* buffer we're assigning to callback */ + kmutex_t *hash_lock; + arc_callback_t *callback_list, *acb; + int freeable = FALSE; + + buf = zio->io_private; + hdr = buf->b_hdr; + + /* + * The hdr was inserted into hash-table and removed from lists + * prior to starting I/O. We should find this header, since + * it's in the hash table, and it should be legit since it's + * not possible to evict it during the I/O. The only possible + * reason for it not to be found is if we were freed during the + * read. + */ + found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, + &hash_lock); + + ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || + (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || + (found == hdr && HDR_L2_READING(hdr))); + + hdr->b_flags &= ~ARC_L2_EVICTED; + if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) + hdr->b_flags &= ~ARC_L2CACHE; + + /* byteswap if necessary */ + callback_list = hdr->b_acb; + ASSERT(callback_list != NULL); + if (BP_SHOULD_BYTESWAP(zio->io_bp)) { + arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? + byteswap_uint64_array : + dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap; + func(buf->b_data, hdr->b_size); + } + + arc_cksum_compute(buf, B_FALSE); + + /* create copies of the data buffer for the callers */ + abuf = buf; + for (acb = callback_list; acb; acb = acb->acb_next) { + if (acb->acb_done) { + if (abuf == NULL) + abuf = arc_buf_clone(buf); + acb->acb_buf = abuf; + abuf = NULL; + } + } + hdr->b_acb = NULL; + hdr->b_flags &= ~ARC_IO_IN_PROGRESS; + ASSERT(!HDR_BUF_AVAILABLE(hdr)); + if (abuf == buf) + hdr->b_flags |= ARC_BUF_AVAILABLE; + + ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); + + if (zio->io_error != 0) { + hdr->b_flags |= ARC_IO_ERROR; + if (hdr->b_state != arc_anon) + arc_change_state(arc_anon, hdr, hash_lock); + if (HDR_IN_HASH_TABLE(hdr)) + buf_hash_remove(hdr); + freeable = refcount_is_zero(&hdr->b_refcnt); + } + + /* + * Broadcast before we drop the hash_lock to avoid the possibility + * that the hdr (and hence the cv) might be freed before we get to + * the cv_broadcast(). + */ + cv_broadcast(&hdr->b_cv); + + if (hash_lock) { + /* + * Only call arc_access on anonymous buffers. 
This is because + * if we've issued an I/O for an evicted buffer, we've already + * called arc_access (to prevent any simultaneous readers from + * getting confused). + */ + if (zio->io_error == 0 && hdr->b_state == arc_anon) + arc_access(hdr, hash_lock); + mutex_exit(hash_lock); + } else { + /* + * This block was freed while we waited for the read to + * complete. It has been removed from the hash table and + * moved to the anonymous state (so that it won't show up + * in the cache). + */ + ASSERT3P(hdr->b_state, ==, arc_anon); + freeable = refcount_is_zero(&hdr->b_refcnt); + } + + /* execute each callback and free its structure */ + while ((acb = callback_list) != NULL) { + if (acb->acb_done) + acb->acb_done(zio, acb->acb_buf, acb->acb_private); + + if (acb->acb_zio_dummy != NULL) { + acb->acb_zio_dummy->io_error = zio->io_error; + zio_nowait(acb->acb_zio_dummy); + } + + callback_list = acb->acb_next; + kmem_free(acb, sizeof (arc_callback_t)); + } + + if (freeable) + arc_hdr_destroy(hdr); +} + +/* + * "Read" the block at the specified DVA (in bp) via the + * cache. If the block is found in the cache, invoke the provided + * callback immediately and return. Note that the `zio' parameter + * in the callback will be NULL in this case, since no IO was + * required. If the block is not in the cache pass the read request + * on to the spa with a substitute callback function, so that the + * requested block will be added to the cache. + * + * If a read request arrives for a block that has a read in-progress, + * either wait for the in-progress read to complete (and return the + * results); or, if this is a read with a "done" func, add a record + * to the read to invoke the "done" func when the read completes, + * and return; or just return. + * + * arc_read_done() will invoke all the requested "done" functions + * for readers of this block. + * + * Normal callers should use arc_read and pass the arc buffer and offset + * for the bp. But if you know you don't need locking, you can use + * arc_read_nolock. Callers cannot use a "done" function in a prefetch + * call (i.e., with ARC_NOWAIT set).
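A hypothetical caller of the interface described above might look like the sketch below: a synchronous read passes ARC_WAIT and a done callback (arc_getbuf_func, defined earlier in this file), while a prefetch passes ARC_NOWAIT|ARC_PREFETCH and no callback. The priority and flag constants are assumed to come from zio.h; the blkptrs and bookmark are assumed to be supplied by the caller, and error handling is elided.

/*
 * Illustrative sketch only, not part of arc.c.
 */
static int
example_read(spa_t *spa, blkptr_t *bp, blkptr_t *prefetch_bp,
    const zbookmark_t *zb)
{
	arc_buf_t *abuf = NULL;
	uint32_t aflags = ARC_WAIT;
	int err;

	/* synchronous read: arc_getbuf_func stores the buffer in abuf */
	err = arc_read_nolock(NULL, spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
	if (err == 0 && abuf != NULL) {
		/* ... use abuf->b_data ... */
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	/* prefetch: no done callback allowed, must use ARC_NOWAIT */
	aflags = ARC_NOWAIT | ARC_PREFETCH;
	(void) arc_read_nolock(NULL, spa, prefetch_bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);

	return (err);
}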
+ */ +int +arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf, + arc_done_func_t *done, void *private, int priority, int zio_flags, + uint32_t *arc_flags, const zbookmark_t *zb) +{ + int err; + arc_buf_hdr_t *hdr = pbuf->b_hdr; + + ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); + ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); + rw_enter(&pbuf->b_lock, RW_READER); + + err = arc_read_nolock(pio, spa, bp, done, private, priority, + zio_flags, arc_flags, zb); + + ASSERT3P(hdr, ==, pbuf->b_hdr); + rw_exit(&pbuf->b_lock); + return (err); +} + +int +arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp, + arc_done_func_t *done, void *private, int priority, int zio_flags, + uint32_t *arc_flags, const zbookmark_t *zb) +{ + arc_buf_hdr_t *hdr; + arc_buf_t *buf; + kmutex_t *hash_lock; + zio_t *rzio; + +top: + hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); + if (hdr && hdr->b_datacnt > 0) { + + *arc_flags |= ARC_CACHED; + + if (HDR_IO_IN_PROGRESS(hdr)) { + + if (*arc_flags & ARC_WAIT) { + cv_wait(&hdr->b_cv, hash_lock); + mutex_exit(hash_lock); + goto top; + } + ASSERT(*arc_flags & ARC_NOWAIT); + + if (done) { + arc_callback_t *acb = NULL; + + acb = kmem_zalloc(sizeof (arc_callback_t), + KM_SLEEP); + acb->acb_done = done; + acb->acb_private = private; + if (pio != NULL) + acb->acb_zio_dummy = zio_null(pio, + spa, NULL, NULL, zio_flags); + + ASSERT(acb->acb_done != NULL); + acb->acb_next = hdr->b_acb; + hdr->b_acb = acb; + add_reference(hdr, hash_lock, private); + mutex_exit(hash_lock); + return (0); + } + mutex_exit(hash_lock); + return (0); + } + + ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); + + if (done) { + add_reference(hdr, hash_lock, private); + /* + * If this block is already in use, create a new + * copy of the data so that we will be guaranteed + * that arc_release() will always succeed. 
+ */ + buf = hdr->b_buf; + ASSERT(buf); + ASSERT(buf->b_data); + if (HDR_BUF_AVAILABLE(hdr)) { + ASSERT(buf->b_efunc == NULL); + hdr->b_flags &= ~ARC_BUF_AVAILABLE; + } else { + buf = arc_buf_clone(buf); + } + } else if (*arc_flags & ARC_PREFETCH && + refcount_count(&hdr->b_refcnt) == 0) { + hdr->b_flags |= ARC_PREFETCH; + } + DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); + arc_access(hdr, hash_lock); + if (*arc_flags & ARC_L2CACHE) + hdr->b_flags |= ARC_L2CACHE; + mutex_exit(hash_lock); + ARCSTAT_BUMP(arcstat_hits); + ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), + demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, + data, metadata, hits); + + if (done) + done(NULL, buf, private); + } else { + uint64_t size = BP_GET_LSIZE(bp); + arc_callback_t *acb; + vdev_t *vd = NULL; + daddr_t addr; + + if (hdr == NULL) { + /* this block is not in the cache */ + arc_buf_hdr_t *exists; + arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); + buf = arc_buf_alloc(spa, size, private, type); + hdr = buf->b_hdr; + hdr->b_dva = *BP_IDENTITY(bp); + hdr->b_birth = bp->blk_birth; + hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; + exists = buf_hash_insert(hdr, &hash_lock); + if (exists) { + /* somebody beat us to the hash insert */ + mutex_exit(hash_lock); + bzero(&hdr->b_dva, sizeof (dva_t)); + hdr->b_birth = 0; + hdr->b_cksum0 = 0; + (void) arc_buf_remove_ref(buf, private); + goto top; /* restart the IO request */ + } + /* if this is a prefetch, we don't have a reference */ + if (*arc_flags & ARC_PREFETCH) { + (void) remove_reference(hdr, hash_lock, + private); + hdr->b_flags |= ARC_PREFETCH; + } + if (*arc_flags & ARC_L2CACHE) + hdr->b_flags |= ARC_L2CACHE; + if (BP_GET_LEVEL(bp) > 0) + hdr->b_flags |= ARC_INDIRECT; + } else { + /* this block is in the ghost cache */ + ASSERT(GHOST_STATE(hdr->b_state)); + ASSERT(!HDR_IO_IN_PROGRESS(hdr)); + ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); + ASSERT(hdr->b_buf == NULL); + + /* if this is a prefetch, we don't have a reference */ + if (*arc_flags & ARC_PREFETCH) + hdr->b_flags |= ARC_PREFETCH; + else + add_reference(hdr, hash_lock, private); + if (*arc_flags & ARC_L2CACHE) + hdr->b_flags |= ARC_L2CACHE; + buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); + buf->b_hdr = hdr; + buf->b_data = NULL; + buf->b_efunc = NULL; + buf->b_private = NULL; + buf->b_next = NULL; + hdr->b_buf = buf; + arc_get_data_buf(buf); + ASSERT(hdr->b_datacnt == 0); + hdr->b_datacnt = 1; + + } + + acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); + acb->acb_done = done; + acb->acb_private = private; + + ASSERT(hdr->b_acb == NULL); + hdr->b_acb = acb; + hdr->b_flags |= ARC_IO_IN_PROGRESS; + + /* + * If the buffer has been evicted, migrate it to a present state + * before issuing the I/O. Once we drop the hash-table lock, + * the header will be marked as I/O in progress and have an + * attached buffer. At this point, anybody who finds this + * buffer ought to notice that it's legit but has a pending I/O. + */ + + if (GHOST_STATE(hdr->b_state)) + arc_access(hdr, hash_lock); + + if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && + (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { + addr = hdr->b_l2hdr->b_daddr; + /* + * Lock out device removal. 
+ */ + if (vdev_is_dead(vd) || + !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) + vd = NULL; + } + + mutex_exit(hash_lock); + + ASSERT3U(hdr->b_size, ==, size); + DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, + zbookmark_t *, zb); + ARCSTAT_BUMP(arcstat_misses); + ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), + demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, + data, metadata, misses); + + if (vd != NULL) { + /* + * Read from the L2ARC if the following are true: + * 1. The L2ARC vdev was previously cached. + * 2. This buffer still has L2ARC metadata. + * 3. This buffer isn't currently writing to the L2ARC. + * 4. The L2ARC entry wasn't evicted, which may + * also have invalidated the vdev. + */ + if (hdr->b_l2hdr != NULL && + !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) { + l2arc_read_callback_t *cb; + + DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); + ARCSTAT_BUMP(arcstat_l2_hits); + + cb = kmem_zalloc(sizeof (l2arc_read_callback_t), + KM_SLEEP); + cb->l2rcb_buf = buf; + cb->l2rcb_spa = spa; + cb->l2rcb_bp = *bp; + cb->l2rcb_zb = *zb; + cb->l2rcb_flags = zio_flags; + + /* + * l2arc read. The SCL_L2ARC lock will be + * released by l2arc_read_done(). + */ + rzio = zio_read_phys(pio, vd, addr, size, + buf->b_data, ZIO_CHECKSUM_OFF, + l2arc_read_done, cb, priority, zio_flags | + ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | + ZIO_FLAG_DONT_PROPAGATE | + ZIO_FLAG_DONT_RETRY, B_FALSE); + DTRACE_PROBE2(l2arc__read, vdev_t *, vd, + zio_t *, rzio); + + if (*arc_flags & ARC_NOWAIT) { + zio_nowait(rzio); + return (0); + } + + ASSERT(*arc_flags & ARC_WAIT); + if (zio_wait(rzio) == 0) + return (0); + + /* l2arc read error; goto zio_read() */ + } else { + DTRACE_PROBE1(l2arc__miss, + arc_buf_hdr_t *, hdr); + ARCSTAT_BUMP(arcstat_l2_misses); + if (HDR_L2_WRITING(hdr)) + ARCSTAT_BUMP(arcstat_l2_rw_clash); + spa_config_exit(spa, SCL_L2ARC, vd); + } + } + + rzio = zio_read(pio, spa, bp, buf->b_data, size, + arc_read_done, buf, priority, zio_flags, zb); + + if (*arc_flags & ARC_WAIT) + return (zio_wait(rzio)); + + ASSERT(*arc_flags & ARC_NOWAIT); + zio_nowait(rzio); + } + return (0); +} + +/* + * arc_read() variant to support pool traversal. If the block is already + * in the ARC, make a copy of it; otherwise, the caller will do the I/O. + * The idea is that we don't want pool traversal filling up memory, but + * if the ARC already has the data anyway, we shouldn't pay for the I/O. + */ +int +arc_tryread(spa_t *spa, blkptr_t *bp, void *data) +{ + arc_buf_hdr_t *hdr; + kmutex_t *hash_mtx; + int rc = 0; + + hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); + + if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { + arc_buf_t *buf = hdr->b_buf; + + ASSERT(buf); + while (buf->b_data == NULL) { + buf = buf->b_next; + ASSERT(buf); + } + bcopy(buf->b_data, data, hdr->b_size); + } else { + rc = ENOENT; + } + + if (hash_mtx) + mutex_exit(hash_mtx); + + return (rc); +} + +void +arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) +{ + ASSERT(buf->b_hdr != NULL); + ASSERT(buf->b_hdr->b_state != arc_anon); + ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); + buf->b_efunc = func; + buf->b_private = private; +} + +/* + * This is used by the DMU to let the ARC know that a buffer is + * being evicted, so the ARC should clean up. If this arc buf + * is not yet in the evicted state, it will be put there. 
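The callback registered through arc_set_callback() below is invoked with the arc_buf_t being evicted (see the b_efunc calls in arc_do_user_evicts() and arc_buf_evict()); b_private points back at the client's state, and the callback must drop every client reference to the buffer and return 0. A hypothetical client, shaped loosely like the dbuf layer's usage (all toy_* names are invented):

typedef struct toy_client {
	arc_buf_t	*tc_buf;	/* cached data, may be reclaimed */
} toy_client_t;

/*
 * Evict callback: the argument is the arc_buf_t being evicted and
 * b_private is the client state registered with arc_set_callback().
 */
static int
toy_client_evict(void *varc_buf)
{
	arc_buf_t *buf = varc_buf;
	toy_client_t *tc = buf->b_private;

	tc->tc_buf = NULL;		/* forget the evicted buffer */
	return (0);
}

static void
toy_client_cache(toy_client_t *tc, arc_buf_t *buf)
{
	tc->tc_buf = buf;
	/* let the ARC reclaim this buffer through the callback later */
	arc_set_callback(buf, toy_client_evict, tc);
}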
+ */ +int +arc_buf_evict(arc_buf_t *buf) +{ + arc_buf_hdr_t *hdr; + kmutex_t *hash_lock; + arc_buf_t **bufp; + + rw_enter(&buf->b_lock, RW_WRITER); + hdr = buf->b_hdr; + if (hdr == NULL) { + /* + * We are in arc_do_user_evicts(). + */ + ASSERT(buf->b_data == NULL); + rw_exit(&buf->b_lock); + return (0); + } else if (buf->b_data == NULL) { + arc_buf_t copy = *buf; /* structure assignment */ + /* + * We are on the eviction list; process this buffer now + * but let arc_do_user_evicts() do the reaping. + */ + buf->b_efunc = NULL; + rw_exit(&buf->b_lock); + VERIFY(copy.b_efunc(&copy) == 0); + return (1); + } + hash_lock = HDR_LOCK(hdr); + mutex_enter(hash_lock); + + ASSERT(buf->b_hdr == hdr); + ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); + ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); + + /* + * Pull this buffer off of the hdr + */ + bufp = &hdr->b_buf; + while (*bufp != buf) + bufp = &(*bufp)->b_next; + *bufp = buf->b_next; + + ASSERT(buf->b_data != NULL); + arc_buf_destroy(buf, FALSE, FALSE); + + if (hdr->b_datacnt == 0) { + arc_state_t *old_state = hdr->b_state; + arc_state_t *evicted_state; + + ASSERT(refcount_is_zero(&hdr->b_refcnt)); + + evicted_state = + (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; + + mutex_enter(&old_state->arcs_mtx); + mutex_enter(&evicted_state->arcs_mtx); + + arc_change_state(evicted_state, hdr, hash_lock); + ASSERT(HDR_IN_HASH_TABLE(hdr)); + hdr->b_flags |= ARC_IN_HASH_TABLE; + hdr->b_flags &= ~ARC_BUF_AVAILABLE; + + mutex_exit(&evicted_state->arcs_mtx); + mutex_exit(&old_state->arcs_mtx); + } + mutex_exit(hash_lock); + rw_exit(&buf->b_lock); + + VERIFY(buf->b_efunc(buf) == 0); + buf->b_efunc = NULL; + buf->b_private = NULL; + buf->b_hdr = NULL; + kmem_cache_free(buf_cache, buf); + return (1); +} + +/* + * Release this buffer from the cache. This must be done + * after a read and prior to modifying the buffer contents. + * If the buffer has more than one reference, we must make + * a new hdr for the buffer. + */ +void +arc_release(arc_buf_t *buf, void *tag) +{ + arc_buf_hdr_t *hdr; + kmutex_t *hash_lock; + l2arc_buf_hdr_t *l2hdr; + uint64_t buf_size; + + rw_enter(&buf->b_lock, RW_WRITER); + hdr = buf->b_hdr; + + /* this buffer is not on any list */ + ASSERT(refcount_count(&hdr->b_refcnt) > 0); + ASSERT(!(hdr->b_flags & ARC_STORED)); + + if (hdr->b_state == arc_anon) { + /* this buffer is already released */ + ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); + ASSERT(BUF_EMPTY(hdr)); + ASSERT(buf->b_efunc == NULL); + arc_buf_thaw(buf); + rw_exit(&buf->b_lock); + return; + } + + hash_lock = HDR_LOCK(hdr); + mutex_enter(hash_lock); + + l2hdr = hdr->b_l2hdr; + if (l2hdr) { + mutex_enter(&l2arc_buflist_mtx); + hdr->b_l2hdr = NULL; + buf_size = hdr->b_size; + } + + /* + * Do we have more than one buf? + */ + if (hdr->b_datacnt > 1) { + arc_buf_hdr_t *nhdr; + arc_buf_t **bufp; + uint64_t blksz = hdr->b_size; + spa_t *spa = hdr->b_spa; + arc_buf_contents_t type = hdr->b_type; + uint32_t flags = hdr->b_flags; + + ASSERT(hdr->b_buf != buf || buf->b_next != NULL); + /* + * Pull the data off of this buf and attach it to + * a new anonymous buf.
+ */ + (void) remove_reference(hdr, hash_lock, tag); + bufp = &hdr->b_buf; + while (*bufp != buf) + bufp = &(*bufp)->b_next; + *bufp = (*bufp)->b_next; + buf->b_next = NULL; + + ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); + atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); + if (refcount_is_zero(&hdr->b_refcnt)) { + uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; + ASSERT3U(*size, >=, hdr->b_size); + atomic_add_64(size, -hdr->b_size); + } + hdr->b_datacnt -= 1; + arc_cksum_verify(buf); + + mutex_exit(hash_lock); + + nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); + nhdr->b_size = blksz; + nhdr->b_spa = spa; + nhdr->b_type = type; + nhdr->b_buf = buf; + nhdr->b_state = arc_anon; + nhdr->b_arc_access = 0; + nhdr->b_flags = flags & ARC_L2_WRITING; + nhdr->b_l2hdr = NULL; + nhdr->b_datacnt = 1; + nhdr->b_freeze_cksum = NULL; + (void) refcount_add(&nhdr->b_refcnt, tag); + buf->b_hdr = nhdr; + rw_exit(&buf->b_lock); + atomic_add_64(&arc_anon->arcs_size, blksz); + } else { + rw_exit(&buf->b_lock); + ASSERT(refcount_count(&hdr->b_refcnt) == 1); + ASSERT(!list_link_active(&hdr->b_arc_node)); + ASSERT(!HDR_IO_IN_PROGRESS(hdr)); + arc_change_state(arc_anon, hdr, hash_lock); + hdr->b_arc_access = 0; + mutex_exit(hash_lock); + + bzero(&hdr->b_dva, sizeof (dva_t)); + hdr->b_birth = 0; + hdr->b_cksum0 = 0; + arc_buf_thaw(buf); + } + buf->b_efunc = NULL; + buf->b_private = NULL; + + if (l2hdr) { + list_remove(l2hdr->b_dev->l2ad_buflist, hdr); + kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); + ARCSTAT_INCR(arcstat_l2_size, -buf_size); + mutex_exit(&l2arc_buflist_mtx); + } +} + +int +arc_released(arc_buf_t *buf) +{ + int released; + + rw_enter(&buf->b_lock, RW_READER); + released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); + rw_exit(&buf->b_lock); + return (released); +} + +int +arc_has_callback(arc_buf_t *buf) +{ + int callback; + + rw_enter(&buf->b_lock, RW_READER); + callback = (buf->b_efunc != NULL); + rw_exit(&buf->b_lock); + return (callback); +} + +#ifdef ZFS_DEBUG +int +arc_referenced(arc_buf_t *buf) +{ + int referenced; + + rw_enter(&buf->b_lock, RW_READER); + referenced = (refcount_count(&buf->b_hdr->b_refcnt)); + rw_exit(&buf->b_lock); + return (referenced); +} +#endif + +static void +arc_write_ready(zio_t *zio) +{ + arc_write_callback_t *callback = zio->io_private; + arc_buf_t *buf = callback->awcb_buf; + arc_buf_hdr_t *hdr = buf->b_hdr; + + ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); + callback->awcb_ready(zio, buf, callback->awcb_private); + + /* + * If the IO is already in progress, then this is a re-write + * attempt, so we need to thaw and re-compute the cksum. + * It is the responsibility of the callback to handle the + * accounting for any re-write attempt. + */ + if (HDR_IO_IN_PROGRESS(hdr)) { + mutex_enter(&hdr->b_freeze_lock); + if (hdr->b_freeze_cksum != NULL) { + kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); + hdr->b_freeze_cksum = NULL; + } + mutex_exit(&hdr->b_freeze_lock); + } + arc_cksum_compute(buf, B_FALSE); + hdr->b_flags |= ARC_IO_IN_PROGRESS; +} + +static void +arc_write_done(zio_t *zio) +{ + arc_write_callback_t *callback = zio->io_private; + arc_buf_t *buf = callback->awcb_buf; + arc_buf_hdr_t *hdr = buf->b_hdr; + + hdr->b_acb = NULL; + + hdr->b_dva = *BP_IDENTITY(zio->io_bp); + hdr->b_birth = zio->io_bp->blk_birth; + hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; + /* + * If the block to be written was all-zero, we may have + * compressed it away. 
In this case no write was performed + * so there will be no dva/birth-date/checksum. The buffer + * must therefor remain anonymous (and uncached). + */ + if (!BUF_EMPTY(hdr)) { + arc_buf_hdr_t *exists; + kmutex_t *hash_lock; + + arc_cksum_verify(buf); + + exists = buf_hash_insert(hdr, &hash_lock); + if (exists) { + /* + * This can only happen if we overwrite for + * sync-to-convergence, because we remove + * buffers from the hash table when we arc_free(). + */ + ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE); + ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), + BP_IDENTITY(zio->io_bp))); + ASSERT3U(zio->io_bp_orig.blk_birth, ==, + zio->io_bp->blk_birth); + + ASSERT(refcount_is_zero(&exists->b_refcnt)); + arc_change_state(arc_anon, exists, hash_lock); + mutex_exit(hash_lock); + arc_hdr_destroy(exists); + exists = buf_hash_insert(hdr, &hash_lock); + ASSERT3P(exists, ==, NULL); + } + hdr->b_flags &= ~ARC_IO_IN_PROGRESS; + /* if it's not anon, we are doing a scrub */ + if (hdr->b_state == arc_anon) + arc_access(hdr, hash_lock); + mutex_exit(hash_lock); + } else if (callback->awcb_done == NULL) { + int destroy_hdr; + /* + * This is an anonymous buffer with no user callback, + * destroy it if there are no active references. + */ + mutex_enter(&arc_eviction_mtx); + destroy_hdr = refcount_is_zero(&hdr->b_refcnt); + hdr->b_flags &= ~ARC_IO_IN_PROGRESS; + mutex_exit(&arc_eviction_mtx); + if (destroy_hdr) + arc_hdr_destroy(hdr); + } else { + hdr->b_flags &= ~ARC_IO_IN_PROGRESS; + } + hdr->b_flags &= ~ARC_STORED; + + if (callback->awcb_done) { + ASSERT(!refcount_is_zero(&hdr->b_refcnt)); + callback->awcb_done(zio, buf, callback->awcb_private); + } + + kmem_free(callback, sizeof (arc_write_callback_t)); +} + +void +write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp) +{ + boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata); + + /* Determine checksum setting */ + if (ismd) { + /* + * Metadata always gets checksummed. If the data + * checksum is multi-bit correctable, and it's not a + * ZBT-style checksum, then it's suitable for metadata + * as well. Otherwise, the metadata checksum defaults + * to fletcher4. + */ + if (zio_checksum_table[wp->wp_oschecksum].ci_correctable && + !zio_checksum_table[wp->wp_oschecksum].ci_zbt) + zp->zp_checksum = wp->wp_oschecksum; + else + zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4; + } else { + zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum, + wp->wp_oschecksum); + } + + /* Determine compression setting */ + if (ismd) { + /* + * XXX -- we should design a compression algorithm + * that specializes in arrays of bps. + */ + zp->zp_compress = zfs_mdcomp_disable ? 
ZIO_COMPRESS_EMPTY : + ZIO_COMPRESS_LZJB; + } else { + zp->zp_compress = zio_compress_select(wp->wp_dncompress, + wp->wp_oscompress); + } + + zp->zp_type = wp->wp_type; + zp->zp_level = wp->wp_level; + zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa)); +} + +zio_t * +arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp, + boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, + arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, + int zio_flags, const zbookmark_t *zb) +{ + arc_buf_hdr_t *hdr = buf->b_hdr; + arc_write_callback_t *callback; + zio_t *zio; + zio_prop_t zp; + + ASSERT(ready != NULL); + ASSERT(!HDR_IO_ERROR(hdr)); + ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); + ASSERT(hdr->b_acb == 0); + if (l2arc) + hdr->b_flags |= ARC_L2CACHE; + callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); + callback->awcb_ready = ready; + callback->awcb_done = done; + callback->awcb_private = private; + callback->awcb_buf = buf; + + write_policy(spa, wp, &zp); + zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp, + arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); + + return (zio); +} + +int +arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, + zio_done_func_t *done, void *private, uint32_t arc_flags) +{ + arc_buf_hdr_t *ab; + kmutex_t *hash_lock; + zio_t *zio; + + /* + * If this buffer is in the cache, release it, so it + * can be re-used. + */ + ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); + if (ab != NULL) { + /* + * The checksum of blocks to free is not always + * preserved (eg. on the deadlist). However, if it is + * nonzero, it should match what we have in the cache. + */ + ASSERT(bp->blk_cksum.zc_word[0] == 0 || + bp->blk_cksum.zc_word[0] == ab->b_cksum0 || + bp->blk_fill == BLK_FILL_ALREADY_FREED); + + if (ab->b_state != arc_anon) + arc_change_state(arc_anon, ab, hash_lock); + if (HDR_IO_IN_PROGRESS(ab)) { + /* + * This should only happen when we prefetch. + */ + ASSERT(ab->b_flags & ARC_PREFETCH); + ASSERT3U(ab->b_datacnt, ==, 1); + ab->b_flags |= ARC_FREED_IN_READ; + if (HDR_IN_HASH_TABLE(ab)) + buf_hash_remove(ab); + ab->b_arc_access = 0; + bzero(&ab->b_dva, sizeof (dva_t)); + ab->b_birth = 0; + ab->b_cksum0 = 0; + ab->b_buf->b_efunc = NULL; + ab->b_buf->b_private = NULL; + mutex_exit(hash_lock); + } else if (refcount_is_zero(&ab->b_refcnt)) { + ab->b_flags |= ARC_FREE_IN_PROGRESS; + mutex_exit(hash_lock); + arc_hdr_destroy(ab); + ARCSTAT_BUMP(arcstat_deleted); + } else { + /* + * We still have an active reference on this + * buffer. This can happen, e.g., from + * dbuf_unoverride(). 
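One consequence of the policy above that is easy to miss: metadata blocks get one ditto copy on top of the dataset's copies property, capped by the pool's replication limit. A trivial sketch of that DVA-count calculation (toy_* names invented):

#include <stdint.h>

#define	TOY_MIN(a, b)	((a) < (b) ? (a) : (b))

/*
 * write_policy()'s zp_ndvas, in isolation: e.g. copies=1 data -> 1 DVA,
 * copies=1 metadata -> 2 DVAs, copies=3 metadata -> capped at 3.
 */
static int
toy_ndvas(int copies, int is_metadata, int max_replication)
{
	return (TOY_MIN(copies + is_metadata, max_replication));
}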
+ */ + ASSERT(!HDR_IN_HASH_TABLE(ab)); + ab->b_arc_access = 0; + bzero(&ab->b_dva, sizeof (dva_t)); + ab->b_birth = 0; + ab->b_cksum0 = 0; + ab->b_buf->b_efunc = NULL; + ab->b_buf->b_private = NULL; + mutex_exit(hash_lock); + } + } + + zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED); + + if (arc_flags & ARC_WAIT) + return (zio_wait(zio)); + + ASSERT(arc_flags & ARC_NOWAIT); + zio_nowait(zio); + + return (0); +} + +static int +arc_memory_throttle(uint64_t reserve, uint64_t txg) +{ +#ifdef _KERNEL + uint64_t inflight_data = arc_anon->arcs_size; + uint64_t available_memory = ptob(freemem); + static uint64_t page_load = 0; + static uint64_t last_txg = 0; + +#if defined(__i386) + available_memory = + MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); +#endif + if (available_memory >= zfs_write_limit_max) + return (0); + + if (txg > last_txg) { + last_txg = txg; + page_load = 0; + } + /* + * If we are in pageout, we know that memory is already tight, + * the arc is already going to be evicting, so we just want to + * continue to let page writes occur as quickly as possible. + */ + if (curproc == proc_pageout) { + if (page_load > MAX(ptob(minfree), available_memory) / 4) + return (ERESTART); + /* Note: reserve is inflated, so we deflate */ + page_load += reserve / 8; + return (0); + } else if (page_load > 0 && arc_reclaim_needed()) { + /* memory is low, delay before restarting */ + ARCSTAT_INCR(arcstat_memory_throttle_count, 1); + return (EAGAIN); + } + page_load = 0; + + if (arc_size > arc_c_min) { + uint64_t evictable_memory = + arc_mru->arcs_lsize[ARC_BUFC_DATA] + + arc_mru->arcs_lsize[ARC_BUFC_METADATA] + + arc_mfu->arcs_lsize[ARC_BUFC_DATA] + + arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; + available_memory += MIN(evictable_memory, arc_size - arc_c_min); + } + + if (inflight_data > available_memory / 4) { + ARCSTAT_INCR(arcstat_memory_throttle_count, 1); + return (ERESTART); + } +#endif + return (0); +} + +void +arc_tempreserve_clear(uint64_t reserve) +{ + atomic_add_64(&arc_tempreserve, -reserve); + ASSERT((int64_t)arc_tempreserve >= 0); +} + +int +arc_tempreserve_space(uint64_t reserve, uint64_t txg) +{ + int error; + +#ifdef ZFS_DEBUG + /* + * Once in a while, fail for no reason. Everything should cope. + */ + if (spa_get_random(10000) == 0) { + dprintf("forcing random failure\n"); + return (ERESTART); + } +#endif + if (reserve > arc_c/4 && !arc_no_grow) + arc_c = MIN(arc_c_max, reserve * 4); + if (reserve > arc_c) + return (ENOMEM); + + /* + * Writes will, almost always, require additional memory allocations + * in order to compress/encrypt/etc the data. We therefor need to + * make sure that there is sufficient available memory for this. + */ + if (error = arc_memory_throttle(reserve, txg)) + return (error); + + /* + * Throttle writes when the amount of dirty data in the cache + * gets too large. We try to keep the cache less than half full + * of dirty blocks so that our sync times don't grow too large. + * Note: if two requests come in concurrently, we might let them + * both succeed, when one of them should fail. Not a huge deal. 
+ */ + if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && + arc_anon->arcs_size > arc_c / 4) { + dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " + "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", + arc_tempreserve>>10, + arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, + arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, + reserve>>10, arc_c>>10); + return (ERESTART); + } + atomic_add_64(&arc_tempreserve, reserve); + return (0); +} + +void +arc_init(void) +{ + mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); + cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); + + /* Convert seconds to clock ticks */ + arc_min_prefetch_lifespan = 1 * hz; + + /* Start out with 1/8 of all memory */ + arc_c = physmem * PAGESIZE / 8; + +#ifdef _KERNEL + /* + * On architectures where the physical memory can be larger + * than the addressable space (intel in 32-bit mode), we may + * need to limit the cache to 1/8 of VM size. + */ + arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); +#endif + + /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ + arc_c_min = MAX(arc_c / 4, 64<<20); + /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ + if (arc_c * 8 >= 1<<30) + arc_c_max = (arc_c * 8) - (1<<30); + else + arc_c_max = arc_c_min; + arc_c_max = MAX(arc_c * 6, arc_c_max); + + /* + * Allow the tunables to override our calculations if they are + * reasonable (ie. over 64MB) + */ + if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) + arc_c_max = zfs_arc_max; + if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) + arc_c_min = zfs_arc_min; + + arc_c = arc_c_max; + arc_p = (arc_c >> 1); + + /* limit meta-data to 1/4 of the arc capacity */ + arc_meta_limit = arc_c_max / 4; + + /* Allow the tunable to override if it is reasonable */ + if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) + arc_meta_limit = zfs_arc_meta_limit; + + if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) + arc_c_min = arc_meta_limit / 2; + + /* if kmem_flags are set, lets try to use less memory */ + if (kmem_debugging()) + arc_c = arc_c / 2; + if (arc_c < arc_c_min) + arc_c = arc_c_min; + + arc_anon = &ARC_anon; + arc_mru = &ARC_mru; + arc_mru_ghost = &ARC_mru_ghost; + arc_mfu = &ARC_mfu; + arc_mfu_ghost = &ARC_mfu_ghost; + arc_l2c_only = &ARC_l2c_only; + arc_size = 0; + + mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); + + list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], + sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); + list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], + sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); + list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], + sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); + list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], + sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); + list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], + sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); + list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], + sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); + list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], + sizeof (arc_buf_hdr_t), 
offsetof(arc_buf_hdr_t, b_arc_node)); + list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], + sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); + list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], + sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); + list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], + sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); + + buf_init(); + + arc_thread_exit = 0; + arc_eviction_list = NULL; + mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); + bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); + + arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, + sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); + + if (arc_ksp != NULL) { + arc_ksp->ks_data = &arc_stats; + kstat_install(arc_ksp); + } + + (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, + TS_RUN, minclsyspri); + + arc_dead = FALSE; + arc_warm = B_FALSE; + + if (zfs_write_limit_max == 0) + zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; + else + zfs_write_limit_shift = 0; + mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); +} + +void +arc_fini(void) +{ + mutex_enter(&arc_reclaim_thr_lock); + arc_thread_exit = 1; + while (arc_thread_exit != 0) + cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); + mutex_exit(&arc_reclaim_thr_lock); + + arc_flush(NULL); + + arc_dead = TRUE; + + if (arc_ksp != NULL) { + kstat_delete(arc_ksp); + arc_ksp = NULL; + } + + mutex_destroy(&arc_eviction_mtx); + mutex_destroy(&arc_reclaim_thr_lock); + cv_destroy(&arc_reclaim_thr_cv); + + list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); + list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); + list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); + list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); + list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); + list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); + list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); + list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); + + mutex_destroy(&arc_anon->arcs_mtx); + mutex_destroy(&arc_mru->arcs_mtx); + mutex_destroy(&arc_mru_ghost->arcs_mtx); + mutex_destroy(&arc_mfu->arcs_mtx); + mutex_destroy(&arc_mfu_ghost->arcs_mtx); + + mutex_destroy(&zfs_write_limit_lock); + + buf_fini(); +} + +/* + * Level 2 ARC + * + * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. + * It uses dedicated storage devices to hold cached data, which are populated + * using large infrequent writes. The main role of this cache is to boost + * the performance of random read workloads. The intended L2ARC devices + * include short-stroked disks, solid state disks, and other media with + * substantially faster read latency than disk. + * + * +-----------------------+ + * | ARC | + * +-----------------------+ + * | ^ ^ + * | | | + * l2arc_feed_thread() arc_read() + * | | | + * | l2arc read | + * V | | + * +---------------+ | + * | L2ARC | | + * +---------------+ | + * | ^ | + * l2arc_write() | | + * | | | + * V | | + * +-------+ +-------+ + * | vdev | | vdev | + * | cache | | cache | + * +-------+ +-------+ + * +=========+ .-----. + * : L2ARC : |-_____-| + * : devices : | Disks | + * +=========+ `-_____-' + * + * Read requests are satisfied from the following sources, in order: + * + * 1) ARC + * 2) vdev cache of L2ARC devices + * 3) L2ARC devices + * 4) vdev cache of disks + * 5) disks + * + * Some L2ARC device types exhibit extremely slow write performance. 
+ * To accommodate for this there are some significant differences between + * the L2ARC and traditional cache design: + * + * 1. There is no eviction path from the ARC to the L2ARC. Evictions from + * the ARC behave as usual, freeing buffers and placing headers on ghost + * lists. The ARC does not send buffers to the L2ARC during eviction as + * this would add inflated write latencies for all ARC memory pressure. + * + * 2. The L2ARC attempts to cache data from the ARC before it is evicted. + * It does this by periodically scanning buffers from the eviction-end of + * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are + * not already there. It scans until a headroom of buffers is satisfied, + * which itself is a buffer for ARC eviction. The thread that does this is + * l2arc_feed_thread(), illustrated below; example sizes are included to + * provide a better sense of ratio than this diagram: + * + * head --> tail + * +---------------------+----------+ + * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC + * +---------------------+----------+ | o L2ARC eligible + * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer + * +---------------------+----------+ | + * 15.9 Gbytes ^ 32 Mbytes | + * headroom | + * l2arc_feed_thread() + * | + * l2arc write hand <--[oooo]--' + * | 8 Mbyte + * | write max + * V + * +==============================+ + * L2ARC dev |####|#|###|###| |####| ... | + * +==============================+ + * 32 Gbytes + * + * 3. If an ARC buffer is copied to the L2ARC but then hit instead of + * evicted, then the L2ARC has cached a buffer much sooner than it probably + * needed to, potentially wasting L2ARC device bandwidth and storage. It is + * safe to say that this is an uncommon case, since buffers at the end of + * the ARC lists have moved there due to inactivity. + * + * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, + * then the L2ARC simply misses copying some buffers. This serves as a + * pressure valve to prevent heavy read workloads from both stalling the ARC + * with waits and clogging the L2ARC with writes. This also helps prevent + * the potential for the L2ARC to churn if it attempts to cache content too + * quickly, such as during backups of the entire pool. + * + * 5. After system boot and before the ARC has filled main memory, there are + * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru + * lists can remain mostly static. Instead of searching from tail of these + * lists as pictured, the l2arc_feed_thread() will search from the list heads + * for eligible buffers, greatly increasing its chance of finding them. + * + * The L2ARC device write speed is also boosted during this time so that + * the L2ARC warms up faster. Since there have been no ARC evictions yet, + * there are no L2ARC reads, and no fear of degrading read performance + * through increased writes. + * + * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that + * the vdev queue can aggregate them into larger and fewer writes. Each + * device is written to in a rotor fashion, sweeping writes through + * available space then repeating. + * + * 7. The L2ARC does not store dirty content. It never needs to flush + * write buffers back to disk based storage. + * + * 8. If an ARC buffer is written (and dirtied) which also exists in the + * L2ARC, the now stale L2ARC buffer is immediately dropped. 
+ * + * The performance of the L2ARC can be tweaked by a number of tunables, which + * may be necessary for different workloads: + * + * l2arc_write_max max write bytes per interval + * l2arc_write_boost extra write bytes during device warmup + * l2arc_noprefetch skip caching prefetched buffers + * l2arc_headroom number of max device writes to precache + * l2arc_feed_secs seconds between L2ARC writing + * + * Tunables may be removed or added as future performance improvements are + * integrated, and also may become zpool properties. + */ + +static void +l2arc_hdr_stat_add(void) +{ + ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); + ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); +} + +static void +l2arc_hdr_stat_remove(void) +{ + ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); + ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); +} + +/* + * Cycle through L2ARC devices. This is how L2ARC load balances. + * If a device is returned, this also returns holding the spa config lock. + */ +static l2arc_dev_t * +l2arc_dev_get_next(void) +{ + l2arc_dev_t *first, *next = NULL; + + /* + * Lock out the removal of spas (spa_namespace_lock), then removal + * of cache devices (l2arc_dev_mtx). Once a device has been selected, + * both locks will be dropped and a spa config lock held instead. + */ + mutex_enter(&spa_namespace_lock); + mutex_enter(&l2arc_dev_mtx); + + /* if there are no vdevs, there is nothing to do */ + if (l2arc_ndev == 0) + goto out; + + first = NULL; + next = l2arc_dev_last; + do { + /* loop around the list looking for a non-faulted vdev */ + if (next == NULL) { + next = list_head(l2arc_dev_list); + } else { + next = list_next(l2arc_dev_list, next); + if (next == NULL) + next = list_head(l2arc_dev_list); + } + + /* if we have come back to the start, bail out */ + if (first == NULL) + first = next; + else if (next == first) + break; + + } while (vdev_is_dead(next->l2ad_vdev)); + + /* if we were unable to find any usable vdevs, return NULL */ + if (vdev_is_dead(next->l2ad_vdev)) + next = NULL; + + l2arc_dev_last = next; + +out: + mutex_exit(&l2arc_dev_mtx); + + /* + * Grab the config lock to prevent the 'next' device from being + * removed while we are writing to it. + */ + if (next != NULL) + spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); + mutex_exit(&spa_namespace_lock); + + return (next); +} + +/* + * Free buffers that were tagged for destruction. + */ +static void +l2arc_do_free_on_write() +{ + list_t *buflist; + l2arc_data_free_t *df, *df_prev; + + mutex_enter(&l2arc_free_on_write_mtx); + buflist = l2arc_free_on_write; + + for (df = list_tail(buflist); df; df = df_prev) { + df_prev = list_prev(buflist, df); + ASSERT(df->l2df_data != NULL); + ASSERT(df->l2df_func != NULL); + df->l2df_func(df->l2df_data, df->l2df_size); + list_remove(buflist, df); + kmem_free(df, sizeof (l2arc_data_free_t)); + } + + mutex_exit(&l2arc_free_on_write_mtx); +} + +/* + * A write to a cache device has completed. Update all headers to allow + * reads from these buffers to begin. 
+ */ +static void +l2arc_write_done(zio_t *zio) +{ + l2arc_write_callback_t *cb; + l2arc_dev_t *dev; + list_t *buflist; + arc_buf_hdr_t *head, *ab, *ab_prev; + l2arc_buf_hdr_t *abl2; + kmutex_t *hash_lock; + + cb = zio->io_private; + ASSERT(cb != NULL); + dev = cb->l2wcb_dev; + ASSERT(dev != NULL); + head = cb->l2wcb_head; + ASSERT(head != NULL); + buflist = dev->l2ad_buflist; + ASSERT(buflist != NULL); + DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, + l2arc_write_callback_t *, cb); + + if (zio->io_error != 0) + ARCSTAT_BUMP(arcstat_l2_writes_error); + + mutex_enter(&l2arc_buflist_mtx); + + /* + * All writes completed, or an error was hit. + */ + for (ab = list_prev(buflist, head); ab; ab = ab_prev) { + ab_prev = list_prev(buflist, ab); + + hash_lock = HDR_LOCK(ab); + if (!mutex_tryenter(hash_lock)) { + /* + * This buffer misses out. It may be in a stage + * of eviction. Its ARC_L2_WRITING flag will be + * left set, denying reads to this buffer. + */ + ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); + continue; + } + + if (zio->io_error != 0) { + /* + * Error - drop L2ARC entry. + */ + list_remove(buflist, ab); + abl2 = ab->b_l2hdr; + ab->b_l2hdr = NULL; + kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); + ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); + } + + /* + * Allow ARC to begin reads to this L2ARC entry. + */ + ab->b_flags &= ~ARC_L2_WRITING; + + mutex_exit(hash_lock); + } + + atomic_inc_64(&l2arc_writes_done); + list_remove(buflist, head); + kmem_cache_free(hdr_cache, head); + mutex_exit(&l2arc_buflist_mtx); + + l2arc_do_free_on_write(); + + kmem_free(cb, sizeof (l2arc_write_callback_t)); +} + +/* + * A read to a cache device completed. Validate buffer contents before + * handing over to the regular ARC routines. + */ +static void +l2arc_read_done(zio_t *zio) +{ + l2arc_read_callback_t *cb; + arc_buf_hdr_t *hdr; + arc_buf_t *buf; + kmutex_t *hash_lock; + int equal; + + ASSERT(zio->io_vd != NULL); + ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); + + spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); + + cb = zio->io_private; + ASSERT(cb != NULL); + buf = cb->l2rcb_buf; + ASSERT(buf != NULL); + hdr = buf->b_hdr; + ASSERT(hdr != NULL); + + hash_lock = HDR_LOCK(hdr); + mutex_enter(hash_lock); + + /* + * Check this survived the L2ARC journey. + */ + equal = arc_cksum_equal(buf); + if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { + mutex_exit(hash_lock); + zio->io_private = buf; + zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ + zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ + arc_read_done(zio); + } else { + mutex_exit(hash_lock); + /* + * Buffer didn't survive caching. Increment stats and + * reissue to the original storage device. + */ + if (zio->io_error != 0) { + ARCSTAT_BUMP(arcstat_l2_io_error); + } else { + zio->io_error = EIO; + } + if (!equal) + ARCSTAT_BUMP(arcstat_l2_cksum_bad); + + /* + * If there's no waiter, issue an async i/o to the primary + * storage now. If there *is* a waiter, the caller must + * issue the i/o in a context where it's OK to block. + */ + if (zio->io_waiter == NULL) + zio_nowait(zio_read(zio->io_parent, + cb->l2rcb_spa, &cb->l2rcb_bp, + buf->b_data, zio->io_size, arc_read_done, buf, + zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); + } + + kmem_free(cb, sizeof (l2arc_read_callback_t)); +} + +/* + * This is the list priority from which the L2ARC will search for pages to + * cache. This is used within loops (0..3) to cycle through lists in the + * desired order. This order can have a significant effect on cache + * performance. 
+ * + * Currently the metadata lists are hit first, MFU then MRU, followed by + * the data lists. This function returns a locked list, and also returns + * the lock pointer. + */ +static list_t * +l2arc_list_locked(int list_num, kmutex_t **lock) +{ + list_t *list; + + ASSERT(list_num >= 0 && list_num <= 3); + + switch (list_num) { + case 0: + list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; + *lock = &arc_mfu->arcs_mtx; + break; + case 1: + list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; + *lock = &arc_mru->arcs_mtx; + break; + case 2: + list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; + *lock = &arc_mfu->arcs_mtx; + break; + case 3: + list = &arc_mru->arcs_list[ARC_BUFC_DATA]; + *lock = &arc_mru->arcs_mtx; + break; + } + + ASSERT(!(MUTEX_HELD(*lock))); + mutex_enter(*lock); + return (list); +} + +/* + * Evict buffers from the device write hand to the distance specified in + * bytes. This distance may span populated buffers, it may span nothing. + * This is clearing a region on the L2ARC device ready for writing. + * If the 'all' boolean is set, every buffer is evicted. + */ +static void +l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) +{ + list_t *buflist; + l2arc_buf_hdr_t *abl2; + arc_buf_hdr_t *ab, *ab_prev; + kmutex_t *hash_lock; + uint64_t taddr; + + buflist = dev->l2ad_buflist; + + if (buflist == NULL) + return; + + if (!all && dev->l2ad_first) { + /* + * This is the first sweep through the device. There is + * nothing to evict. + */ + return; + } + + if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { + /* + * When nearing the end of the device, evict to the end + * before the device write hand jumps to the start. + */ + taddr = dev->l2ad_end; + } else { + taddr = dev->l2ad_hand + distance; + } + DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, + uint64_t, taddr, boolean_t, all); + +top: + mutex_enter(&l2arc_buflist_mtx); + for (ab = list_tail(buflist); ab; ab = ab_prev) { + ab_prev = list_prev(buflist, ab); + + hash_lock = HDR_LOCK(ab); + if (!mutex_tryenter(hash_lock)) { + /* + * Missed the hash lock. Retry. + */ + ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); + mutex_exit(&l2arc_buflist_mtx); + mutex_enter(hash_lock); + mutex_exit(hash_lock); + goto top; + } + + if (HDR_L2_WRITE_HEAD(ab)) { + /* + * We hit a write head node. Leave it for + * l2arc_write_done(). + */ + list_remove(buflist, ab); + mutex_exit(hash_lock); + continue; + } + + if (!all && ab->b_l2hdr != NULL && + (ab->b_l2hdr->b_daddr > taddr || + ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { + /* + * We've evicted to the target address, + * or the end of the device. + */ + mutex_exit(hash_lock); + break; + } + + if (HDR_FREE_IN_PROGRESS(ab)) { + /* + * Already on the path to destruction. + */ + mutex_exit(hash_lock); + continue; + } + + if (ab->b_state == arc_l2c_only) { + ASSERT(!HDR_L2_READING(ab)); + /* + * This doesn't exist in the ARC. Destroy. + * arc_hdr_destroy() will call list_remove() + * and decrement arcstat_l2_size. + */ + arc_change_state(arc_anon, ab, hash_lock); + arc_hdr_destroy(ab); + } else { + /* + * Invalidate issued or about to be issued + * reads, since we may be about to write + * over this location. + */ + if (HDR_L2_READING(ab)) { + ARCSTAT_BUMP(arcstat_l2_evict_reading); + ab->b_flags |= ARC_L2_EVICTED; + } + + /* + * Tell ARC this no longer exists in L2ARC. 
+ */ + if (ab->b_l2hdr != NULL) { + abl2 = ab->b_l2hdr; + ab->b_l2hdr = NULL; + kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); + ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); + } + list_remove(buflist, ab); + + /* + * This may have been leftover after a + * failed write. + */ + ab->b_flags &= ~ARC_L2_WRITING; + } + mutex_exit(hash_lock); + } + mutex_exit(&l2arc_buflist_mtx); + + spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); + dev->l2ad_evict = taddr; +} + +/* + * Find and write ARC buffers to the L2ARC device. + * + * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid + * for reading until they have completed writing. + */ +static void +l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) +{ + arc_buf_hdr_t *ab, *ab_prev, *head; + l2arc_buf_hdr_t *hdrl2; + list_t *list; + uint64_t passed_sz, write_sz, buf_sz, headroom; + void *buf_data; + kmutex_t *hash_lock, *list_lock; + boolean_t have_lock, full; + l2arc_write_callback_t *cb; + zio_t *pio, *wzio; + + ASSERT(dev->l2ad_vdev != NULL); + + pio = NULL; + write_sz = 0; + full = B_FALSE; + head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); + head->b_flags |= ARC_L2_WRITE_HEAD; + + /* + * Copy buffers for L2ARC writing. + */ + mutex_enter(&l2arc_buflist_mtx); + for (int try = 0; try <= 3; try++) { + list = l2arc_list_locked(try, &list_lock); + passed_sz = 0; + + /* + * L2ARC fast warmup. + * + * Until the ARC is warm and starts to evict, read from the + * head of the ARC lists rather than the tail. + */ + headroom = target_sz * l2arc_headroom; + if (arc_warm == B_FALSE) + ab = list_head(list); + else + ab = list_tail(list); + + for (; ab; ab = ab_prev) { + if (arc_warm == B_FALSE) + ab_prev = list_next(list, ab); + else + ab_prev = list_prev(list, ab); + + hash_lock = HDR_LOCK(ab); + have_lock = MUTEX_HELD(hash_lock); + if (!have_lock && !mutex_tryenter(hash_lock)) { + /* + * Skip this buffer rather than waiting. + */ + continue; + } + + passed_sz += ab->b_size; + if (passed_sz > headroom) { + /* + * Searched too far. + */ + mutex_exit(hash_lock); + break; + } + + if (ab->b_spa != spa) { + mutex_exit(hash_lock); + continue; + } + + if (ab->b_l2hdr != NULL) { + /* + * Already in L2ARC. + */ + mutex_exit(hash_lock); + continue; + } + + if (HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) { + mutex_exit(hash_lock); + continue; + } + + if ((write_sz + ab->b_size) > target_sz) { + full = B_TRUE; + mutex_exit(hash_lock); + break; + } + + if (ab->b_buf == NULL) { + DTRACE_PROBE1(l2arc__buf__null, void *, ab); + mutex_exit(hash_lock); + continue; + } + + if (pio == NULL) { + /* + * Insert a dummy header on the buflist so + * l2arc_write_done() can find where the + * write buffers begin without searching. + */ + list_insert_head(dev->l2ad_buflist, head); + + cb = kmem_alloc( + sizeof (l2arc_write_callback_t), KM_SLEEP); + cb->l2wcb_dev = dev; + cb->l2wcb_head = head; + pio = zio_root(spa, l2arc_write_done, cb, + ZIO_FLAG_CANFAIL); + } + + /* + * Create and add a new L2ARC header. + */ + hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); + hdrl2->b_dev = dev; + hdrl2->b_daddr = dev->l2ad_hand; + + ab->b_flags |= ARC_L2_WRITING; + ab->b_l2hdr = hdrl2; + list_insert_head(dev->l2ad_buflist, ab); + buf_data = ab->b_buf->b_data; + buf_sz = ab->b_size; + + /* + * Compute and store the buffer cksum before + * writing. On debug the cksum is verified first. 
+ */ + arc_cksum_verify(ab->b_buf); + arc_cksum_compute(ab->b_buf, B_TRUE); + + mutex_exit(hash_lock); + + wzio = zio_write_phys(pio, dev->l2ad_vdev, + dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, + NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, + ZIO_FLAG_CANFAIL, B_FALSE); + + DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, + zio_t *, wzio); + (void) zio_nowait(wzio); + + /* + * Keep the clock hand suitably device-aligned. + */ + buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); + + write_sz += buf_sz; + dev->l2ad_hand += buf_sz; + } + + mutex_exit(list_lock); + + if (full == B_TRUE) + break; + } + mutex_exit(&l2arc_buflist_mtx); + + if (pio == NULL) { + ASSERT3U(write_sz, ==, 0); + kmem_cache_free(hdr_cache, head); + return; + } + + ASSERT3U(write_sz, <=, target_sz); + ARCSTAT_BUMP(arcstat_l2_writes_sent); + ARCSTAT_INCR(arcstat_l2_size, write_sz); + spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); + + /* + * Bump device hand to the device start if it is approaching the end. + * l2arc_evict() will already have evicted ahead for this case. + */ + if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { + spa_l2cache_space_update(dev->l2ad_vdev, 0, + dev->l2ad_end - dev->l2ad_hand); + dev->l2ad_hand = dev->l2ad_start; + dev->l2ad_evict = dev->l2ad_start; + dev->l2ad_first = B_FALSE; + } + + (void) zio_wait(pio); +} + +/* + * This thread feeds the L2ARC at regular intervals. This is the beating + * heart of the L2ARC. + */ +static void +l2arc_feed_thread(void) +{ + callb_cpr_t cpr; + l2arc_dev_t *dev; + spa_t *spa; + uint64_t size; + + CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); + + mutex_enter(&l2arc_feed_thr_lock); + + while (l2arc_thread_exit == 0) { + /* + * Pause for l2arc_feed_secs seconds between writes. + */ + CALLB_CPR_SAFE_BEGIN(&cpr); + (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, + lbolt + (hz * l2arc_feed_secs)); + CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); + + /* + * Quick check for L2ARC devices. + */ + mutex_enter(&l2arc_dev_mtx); + if (l2arc_ndev == 0) { + mutex_exit(&l2arc_dev_mtx); + continue; + } + mutex_exit(&l2arc_dev_mtx); + + /* + * This selects the next l2arc device to write to, and in + * doing so the next spa to feed from: dev->l2ad_spa. This + * will return NULL if there are now no l2arc devices or if + * they are all faulted. + * + * If a device is returned, its spa's config lock is also + * held to prevent device removal. l2arc_dev_get_next() + * will grab and release l2arc_dev_mtx. + */ + if ((dev = l2arc_dev_get_next()) == NULL) + continue; + + spa = dev->l2ad_spa; + ASSERT(spa != NULL); + + /* + * Avoid contributing to memory pressure. + */ + if (arc_reclaim_needed()) { + ARCSTAT_BUMP(arcstat_l2_abort_lowmem); + spa_config_exit(spa, SCL_L2ARC, dev); + continue; + } + + ARCSTAT_BUMP(arcstat_l2_feeds); + + size = dev->l2ad_write; + if (arc_warm == B_FALSE) + size += dev->l2ad_boost; + + /* + * Evict L2ARC buffers that will be overwritten. + */ + l2arc_evict(dev, size, B_FALSE); + + /* + * Write ARC buffers. 
+ */ + l2arc_write_buffers(spa, dev, size); + spa_config_exit(spa, SCL_L2ARC, dev); + } + + l2arc_thread_exit = 0; + cv_broadcast(&l2arc_feed_thr_cv); + CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ + thread_exit(); +} + +boolean_t +l2arc_vdev_present(vdev_t *vd) +{ + l2arc_dev_t *dev; + + mutex_enter(&l2arc_dev_mtx); + for (dev = list_head(l2arc_dev_list); dev != NULL; + dev = list_next(l2arc_dev_list, dev)) { + if (dev->l2ad_vdev == vd) + break; + } + mutex_exit(&l2arc_dev_mtx); + + return (dev != NULL); +} + +/* + * Add a vdev for use by the L2ARC. By this point the spa has already + * validated the vdev and opened it. + */ +void +l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) +{ + l2arc_dev_t *adddev; + + ASSERT(!l2arc_vdev_present(vd)); + + /* + * Create a new l2arc device entry. + */ + adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); + adddev->l2ad_spa = spa; + adddev->l2ad_vdev = vd; + adddev->l2ad_write = l2arc_write_max; + adddev->l2ad_boost = l2arc_write_boost; + adddev->l2ad_start = start; + adddev->l2ad_end = end; + adddev->l2ad_hand = adddev->l2ad_start; + adddev->l2ad_evict = adddev->l2ad_start; + adddev->l2ad_first = B_TRUE; + ASSERT3U(adddev->l2ad_write, >, 0); + + /* + * This is a list of all ARC buffers that are still valid on the + * device. + */ + adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); + list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), + offsetof(arc_buf_hdr_t, b_l2node)); + + spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); + + /* + * Add device to global list + */ + mutex_enter(&l2arc_dev_mtx); + list_insert_head(l2arc_dev_list, adddev); + atomic_inc_64(&l2arc_ndev); + mutex_exit(&l2arc_dev_mtx); +} + +/* + * Remove a vdev from the L2ARC. + */ +void +l2arc_remove_vdev(vdev_t *vd) +{ + l2arc_dev_t *dev, *nextdev, *remdev = NULL; + + /* + * Find the device by vdev + */ + mutex_enter(&l2arc_dev_mtx); + for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { + nextdev = list_next(l2arc_dev_list, dev); + if (vd == dev->l2ad_vdev) { + remdev = dev; + break; + } + } + ASSERT(remdev != NULL); + + /* + * Remove device from global list + */ + list_remove(l2arc_dev_list, remdev); + l2arc_dev_last = NULL; /* may have been invalidated */ + atomic_dec_64(&l2arc_ndev); + mutex_exit(&l2arc_dev_mtx); + + /* + * Clear all buflists and ARC references. L2ARC device flush. + */ + l2arc_evict(remdev, 0, B_TRUE); + list_destroy(remdev->l2ad_buflist); + kmem_free(remdev->l2ad_buflist, sizeof (list_t)); + kmem_free(remdev, sizeof (l2arc_dev_t)); +} + +void +l2arc_init(void) +{ + l2arc_thread_exit = 0; + l2arc_ndev = 0; + l2arc_writes_sent = 0; + l2arc_writes_done = 0; + + mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); + cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); + mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); + + l2arc_dev_list = &L2ARC_dev_list; + l2arc_free_on_write = &L2ARC_free_on_write; + list_create(l2arc_dev_list, sizeof (l2arc_dev_t), + offsetof(l2arc_dev_t, l2ad_node)); + list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), + offsetof(l2arc_data_free_t, l2df_list_node)); +} + +void +l2arc_fini(void) +{ + /* + * This is called from dmu_fini(), which is called from spa_fini(); + * Because of this, we can assume that all l2arc devices have + * already been removed when the pools themselves were removed. 
+ */ + + l2arc_do_free_on_write(); + + mutex_destroy(&l2arc_feed_thr_lock); + cv_destroy(&l2arc_feed_thr_cv); + mutex_destroy(&l2arc_dev_mtx); + mutex_destroy(&l2arc_buflist_mtx); + mutex_destroy(&l2arc_free_on_write_mtx); + + list_destroy(l2arc_dev_list); + list_destroy(l2arc_free_on_write); +} + +void +l2arc_start(void) +{ + if (!(spa_mode & FWRITE)) + return; + + (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, + TS_RUN, minclsyspri); +} + +void +l2arc_stop(void) +{ + if (!(spa_mode & FWRITE)) + return; + + mutex_enter(&l2arc_feed_thr_lock); + cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ + l2arc_thread_exit = 1; + while (l2arc_thread_exit != 0) + cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); + mutex_exit(&l2arc_feed_thr_lock); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/bplist.c b/external/cddl/osnet/dist/uts/common/fs/zfs/bplist.c new file mode 100644 index 000000000000..93b7741d77be --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/bplist.c @@ -0,0 +1,349 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include + +static int +bplist_hold(bplist_t *bpl) +{ + ASSERT(MUTEX_HELD(&bpl->bpl_lock)); + if (bpl->bpl_dbuf == NULL) { + int err = dmu_bonus_hold(bpl->bpl_mos, + bpl->bpl_object, bpl, &bpl->bpl_dbuf); + if (err) + return (err); + bpl->bpl_phys = bpl->bpl_dbuf->db_data; + } + return (0); +} + +uint64_t +bplist_create(objset_t *mos, int blocksize, dmu_tx_t *tx) +{ + int size; + + size = spa_version(dmu_objset_spa(mos)) < SPA_VERSION_BPLIST_ACCOUNT ? 
+ BPLIST_SIZE_V0 : sizeof (bplist_phys_t); + + return (dmu_object_alloc(mos, DMU_OT_BPLIST, blocksize, + DMU_OT_BPLIST_HDR, size, tx)); +} + +void +bplist_destroy(objset_t *mos, uint64_t object, dmu_tx_t *tx) +{ + VERIFY(dmu_object_free(mos, object, tx) == 0); +} + +int +bplist_open(bplist_t *bpl, objset_t *mos, uint64_t object) +{ + dmu_object_info_t doi; + int err; + + err = dmu_object_info(mos, object, &doi); + if (err) + return (err); + + mutex_enter(&bpl->bpl_lock); + + ASSERT(bpl->bpl_dbuf == NULL); + ASSERT(bpl->bpl_phys == NULL); + ASSERT(bpl->bpl_cached_dbuf == NULL); + ASSERT(bpl->bpl_queue == NULL); + ASSERT(object != 0); + ASSERT3U(doi.doi_type, ==, DMU_OT_BPLIST); + ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPLIST_HDR); + + bpl->bpl_mos = mos; + bpl->bpl_object = object; + bpl->bpl_blockshift = highbit(doi.doi_data_block_size - 1); + bpl->bpl_bpshift = bpl->bpl_blockshift - SPA_BLKPTRSHIFT; + bpl->bpl_havecomp = (doi.doi_bonus_size == sizeof (bplist_phys_t)); + + mutex_exit(&bpl->bpl_lock); + return (0); +} + +void +bplist_close(bplist_t *bpl) +{ + mutex_enter(&bpl->bpl_lock); + + ASSERT(bpl->bpl_queue == NULL); + + if (bpl->bpl_cached_dbuf) { + dmu_buf_rele(bpl->bpl_cached_dbuf, bpl); + bpl->bpl_cached_dbuf = NULL; + } + if (bpl->bpl_dbuf) { + dmu_buf_rele(bpl->bpl_dbuf, bpl); + bpl->bpl_dbuf = NULL; + bpl->bpl_phys = NULL; + } + + mutex_exit(&bpl->bpl_lock); +} + +boolean_t +bplist_empty(bplist_t *bpl) +{ + boolean_t rv; + + if (bpl->bpl_object == 0) + return (B_TRUE); + + mutex_enter(&bpl->bpl_lock); + VERIFY(0 == bplist_hold(bpl)); /* XXX */ + rv = (bpl->bpl_phys->bpl_entries == 0); + mutex_exit(&bpl->bpl_lock); + + return (rv); +} + +static int +bplist_cache(bplist_t *bpl, uint64_t blkid) +{ + int err = 0; + + if (bpl->bpl_cached_dbuf == NULL || + bpl->bpl_cached_dbuf->db_offset != (blkid << bpl->bpl_blockshift)) { + if (bpl->bpl_cached_dbuf != NULL) + dmu_buf_rele(bpl->bpl_cached_dbuf, bpl); + err = dmu_buf_hold(bpl->bpl_mos, + bpl->bpl_object, blkid << bpl->bpl_blockshift, + bpl, &bpl->bpl_cached_dbuf); + ASSERT(err || bpl->bpl_cached_dbuf->db_size == + 1ULL << bpl->bpl_blockshift); + } + return (err); +} + +int +bplist_iterate(bplist_t *bpl, uint64_t *itorp, blkptr_t *bp) +{ + uint64_t blk, off; + blkptr_t *bparray; + int err; + + mutex_enter(&bpl->bpl_lock); + + err = bplist_hold(bpl); + if (err) { + mutex_exit(&bpl->bpl_lock); + return (err); + } + + if (*itorp >= bpl->bpl_phys->bpl_entries) { + mutex_exit(&bpl->bpl_lock); + return (ENOENT); + } + + blk = *itorp >> bpl->bpl_bpshift; + off = P2PHASE(*itorp, 1ULL << bpl->bpl_bpshift); + + err = bplist_cache(bpl, blk); + if (err) { + mutex_exit(&bpl->bpl_lock); + return (err); + } + + bparray = bpl->bpl_cached_dbuf->db_data; + *bp = bparray[off]; + (*itorp)++; + mutex_exit(&bpl->bpl_lock); + return (0); +} + +int +bplist_enqueue(bplist_t *bpl, const blkptr_t *bp, dmu_tx_t *tx) +{ + uint64_t blk, off; + blkptr_t *bparray; + int err; + + ASSERT(!BP_IS_HOLE(bp)); + mutex_enter(&bpl->bpl_lock); + err = bplist_hold(bpl); + if (err) + return (err); + + blk = bpl->bpl_phys->bpl_entries >> bpl->bpl_bpshift; + off = P2PHASE(bpl->bpl_phys->bpl_entries, 1ULL << bpl->bpl_bpshift); + + err = bplist_cache(bpl, blk); + if (err) { + mutex_exit(&bpl->bpl_lock); + return (err); + } + + dmu_buf_will_dirty(bpl->bpl_cached_dbuf, tx); + bparray = bpl->bpl_cached_dbuf->db_data; + bparray[off] = *bp; + + /* We never need the fill count. 
*/ + bparray[off].blk_fill = 0; + + /* The bplist will compress better if we can leave off the checksum */ + bzero(&bparray[off].blk_cksum, sizeof (bparray[off].blk_cksum)); + + dmu_buf_will_dirty(bpl->bpl_dbuf, tx); + bpl->bpl_phys->bpl_entries++; + bpl->bpl_phys->bpl_bytes += + bp_get_dasize(dmu_objset_spa(bpl->bpl_mos), bp); + if (bpl->bpl_havecomp) { + bpl->bpl_phys->bpl_comp += BP_GET_PSIZE(bp); + bpl->bpl_phys->bpl_uncomp += BP_GET_UCSIZE(bp); + } + mutex_exit(&bpl->bpl_lock); + + return (0); +} + +/* + * Deferred entry; will be written later by bplist_sync(). + */ +void +bplist_enqueue_deferred(bplist_t *bpl, const blkptr_t *bp) +{ + bplist_q_t *bpq = kmem_alloc(sizeof (*bpq), KM_SLEEP); + + ASSERT(!BP_IS_HOLE(bp)); + mutex_enter(&bpl->bpl_lock); + bpq->bpq_blk = *bp; + bpq->bpq_next = bpl->bpl_queue; + bpl->bpl_queue = bpq; + mutex_exit(&bpl->bpl_lock); +} + +void +bplist_sync(bplist_t *bpl, dmu_tx_t *tx) +{ + bplist_q_t *bpq; + + mutex_enter(&bpl->bpl_lock); + while ((bpq = bpl->bpl_queue) != NULL) { + bpl->bpl_queue = bpq->bpq_next; + mutex_exit(&bpl->bpl_lock); + VERIFY(0 == bplist_enqueue(bpl, &bpq->bpq_blk, tx)); + kmem_free(bpq, sizeof (*bpq)); + mutex_enter(&bpl->bpl_lock); + } + mutex_exit(&bpl->bpl_lock); +} + +void +bplist_vacate(bplist_t *bpl, dmu_tx_t *tx) +{ + mutex_enter(&bpl->bpl_lock); + ASSERT3P(bpl->bpl_queue, ==, NULL); + VERIFY(0 == bplist_hold(bpl)); + dmu_buf_will_dirty(bpl->bpl_dbuf, tx); + VERIFY(0 == dmu_free_range(bpl->bpl_mos, + bpl->bpl_object, 0, -1ULL, tx)); + bpl->bpl_phys->bpl_entries = 0; + bpl->bpl_phys->bpl_bytes = 0; + if (bpl->bpl_havecomp) { + bpl->bpl_phys->bpl_comp = 0; + bpl->bpl_phys->bpl_uncomp = 0; + } + mutex_exit(&bpl->bpl_lock); +} + +int +bplist_space(bplist_t *bpl, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp) +{ + int err; + + mutex_enter(&bpl->bpl_lock); + + err = bplist_hold(bpl); + if (err) { + mutex_exit(&bpl->bpl_lock); + return (err); + } + + *usedp = bpl->bpl_phys->bpl_bytes; + if (bpl->bpl_havecomp) { + *compp = bpl->bpl_phys->bpl_comp; + *uncompp = bpl->bpl_phys->bpl_uncomp; + } + mutex_exit(&bpl->bpl_lock); + + if (!bpl->bpl_havecomp) { + uint64_t itor = 0, comp = 0, uncomp = 0; + blkptr_t bp; + + while ((err = bplist_iterate(bpl, &itor, &bp)) == 0) { + comp += BP_GET_PSIZE(&bp); + uncomp += BP_GET_UCSIZE(&bp); + } + if (err == ENOENT) + err = 0; + *compp = comp; + *uncompp = uncomp; + } + + return (err); +} + +/* + * Return (in *dasizep) the amount of space on the deadlist which is: + * mintxg < blk_birth <= maxtxg + */ +int +bplist_space_birthrange(bplist_t *bpl, uint64_t mintxg, uint64_t maxtxg, + uint64_t *dasizep) +{ + uint64_t size = 0; + uint64_t itor = 0; + blkptr_t bp; + int err; + + /* + * As an optimization, if they want the whole txg range, just + * get bpl_bytes rather than iterating over the bps. 
+ */ + if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX) { + mutex_enter(&bpl->bpl_lock); + err = bplist_hold(bpl); + if (err == 0) + *dasizep = bpl->bpl_phys->bpl_bytes; + mutex_exit(&bpl->bpl_lock); + return (err); + } + + while ((err = bplist_iterate(bpl, &itor, &bp)) == 0) { + if (bp.blk_birth > mintxg && bp.blk_birth <= maxtxg) { + size += + bp_get_dasize(dmu_objset_spa(bpl->bpl_mos), &bp); + } + } + if (err == ENOENT) + err = 0; + *dasizep = size; + return (err); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dbuf.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dbuf.c new file mode 100644 index 000000000000..d04610317a4e --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dbuf.c @@ -0,0 +1,2356 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void dbuf_destroy(dmu_buf_impl_t *db); +static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); +static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); +static arc_done_func_t dbuf_write_ready; +static arc_done_func_t dbuf_write_done; +static zio_done_func_t dbuf_skip_write_ready; +static zio_done_func_t dbuf_skip_write_done; + +/* + * Global data structures and functions for the dbuf cache. 
+ */ +static kmem_cache_t *dbuf_cache; + +/* ARGSUSED */ +static int +dbuf_cons(void *vdb, void *unused, int kmflag) +{ + dmu_buf_impl_t *db = vdb; + bzero(db, sizeof (dmu_buf_impl_t)); + + mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL); + cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL); + refcount_create(&db->db_holds); + return (0); +} + +/* ARGSUSED */ +static void +dbuf_dest(void *vdb, void *unused) +{ + dmu_buf_impl_t *db = vdb; + mutex_destroy(&db->db_mtx); + cv_destroy(&db->db_changed); + refcount_destroy(&db->db_holds); +} + +/* + * dbuf hash table routines + */ +static dbuf_hash_table_t dbuf_hash_table; + +static uint64_t dbuf_hash_count; + +static uint64_t +dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid) +{ + uintptr_t osv = (uintptr_t)os; + uint64_t crc = -1ULL; + + ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); + crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF]; + crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF]; + crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF]; + crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF]; + crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF]; + crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF]; + + crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16); + + return (crc); +} + +#define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid); + +#define DBUF_EQUAL(dbuf, os, obj, level, blkid) \ + ((dbuf)->db.db_object == (obj) && \ + (dbuf)->db_objset == (os) && \ + (dbuf)->db_level == (level) && \ + (dbuf)->db_blkid == (blkid)) + +dmu_buf_impl_t * +dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid) +{ + dbuf_hash_table_t *h = &dbuf_hash_table; + objset_impl_t *os = dn->dn_objset; + uint64_t obj = dn->dn_object; + uint64_t hv = DBUF_HASH(os, obj, level, blkid); + uint64_t idx = hv & h->hash_table_mask; + dmu_buf_impl_t *db; + + mutex_enter(DBUF_HASH_MUTEX(h, idx)); + for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) { + if (DBUF_EQUAL(db, os, obj, level, blkid)) { + mutex_enter(&db->db_mtx); + if (db->db_state != DB_EVICTING) { + mutex_exit(DBUF_HASH_MUTEX(h, idx)); + return (db); + } + mutex_exit(&db->db_mtx); + } + } + mutex_exit(DBUF_HASH_MUTEX(h, idx)); + return (NULL); +} + +/* + * Insert an entry into the hash table. If there is already an element + * equal to elem in the hash table, then the already existing element + * will be returned and the new element will not be inserted. + * Otherwise returns NULL. + */ +static dmu_buf_impl_t * +dbuf_hash_insert(dmu_buf_impl_t *db) +{ + dbuf_hash_table_t *h = &dbuf_hash_table; + objset_impl_t *os = db->db_objset; + uint64_t obj = db->db.db_object; + int level = db->db_level; + uint64_t blkid = db->db_blkid; + uint64_t hv = DBUF_HASH(os, obj, level, blkid); + uint64_t idx = hv & h->hash_table_mask; + dmu_buf_impl_t *dbf; + + mutex_enter(DBUF_HASH_MUTEX(h, idx)); + for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) { + if (DBUF_EQUAL(dbf, os, obj, level, blkid)) { + mutex_enter(&dbf->db_mtx); + if (dbf->db_state != DB_EVICTING) { + mutex_exit(DBUF_HASH_MUTEX(h, idx)); + return (dbf); + } + mutex_exit(&dbf->db_mtx); + } + } + + mutex_enter(&db->db_mtx); + db->db_hash_next = h->hash_table[idx]; + h->hash_table[idx] = db; + mutex_exit(DBUF_HASH_MUTEX(h, idx)); + atomic_add_64(&dbuf_hash_count, 1); + + return (NULL); +} + +/* + * Remove an entry from the hash table. This operation will + * fail if there are any existing holds on the db. 
+ */ +static void +dbuf_hash_remove(dmu_buf_impl_t *db) +{ + dbuf_hash_table_t *h = &dbuf_hash_table; + uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object, + db->db_level, db->db_blkid); + uint64_t idx = hv & h->hash_table_mask; + dmu_buf_impl_t *dbf, **dbp; + + /* + * We musn't hold db_mtx to maintin lock ordering: + * DBUF_HASH_MUTEX > db_mtx. + */ + ASSERT(refcount_is_zero(&db->db_holds)); + ASSERT(db->db_state == DB_EVICTING); + ASSERT(!MUTEX_HELD(&db->db_mtx)); + + mutex_enter(DBUF_HASH_MUTEX(h, idx)); + dbp = &h->hash_table[idx]; + while ((dbf = *dbp) != db) { + dbp = &dbf->db_hash_next; + ASSERT(dbf != NULL); + } + *dbp = db->db_hash_next; + db->db_hash_next = NULL; + mutex_exit(DBUF_HASH_MUTEX(h, idx)); + atomic_add_64(&dbuf_hash_count, -1); +} + +static arc_evict_func_t dbuf_do_evict; + +static void +dbuf_evict_user(dmu_buf_impl_t *db) +{ + ASSERT(MUTEX_HELD(&db->db_mtx)); + + if (db->db_level != 0 || db->db_evict_func == NULL) + return; + + if (db->db_user_data_ptr_ptr) + *db->db_user_data_ptr_ptr = db->db.db_data; + db->db_evict_func(&db->db, db->db_user_ptr); + db->db_user_ptr = NULL; + db->db_user_data_ptr_ptr = NULL; + db->db_evict_func = NULL; +} + +void +dbuf_evict(dmu_buf_impl_t *db) +{ + ASSERT(MUTEX_HELD(&db->db_mtx)); + ASSERT(db->db_buf == NULL); + ASSERT(db->db_data_pending == NULL); + + dbuf_clear(db); + dbuf_destroy(db); +} + +void +dbuf_init(void) +{ + uint64_t hsize = 1ULL << 16; + dbuf_hash_table_t *h = &dbuf_hash_table; + int i; + + /* + * The hash table is big enough to fill all of physical memory + * with an average 4K block size. The table will take up + * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers). + */ + while (hsize * 4096 < physmem * PAGESIZE) + hsize <<= 1; + +retry: + h->hash_table_mask = hsize - 1; + h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); + if (h->hash_table == NULL) { + /* XXX - we should really return an error instead of assert */ + ASSERT(hsize > (1ULL << 10)); + hsize >>= 1; + goto retry; + } + + dbuf_cache = kmem_cache_create("dmu_buf_impl_t", + sizeof (dmu_buf_impl_t), + 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0); + + for (i = 0; i < DBUF_MUTEXES; i++) + mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL); +} + +void +dbuf_fini(void) +{ + dbuf_hash_table_t *h = &dbuf_hash_table; + int i; + + for (i = 0; i < DBUF_MUTEXES; i++) + mutex_destroy(&h->hash_mutexes[i]); + kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); + kmem_cache_destroy(dbuf_cache); +} + +/* + * Other stuff. + */ + +#ifdef ZFS_DEBUG +static void +dbuf_verify(dmu_buf_impl_t *db) +{ + dnode_t *dn = db->db_dnode; + + ASSERT(MUTEX_HELD(&db->db_mtx)); + + if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) + return; + + ASSERT(db->db_objset != NULL); + if (dn == NULL) { + ASSERT(db->db_parent == NULL); + ASSERT(db->db_blkptr == NULL); + } else { + ASSERT3U(db->db.db_object, ==, dn->dn_object); + ASSERT3P(db->db_objset, ==, dn->dn_objset); + ASSERT3U(db->db_level, <, dn->dn_nlevels); + ASSERT(db->db_blkid == DB_BONUS_BLKID || + list_head(&dn->dn_dbufs)); + } + if (db->db_blkid == DB_BONUS_BLKID) { + ASSERT(dn != NULL); + ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); + ASSERT3U(db->db.db_offset, ==, DB_BONUS_BLKID); + } else { + ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); + } + + /* + * We can't assert that db_size matches dn_datablksz because it + * can be momentarily different when another thread is doing + * dnode_set_blksz(). 
+ */ + if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { + dbuf_dirty_record_t *dr = db->db_data_pending; + /* + * It should only be modified in syncing context, so + * make sure we only have one copy of the data. + */ + ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); + } + + /* verify db->db_blkptr */ + if (db->db_blkptr) { + if (db->db_parent == dn->dn_dbuf) { + /* db is pointed to by the dnode */ + /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ + if (db->db.db_object == DMU_META_DNODE_OBJECT) + ASSERT(db->db_parent == NULL); + else + ASSERT(db->db_parent != NULL); + ASSERT3P(db->db_blkptr, ==, + &dn->dn_phys->dn_blkptr[db->db_blkid]); + } else { + /* db is pointed to by an indirect block */ + int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; + ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); + ASSERT3U(db->db_parent->db.db_object, ==, + db->db.db_object); + /* + * dnode_grow_indblksz() can make this fail if we don't + * have the struct_rwlock. XXX indblksz no longer + * grows. safe to do this now? + */ + if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)) { + ASSERT3P(db->db_blkptr, ==, + ((blkptr_t *)db->db_parent->db.db_data + + db->db_blkid % epb)); + } + } + } + if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && + db->db.db_data && db->db_blkid != DB_BONUS_BLKID && + db->db_state != DB_FILL && !dn->dn_free_txg) { + /* + * If the blkptr isn't set but they have nonzero data, + * it had better be dirty, otherwise we'll lose that + * data when we evict this buffer. + */ + if (db->db_dirtycnt == 0) { + uint64_t *buf = db->db.db_data; + int i; + + for (i = 0; i < db->db.db_size >> 3; i++) { + ASSERT(buf[i] == 0); + } + } + } +} +#endif + +static void +dbuf_update_data(dmu_buf_impl_t *db) +{ + ASSERT(MUTEX_HELD(&db->db_mtx)); + if (db->db_level == 0 && db->db_user_data_ptr_ptr) { + ASSERT(!refcount_is_zero(&db->db_holds)); + *db->db_user_data_ptr_ptr = db->db.db_data; + } +} + +static void +dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) +{ + ASSERT(MUTEX_HELD(&db->db_mtx)); + ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf)); + db->db_buf = buf; + if (buf != NULL) { + ASSERT(buf->b_data != NULL); + db->db.db_data = buf->b_data; + if (!arc_released(buf)) + arc_set_callback(buf, dbuf_do_evict, db); + dbuf_update_data(db); + } else { + dbuf_evict_user(db); + db->db.db_data = NULL; + if (db->db_state != DB_NOFILL) + db->db_state = DB_UNCACHED; + } +} + +uint64_t +dbuf_whichblock(dnode_t *dn, uint64_t offset) +{ + if (dn->dn_datablkshift) { + return (offset >> dn->dn_datablkshift); + } else { + ASSERT3U(offset, <, dn->dn_datablksz); + return (0); + } +} + +static void +dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb) +{ + dmu_buf_impl_t *db = vdb; + + mutex_enter(&db->db_mtx); + ASSERT3U(db->db_state, ==, DB_READ); + /* + * All reads are synchronous, so we must have a hold on the dbuf + */ + ASSERT(refcount_count(&db->db_holds) > 0); + ASSERT(db->db_buf == NULL); + ASSERT(db->db.db_data == NULL); + if (db->db_level == 0 && db->db_freed_in_flight) { + /* we were freed in flight; disregard any error */ + arc_release(buf, db); + bzero(buf->b_data, db->db.db_size); + arc_buf_freeze(buf); + db->db_freed_in_flight = FALSE; + dbuf_set_data(db, buf); + db->db_state = DB_CACHED; + } else if (zio == NULL || zio->io_error == 0) { + dbuf_set_data(db, buf); + db->db_state = DB_CACHED; + } else { + ASSERT(db->db_blkid != DB_BONUS_BLKID); + ASSERT3P(db->db_buf, ==, NULL); + VERIFY(arc_buf_remove_ref(buf, db) == 1); + db->db_state = 
DB_UNCACHED; + } + cv_broadcast(&db->db_changed); + mutex_exit(&db->db_mtx); + dbuf_rele(db, NULL); +} + +static void +dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags) +{ + dnode_t *dn = db->db_dnode; + zbookmark_t zb; + uint32_t aflags = ARC_NOWAIT; + arc_buf_t *pbuf; + + ASSERT(!refcount_is_zero(&db->db_holds)); + /* We need the struct_rwlock to prevent db_blkptr from changing. */ + ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); + ASSERT(MUTEX_HELD(&db->db_mtx)); + ASSERT(db->db_state == DB_UNCACHED); + ASSERT(db->db_buf == NULL); + + if (db->db_blkid == DB_BONUS_BLKID) { + int bonuslen = dn->dn_bonuslen; + + ASSERT3U(bonuslen, <=, db->db.db_size); + db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN); + arc_space_consume(DN_MAX_BONUSLEN); + if (bonuslen < DN_MAX_BONUSLEN) + bzero(db->db.db_data, DN_MAX_BONUSLEN); + bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, + bonuslen); + dbuf_update_data(db); + db->db_state = DB_CACHED; + mutex_exit(&db->db_mtx); + return; + } + + /* + * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() + * processes the delete record and clears the bp while we are waiting + * for the dn_mtx (resulting in a "no" from block_freed). + */ + if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || + (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || + BP_IS_HOLE(db->db_blkptr)))) { + arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); + + dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa, + db->db.db_size, db, type)); + bzero(db->db.db_data, db->db.db_size); + db->db_state = DB_CACHED; + *flags |= DB_RF_CACHED; + mutex_exit(&db->db_mtx); + return; + } + + db->db_state = DB_READ; + mutex_exit(&db->db_mtx); + + if (DBUF_IS_L2CACHEABLE(db)) + aflags |= ARC_L2CACHE; + + zb.zb_objset = db->db_objset->os_dsl_dataset ? + db->db_objset->os_dsl_dataset->ds_object : 0; + zb.zb_object = db->db.db_object; + zb.zb_level = db->db_level; + zb.zb_blkid = db->db_blkid; + + dbuf_add_ref(db, NULL); + /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */ + + if (db->db_parent) + pbuf = db->db_parent->db_buf; + else + pbuf = db->db_objset->os_phys_buf; + + (void) arc_read(zio, dn->dn_objset->os_spa, db->db_blkptr, pbuf, + dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, + (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED, + &aflags, &zb); + if (aflags & ARC_CACHED) + *flags |= DB_RF_CACHED; +} + +int +dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) +{ + int err = 0; + int havepzio = (zio != NULL); + int prefetch; + + /* + * We don't have to hold the mutex to check db_state because it + * can't be freed while we have a hold on the buffer. 
+ */ + ASSERT(!refcount_is_zero(&db->db_holds)); + + if (db->db_state == DB_NOFILL) + return (EIO); + + if ((flags & DB_RF_HAVESTRUCT) == 0) + rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER); + + prefetch = db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID && + (flags & DB_RF_NOPREFETCH) == 0 && db->db_dnode != NULL && + DBUF_IS_CACHEABLE(db); + + mutex_enter(&db->db_mtx); + if (db->db_state == DB_CACHED) { + mutex_exit(&db->db_mtx); + if (prefetch) + dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset, + db->db.db_size, TRUE); + if ((flags & DB_RF_HAVESTRUCT) == 0) + rw_exit(&db->db_dnode->dn_struct_rwlock); + } else if (db->db_state == DB_UNCACHED) { + if (zio == NULL) { + zio = zio_root(db->db_dnode->dn_objset->os_spa, + NULL, NULL, ZIO_FLAG_CANFAIL); + } + dbuf_read_impl(db, zio, &flags); + + /* dbuf_read_impl has dropped db_mtx for us */ + + if (prefetch) + dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset, + db->db.db_size, flags & DB_RF_CACHED); + + if ((flags & DB_RF_HAVESTRUCT) == 0) + rw_exit(&db->db_dnode->dn_struct_rwlock); + + if (!havepzio) + err = zio_wait(zio); + } else { + mutex_exit(&db->db_mtx); + if (prefetch) + dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset, + db->db.db_size, TRUE); + if ((flags & DB_RF_HAVESTRUCT) == 0) + rw_exit(&db->db_dnode->dn_struct_rwlock); + + mutex_enter(&db->db_mtx); + if ((flags & DB_RF_NEVERWAIT) == 0) { + while (db->db_state == DB_READ || + db->db_state == DB_FILL) { + ASSERT(db->db_state == DB_READ || + (flags & DB_RF_HAVESTRUCT) == 0); + cv_wait(&db->db_changed, &db->db_mtx); + } + if (db->db_state == DB_UNCACHED) + err = EIO; + } + mutex_exit(&db->db_mtx); + } + + ASSERT(err || havepzio || db->db_state == DB_CACHED); + return (err); +} + +static void +dbuf_noread(dmu_buf_impl_t *db) +{ + ASSERT(!refcount_is_zero(&db->db_holds)); + ASSERT(db->db_blkid != DB_BONUS_BLKID); + mutex_enter(&db->db_mtx); + while (db->db_state == DB_READ || db->db_state == DB_FILL) + cv_wait(&db->db_changed, &db->db_mtx); + if (db->db_state == DB_UNCACHED) { + arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); + + ASSERT(db->db_buf == NULL); + ASSERT(db->db.db_data == NULL); + dbuf_set_data(db, arc_buf_alloc(db->db_dnode->dn_objset->os_spa, + db->db.db_size, db, type)); + db->db_state = DB_FILL; + } else if (db->db_state == DB_NOFILL) { + dbuf_set_data(db, NULL); + } else { + ASSERT3U(db->db_state, ==, DB_CACHED); + } + mutex_exit(&db->db_mtx); +} + +/* + * This is our just-in-time copy function. It makes a copy of + * buffers, that have been modified in a previous transaction + * group, before we modify them in the current active group. + * + * This function is used in two places: when we are dirtying a + * buffer for the first time in a txg, and when we are freeing + * a range in a dnode that includes this buffer. + * + * Note that when we are called from dbuf_free_range() we do + * not put a hold on the buffer, we just traverse the active + * dbuf list for the dnode. + */ +static void +dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) +{ + dbuf_dirty_record_t *dr = db->db_last_dirty; + + ASSERT(MUTEX_HELD(&db->db_mtx)); + ASSERT(db->db.db_data != NULL); + ASSERT(db->db_level == 0); + ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT); + + if (dr == NULL || + (dr->dt.dl.dr_data != + ((db->db_blkid == DB_BONUS_BLKID) ? 
db->db.db_data : db->db_buf))) + return; + + /* + * If the last dirty record for this dbuf has not yet synced + * and its referencing the dbuf data, either: + * reset the reference to point to a new copy, + * or (if there a no active holders) + * just null out the current db_data pointer. + */ + ASSERT(dr->dr_txg >= txg - 2); + if (db->db_blkid == DB_BONUS_BLKID) { + /* Note that the data bufs here are zio_bufs */ + dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN); + arc_space_consume(DN_MAX_BONUSLEN); + bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN); + } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) { + int size = db->db.db_size; + arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); + dr->dt.dl.dr_data = arc_buf_alloc( + db->db_dnode->dn_objset->os_spa, size, db, type); + bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size); + } else { + dbuf_set_data(db, NULL); + } +} + +void +dbuf_unoverride(dbuf_dirty_record_t *dr) +{ + dmu_buf_impl_t *db = dr->dr_dbuf; + uint64_t txg = dr->dr_txg; + + ASSERT(MUTEX_HELD(&db->db_mtx)); + ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); + ASSERT(db->db_level == 0); + + if (db->db_blkid == DB_BONUS_BLKID || + dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) + return; + + /* free this block */ + if (!BP_IS_HOLE(&dr->dt.dl.dr_overridden_by)) { + /* XXX can get silent EIO here */ + (void) dsl_free(NULL, + spa_get_dsl(db->db_dnode->dn_objset->os_spa), + txg, &dr->dt.dl.dr_overridden_by, NULL, NULL, ARC_WAIT); + } + dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; + /* + * Release the already-written buffer, so we leave it in + * a consistent dirty state. Note that all callers are + * modifying the buffer, so they will immediately do + * another (redundant) arc_release(). Therefore, leave + * the buf thawed to save the effort of freezing & + * immediately re-thawing it. + */ + arc_release(dr->dt.dl.dr_data, db); +} + +/* + * Evict (if its unreferenced) or clear (if its referenced) any level-0 + * data blocks in the free range, so that any future readers will find + * empty blocks. Also, if we happen accross any level-1 dbufs in the + * range that have not already been marked dirty, mark them dirty so + * they stay in memory. 
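+ * + * The level-1 range [first_l1, last_l1] computed below follows from + * epbs = dn_indblkshift - SPA_BLKPTRSHIFT: a level-1 dbuf with blkid N + * covers level-0 blocks [N << epbs, (N + 1) << epbs).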
+ */ +void +dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx) +{ + dmu_buf_impl_t *db, *db_next; + uint64_t txg = tx->tx_txg; + int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; + uint64_t first_l1 = start >> epbs; + uint64_t last_l1 = end >> epbs; + + if (end > dn->dn_maxblkid) { + end = dn->dn_maxblkid; + last_l1 = end >> epbs; + } + dprintf_dnode(dn, "start=%llu end=%llu\n", start, end); + mutex_enter(&dn->dn_dbufs_mtx); + for (db = list_head(&dn->dn_dbufs); db; db = db_next) { + db_next = list_next(&dn->dn_dbufs, db); + ASSERT(db->db_blkid != DB_BONUS_BLKID); + + if (db->db_level == 1 && + db->db_blkid >= first_l1 && db->db_blkid <= last_l1) { + mutex_enter(&db->db_mtx); + if (db->db_last_dirty && + db->db_last_dirty->dr_txg < txg) { + dbuf_add_ref(db, FTAG); + mutex_exit(&db->db_mtx); + dbuf_will_dirty(db, tx); + dbuf_rele(db, FTAG); + } else { + mutex_exit(&db->db_mtx); + } + } + + if (db->db_level != 0) + continue; + dprintf_dbuf(db, "found buf %s\n", ""); + if (db->db_blkid < start || db->db_blkid > end) + continue; + + /* found a level 0 buffer in the range */ + if (dbuf_undirty(db, tx)) + continue; + + mutex_enter(&db->db_mtx); + if (db->db_state == DB_UNCACHED || + db->db_state == DB_NOFILL || + db->db_state == DB_EVICTING) { + ASSERT(db->db.db_data == NULL); + mutex_exit(&db->db_mtx); + continue; + } + if (db->db_state == DB_READ || db->db_state == DB_FILL) { + /* will be handled in dbuf_read_done or dbuf_rele */ + db->db_freed_in_flight = TRUE; + mutex_exit(&db->db_mtx); + continue; + } + if (refcount_count(&db->db_holds) == 0) { + ASSERT(db->db_buf); + dbuf_clear(db); + continue; + } + /* The dbuf is referenced */ + + if (db->db_last_dirty != NULL) { + dbuf_dirty_record_t *dr = db->db_last_dirty; + + if (dr->dr_txg == txg) { + /* + * This buffer is "in-use", re-adjust the file + * size to reflect that this buffer may + * contain new data when we sync. + */ + if (db->db_blkid > dn->dn_maxblkid) + dn->dn_maxblkid = db->db_blkid; + dbuf_unoverride(dr); + } else { + /* + * This dbuf is not dirty in the open context. + * Either uncache it (if its not referenced in + * the open context) or reset its contents to + * empty. + */ + dbuf_fix_old_data(db, txg); + } + } + /* clear the contents if its cached */ + if (db->db_state == DB_CACHED) { + ASSERT(db->db.db_data != NULL); + arc_release(db->db_buf, db); + bzero(db->db.db_data, db->db.db_size); + arc_buf_freeze(db->db_buf); + } + + mutex_exit(&db->db_mtx); + } + mutex_exit(&dn->dn_dbufs_mtx); +} + +static int +dbuf_block_freeable(dmu_buf_impl_t *db) +{ + dsl_dataset_t *ds = db->db_objset->os_dsl_dataset; + uint64_t birth_txg = 0; + + /* + * We don't need any locking to protect db_blkptr: + * If it's syncing, then db_last_dirty will be set + * so we'll ignore db_blkptr. + */ + ASSERT(MUTEX_HELD(&db->db_mtx)); + if (db->db_last_dirty) + birth_txg = db->db_last_dirty->dr_txg; + else if (db->db_blkptr) + birth_txg = db->db_blkptr->blk_birth; + + /* If we don't exist or are in a snapshot, we can't be freed */ + if (birth_txg) + return (ds == NULL || + dsl_dataset_block_freeable(ds, birth_txg)); + else + return (FALSE); +} + +void +dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) +{ + arc_buf_t *buf, *obuf; + int osize = db->db.db_size; + arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); + + ASSERT(db->db_blkid != DB_BONUS_BLKID); + + /* XXX does *this* func really need the lock? 
*/ + ASSERT(RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)); + + /* + * This call to dbuf_will_dirty() with the dn_struct_rwlock held + * is OK, because there can be no other references to the db + * when we are changing its size, so no concurrent DB_FILL can + * be happening. + */ + /* + * XXX we should be doing a dbuf_read, checking the return + * value and returning that up to our callers + */ + dbuf_will_dirty(db, tx); + + /* create the data buffer for the new block */ + buf = arc_buf_alloc(db->db_dnode->dn_objset->os_spa, size, db, type); + + /* copy old block data to the new block */ + obuf = db->db_buf; + bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); + /* zero the remainder */ + if (size > osize) + bzero((uint8_t *)buf->b_data + osize, size - osize); + + mutex_enter(&db->db_mtx); + dbuf_set_data(db, buf); + VERIFY(arc_buf_remove_ref(obuf, db) == 1); + db->db.db_size = size; + + if (db->db_level == 0) { + ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); + db->db_last_dirty->dt.dl.dr_data = buf; + } + mutex_exit(&db->db_mtx); + + dnode_willuse_space(db->db_dnode, size-osize, tx); +} + +dbuf_dirty_record_t * +dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) +{ + dnode_t *dn = db->db_dnode; + objset_impl_t *os = dn->dn_objset; + dbuf_dirty_record_t **drp, *dr; + int drop_struct_lock = FALSE; + boolean_t do_free_accounting = B_FALSE; + int txgoff = tx->tx_txg & TXG_MASK; + + ASSERT(tx->tx_txg != 0); + ASSERT(!refcount_is_zero(&db->db_holds)); + DMU_TX_DIRTY_BUF(tx, db); + + /* + * Shouldn't dirty a regular buffer in syncing context. Private + * objects may be dirtied in syncing context, but only if they + * were already pre-dirtied in open context. + * XXX We may want to prohibit dirtying in syncing context even + * if they did pre-dirty. + */ + ASSERT(!dmu_tx_is_syncing(tx) || + BP_IS_HOLE(dn->dn_objset->os_rootbp) || + dn->dn_object == DMU_META_DNODE_OBJECT || + dn->dn_objset->os_dsl_dataset == NULL || + dsl_dir_is_private(dn->dn_objset->os_dsl_dataset->ds_dir)); + + /* + * We make this assert for private objects as well, but after we + * check if we're already dirty. They are allowed to re-dirty + * in syncing context. + */ + ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || + dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == + (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); + + mutex_enter(&db->db_mtx); + /* + * XXX make this true for indirects too? The problem is that + * transactions created with dmu_tx_create_assigned() from + * syncing context don't bother holding ahead. + */ + ASSERT(db->db_level != 0 || + db->db_state == DB_CACHED || db->db_state == DB_FILL || + db->db_state == DB_NOFILL); + + mutex_enter(&dn->dn_mtx); + /* + * Don't set dirtyctx to SYNC if we're just modifying this as we + * initialize the objset. + */ + if (dn->dn_dirtyctx == DN_UNDIRTIED && + !BP_IS_HOLE(dn->dn_objset->os_rootbp)) { + dn->dn_dirtyctx = + (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN); + ASSERT(dn->dn_dirtyctx_firstset == NULL); + dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); + } + mutex_exit(&dn->dn_mtx); + + /* + * If this buffer is already dirty, we're done. + */ + drp = &db->db_last_dirty; + ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || + db->db.db_object == DMU_META_DNODE_OBJECT); + while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) + drp = &dr->dr_next; + if (dr && dr->dr_txg == tx->tx_txg) { + if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID) { + /* + * If this buffer has already been written out, + * we now need to reset its state. 
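+ * A dbuf carries at most one dirty record per open txg, so after + * resetting it we return the existing record rather than allocating + * a new one.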
+ */ + dbuf_unoverride(dr); + if (db->db.db_object != DMU_META_DNODE_OBJECT) + arc_buf_thaw(db->db_buf); + } + mutex_exit(&db->db_mtx); + return (dr); + } + + /* + * Only valid if not already dirty. + */ + ASSERT(dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == + (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); + + ASSERT3U(dn->dn_nlevels, >, db->db_level); + ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || + dn->dn_phys->dn_nlevels > db->db_level || + dn->dn_next_nlevels[txgoff] > db->db_level || + dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || + dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); + + /* + * We should only be dirtying in syncing context if it's the + * mos, a spa os, or we're initializing the os. However, we are + * allowed to dirty in syncing context provided we already + * dirtied it in open context. Hence we must make this + * assertion only if we're not already dirty. + */ + ASSERT(!dmu_tx_is_syncing(tx) || + os->os_dsl_dataset == NULL || + !dsl_dir_is_private(os->os_dsl_dataset->ds_dir) || + !BP_IS_HOLE(os->os_rootbp)); + ASSERT(db->db.db_size != 0); + + dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); + + if (db->db_blkid != DB_BONUS_BLKID) { + /* + * Update the accounting. + * Note: we delay "free accounting" until after we drop + * the db_mtx. This keeps us from grabbing other locks + * (and possibly deadlocking) in bp_get_dasize() while + * also holding the db_mtx. + */ + dnode_willuse_space(dn, db->db.db_size, tx); + do_free_accounting = dbuf_block_freeable(db); + } + + /* + * If this buffer is dirty in an old transaction group we need + * to make a copy of it so that the changes we make in this + * transaction group won't leak out when we sync the older txg. + */ + dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); + if (db->db_level == 0) { + void *data_old = db->db_buf; + + if (db->db_state != DB_NOFILL) { + if (db->db_blkid == DB_BONUS_BLKID) { + dbuf_fix_old_data(db, tx->tx_txg); + data_old = db->db.db_data; + } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { + /* + * Release the data buffer from the cache so + * that we can modify it without impacting + * possible other users of this cached data + * block. Note that indirect blocks and + * private objects are not released until the + * syncing state (since they are only modified + * then). + */ + arc_release(db->db_buf, db); + dbuf_fix_old_data(db, tx->tx_txg); + data_old = db->db_buf; + } + ASSERT(data_old != NULL); + } + dr->dt.dl.dr_data = data_old; + } else { + mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); + list_create(&dr->dt.di.dr_children, + sizeof (dbuf_dirty_record_t), + offsetof(dbuf_dirty_record_t, dr_dirty_node)); + } + dr->dr_dbuf = db; + dr->dr_txg = tx->tx_txg; + dr->dr_next = *drp; + *drp = dr; + + /* + * We could have been freed_in_flight between the dbuf_noread + * and dbuf_dirty. We win, as though the dbuf_noread() had + * happened after the free. 
+ */ + if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID) { + mutex_enter(&dn->dn_mtx); + dnode_clear_range(dn, db->db_blkid, 1, tx); + mutex_exit(&dn->dn_mtx); + db->db_freed_in_flight = FALSE; + } + + /* + * This buffer is now part of this txg + */ + dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); + db->db_dirtycnt += 1; + ASSERT3U(db->db_dirtycnt, <=, 3); + + mutex_exit(&db->db_mtx); + + if (db->db_blkid == DB_BONUS_BLKID) { + mutex_enter(&dn->dn_mtx); + ASSERT(!list_link_active(&dr->dr_dirty_node)); + list_insert_tail(&dn->dn_dirty_records[txgoff], dr); + mutex_exit(&dn->dn_mtx); + dnode_setdirty(dn, tx); + return (dr); + } else if (do_free_accounting) { + blkptr_t *bp = db->db_blkptr; + int64_t willfree = (bp && !BP_IS_HOLE(bp)) ? + bp_get_dasize(os->os_spa, bp) : db->db.db_size; + /* + * This is only a guess -- if the dbuf is dirty + * in a previous txg, we don't know how much + * space it will use on disk yet. We should + * really have the struct_rwlock to access + * db_blkptr, but since this is just a guess, + * it's OK if we get an odd answer. + */ + dnode_willuse_space(dn, -willfree, tx); + } + + if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { + rw_enter(&dn->dn_struct_rwlock, RW_READER); + drop_struct_lock = TRUE; + } + + if (db->db_level == 0) { + dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); + ASSERT(dn->dn_maxblkid >= db->db_blkid); + } + + if (db->db_level+1 < dn->dn_nlevels) { + dmu_buf_impl_t *parent = db->db_parent; + dbuf_dirty_record_t *di; + int parent_held = FALSE; + + if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { + int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; + + parent = dbuf_hold_level(dn, db->db_level+1, + db->db_blkid >> epbs, FTAG); + parent_held = TRUE; + } + if (drop_struct_lock) + rw_exit(&dn->dn_struct_rwlock); + ASSERT3U(db->db_level+1, ==, parent->db_level); + di = dbuf_dirty(parent, tx); + if (parent_held) + dbuf_rele(parent, FTAG); + + mutex_enter(&db->db_mtx); + /* possible race with dbuf_undirty() */ + if (db->db_last_dirty == dr || + dn->dn_object == DMU_META_DNODE_OBJECT) { + mutex_enter(&di->dt.di.dr_mtx); + ASSERT3U(di->dr_txg, ==, tx->tx_txg); + ASSERT(!list_link_active(&dr->dr_dirty_node)); + list_insert_tail(&di->dt.di.dr_children, dr); + mutex_exit(&di->dt.di.dr_mtx); + dr->dr_parent = di; + } + mutex_exit(&db->db_mtx); + } else { + ASSERT(db->db_level+1 == dn->dn_nlevels); + ASSERT(db->db_blkid < dn->dn_nblkptr); + ASSERT(db->db_parent == NULL || + db->db_parent == db->db_dnode->dn_dbuf); + mutex_enter(&dn->dn_mtx); + ASSERT(!list_link_active(&dr->dr_dirty_node)); + list_insert_tail(&dn->dn_dirty_records[txgoff], dr); + mutex_exit(&dn->dn_mtx); + if (drop_struct_lock) + rw_exit(&dn->dn_struct_rwlock); + } + + dnode_setdirty(dn, tx); + return (dr); +} + +static int +dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) +{ + dnode_t *dn = db->db_dnode; + uint64_t txg = tx->tx_txg; + dbuf_dirty_record_t *dr, **drp; + + ASSERT(txg != 0); + ASSERT(db->db_blkid != DB_BONUS_BLKID); + + mutex_enter(&db->db_mtx); + + /* + * If this buffer is not dirty, we're done. + */ + for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) + if (dr->dr_txg <= txg) + break; + if (dr == NULL || dr->dr_txg < txg) { + mutex_exit(&db->db_mtx); + return (0); + } + ASSERT(dr->dr_txg == txg); + + /* + * If this buffer is currently held, we cannot undirty + * it, since one of the current holders may be in the + * middle of an update. Note that users of dbuf_undirty() + * should not place a hold on the dbuf before the call. 
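+ * (Each dirty record holds its own reference on the dbuf, so a hold + * count above db_dirtycnt indicates a holder outside the dirty + * records.)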
+ */ + if (refcount_count(&db->db_holds) > db->db_dirtycnt) { + mutex_exit(&db->db_mtx); + /* Make sure we don't toss this buffer at sync phase */ + mutex_enter(&dn->dn_mtx); + dnode_clear_range(dn, db->db_blkid, 1, tx); + mutex_exit(&dn->dn_mtx); + return (0); + } + + dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); + + ASSERT(db->db.db_size != 0); + + /* XXX would be nice to fix up dn_towrite_space[] */ + + *drp = dr->dr_next; + + if (dr->dr_parent) { + mutex_enter(&dr->dr_parent->dt.di.dr_mtx); + list_remove(&dr->dr_parent->dt.di.dr_children, dr); + mutex_exit(&dr->dr_parent->dt.di.dr_mtx); + } else if (db->db_level+1 == dn->dn_nlevels) { + ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); + mutex_enter(&dn->dn_mtx); + list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); + mutex_exit(&dn->dn_mtx); + } + + if (db->db_level == 0) { + if (db->db_state != DB_NOFILL) { + dbuf_unoverride(dr); + + ASSERT(db->db_buf != NULL); + ASSERT(dr->dt.dl.dr_data != NULL); + if (dr->dt.dl.dr_data != db->db_buf) + VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, + db) == 1); + } + } else { + ASSERT(db->db_buf != NULL); + ASSERT(list_head(&dr->dt.di.dr_children) == NULL); + mutex_destroy(&dr->dt.di.dr_mtx); + list_destroy(&dr->dt.di.dr_children); + } + kmem_free(dr, sizeof (dbuf_dirty_record_t)); + + ASSERT(db->db_dirtycnt > 0); + db->db_dirtycnt -= 1; + + if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { + arc_buf_t *buf = db->db_buf; + + ASSERT(arc_released(buf)); + dbuf_set_data(db, NULL); + VERIFY(arc_buf_remove_ref(buf, db) == 1); + dbuf_evict(db); + return (1); + } + + mutex_exit(&db->db_mtx); + return (0); +} + +#pragma weak dmu_buf_will_dirty = dbuf_will_dirty +void +dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) +{ + int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; + + ASSERT(tx->tx_txg != 0); + ASSERT(!refcount_is_zero(&db->db_holds)); + + if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)) + rf |= DB_RF_HAVESTRUCT; + (void) dbuf_read(db, NULL, rf); + (void) dbuf_dirty(db, tx); +} + +void +dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) +{ + dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; + + db->db_state = DB_NOFILL; + + dmu_buf_will_fill(db_fake, tx); +} + +void +dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) +{ + dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; + + ASSERT(db->db_blkid != DB_BONUS_BLKID); + ASSERT(tx->tx_txg != 0); + ASSERT(db->db_level == 0); + ASSERT(!refcount_is_zero(&db->db_holds)); + + ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || + dmu_tx_private_ok(tx)); + + dbuf_noread(db); + (void) dbuf_dirty(db, tx); +} + +#pragma weak dmu_buf_fill_done = dbuf_fill_done +/* ARGSUSED */ +void +dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) +{ + mutex_enter(&db->db_mtx); + DBUF_VERIFY(db); + + if (db->db_state == DB_FILL) { + if (db->db_level == 0 && db->db_freed_in_flight) { + ASSERT(db->db_blkid != DB_BONUS_BLKID); + /* we were freed while filling */ + /* XXX dbuf_undirty? */ + bzero(db->db.db_data, db->db.db_size); + db->db_freed_in_flight = FALSE; + } + db->db_state = DB_CACHED; + cv_broadcast(&db->db_changed); + } + mutex_exit(&db->db_mtx); +} + +/* + * "Clear" the contents of this dbuf. This will mark the dbuf + * EVICTING and clear *most* of its references. Unfortunetely, + * when we are not holding the dn_dbufs_mtx, we can't clear the + * entry in the dn_dbufs list. We have to wait until dbuf_destroy() + * in this case. 
For callers from the DMU we will usually see: + * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy() + * For the arc callback, we will usually see: + * dbuf_do_evict()->dbuf_clear();dbuf_destroy() + * Sometimes, though, we will get a mix of these two: + * DMU: dbuf_clear()->arc_buf_evict() + * ARC: dbuf_do_evict()->dbuf_destroy() + */ +void +dbuf_clear(dmu_buf_impl_t *db) +{ + dnode_t *dn = db->db_dnode; + dmu_buf_impl_t *parent = db->db_parent; + dmu_buf_impl_t *dndb = dn->dn_dbuf; + int dbuf_gone = FALSE; + + ASSERT(MUTEX_HELD(&db->db_mtx)); + ASSERT(refcount_is_zero(&db->db_holds)); + + dbuf_evict_user(db); + + if (db->db_state == DB_CACHED) { + ASSERT(db->db.db_data != NULL); + if (db->db_blkid == DB_BONUS_BLKID) { + zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); + arc_space_return(DN_MAX_BONUSLEN); + } + db->db.db_data = NULL; + db->db_state = DB_UNCACHED; + } + + ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); + ASSERT(db->db_data_pending == NULL); + + db->db_state = DB_EVICTING; + db->db_blkptr = NULL; + + if (db->db_blkid != DB_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) { + list_remove(&dn->dn_dbufs, db); + dnode_rele(dn, db); + db->db_dnode = NULL; + } + + if (db->db_buf) + dbuf_gone = arc_buf_evict(db->db_buf); + + if (!dbuf_gone) + mutex_exit(&db->db_mtx); + + /* + * If this dbuf is referened from an indirect dbuf, + * decrement the ref count on the indirect dbuf. + */ + if (parent && parent != dndb) + dbuf_rele(parent, db); +} + +static int +dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, + dmu_buf_impl_t **parentp, blkptr_t **bpp) +{ + int nlevels, epbs; + + *parentp = NULL; + *bpp = NULL; + + ASSERT(blkid != DB_BONUS_BLKID); + + if (dn->dn_phys->dn_nlevels == 0) + nlevels = 1; + else + nlevels = dn->dn_phys->dn_nlevels; + + epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; + + ASSERT3U(level * epbs, <, 64); + ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); + if (level >= nlevels || + (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { + /* the buffer has no parent yet */ + return (ENOENT); + } else if (level < nlevels-1) { + /* this block is referenced from an indirect block */ + int err = dbuf_hold_impl(dn, level+1, + blkid >> epbs, fail_sparse, NULL, parentp); + if (err) + return (err); + err = dbuf_read(*parentp, NULL, + (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); + if (err) { + dbuf_rele(*parentp, NULL); + *parentp = NULL; + return (err); + } + *bpp = ((blkptr_t *)(*parentp)->db.db_data) + + (blkid & ((1ULL << epbs) - 1)); + return (0); + } else { + /* the block is referenced from the dnode */ + ASSERT3U(level, ==, nlevels-1); + ASSERT(dn->dn_phys->dn_nblkptr == 0 || + blkid < dn->dn_phys->dn_nblkptr); + if (dn->dn_dbuf) { + dbuf_add_ref(dn->dn_dbuf, NULL); + *parentp = dn->dn_dbuf; + } + *bpp = &dn->dn_phys->dn_blkptr[blkid]; + return (0); + } +} + +static dmu_buf_impl_t * +dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, + dmu_buf_impl_t *parent, blkptr_t *blkptr) +{ + objset_impl_t *os = dn->dn_objset; + dmu_buf_impl_t *db, *odb; + + ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); + ASSERT(dn->dn_type != DMU_OT_NONE); + + db = kmem_cache_alloc(dbuf_cache, KM_SLEEP); + + db->db_objset = os; + db->db.db_object = dn->dn_object; + db->db_level = level; + db->db_blkid = blkid; + db->db_last_dirty = NULL; + db->db_dirtycnt = 0; + db->db_dnode = dn; + db->db_parent = parent; + db->db_blkptr = blkptr; + + db->db_user_ptr = NULL; + db->db_user_data_ptr_ptr = NULL; + db->db_evict_func = NULL; + 
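/* + * The db_user_* fields initialized above back the dmu_buf_set_user() / + * dmu_buf_get_user() interface defined later in this file: a consumer + * may attach a private pointer and an eviction callback to a level-0 + * dbuf. A purely illustrative sketch, with my_node_t, n_data and + * my_evict_cb standing in for a hypothetical consumer's types: + * + * my_node_t *np = kmem_zalloc(sizeof (*np), KM_SLEEP); + * (void) dmu_buf_set_user(&db->db, np, &np->n_data, my_evict_cb); + * + * my_evict_cb() is then invoked via dbuf_evict_user() when the dbuf + * is evicted, giving np a chance to be torn down. + */ +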
db->db_immediate_evict = 0; + db->db_freed_in_flight = 0; + + if (blkid == DB_BONUS_BLKID) { + ASSERT3P(parent, ==, dn->dn_dbuf); + db->db.db_size = DN_MAX_BONUSLEN - + (dn->dn_nblkptr-1) * sizeof (blkptr_t); + ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); + db->db.db_offset = DB_BONUS_BLKID; + db->db_state = DB_UNCACHED; + /* the bonus dbuf is not placed in the hash table */ + arc_space_consume(sizeof (dmu_buf_impl_t)); + return (db); + } else { + int blocksize = + db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz; + db->db.db_size = blocksize; + db->db.db_offset = db->db_blkid * blocksize; + } + + /* + * Hold the dn_dbufs_mtx while we get the new dbuf + * in the hash table *and* added to the dbufs list. + * This prevents a possible deadlock with someone + * trying to look up this dbuf before it's added to the + * dn_dbufs list. + */ + mutex_enter(&dn->dn_dbufs_mtx); + db->db_state = DB_EVICTING; + if ((odb = dbuf_hash_insert(db)) != NULL) { + /* someone else inserted it first */ + kmem_cache_free(dbuf_cache, db); + mutex_exit(&dn->dn_dbufs_mtx); + return (odb); + } + list_insert_head(&dn->dn_dbufs, db); + db->db_state = DB_UNCACHED; + mutex_exit(&dn->dn_dbufs_mtx); + arc_space_consume(sizeof (dmu_buf_impl_t)); + + if (parent && parent != dn->dn_dbuf) + dbuf_add_ref(parent, db); + + ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || + refcount_count(&dn->dn_holds) > 0); + (void) refcount_add(&dn->dn_holds, db); + + dprintf_dbuf(db, "db=%p\n", db); + + return (db); +} + +static int +dbuf_do_evict(void *private) +{ + arc_buf_t *buf = private; + dmu_buf_impl_t *db = buf->b_private; + + if (!MUTEX_HELD(&db->db_mtx)) + mutex_enter(&db->db_mtx); + + ASSERT(refcount_is_zero(&db->db_holds)); + + if (db->db_state != DB_EVICTING) { + ASSERT(db->db_state == DB_CACHED); + DBUF_VERIFY(db); + db->db_buf = NULL; + dbuf_evict(db); + } else { + mutex_exit(&db->db_mtx); + dbuf_destroy(db); + } + return (0); +} + +static void +dbuf_destroy(dmu_buf_impl_t *db) +{ + ASSERT(refcount_is_zero(&db->db_holds)); + + if (db->db_blkid != DB_BONUS_BLKID) { + /* + * If this dbuf is still on the dn_dbufs list, + * remove it from that list. + */ + if (db->db_dnode) { + dnode_t *dn = db->db_dnode; + + mutex_enter(&dn->dn_dbufs_mtx); + list_remove(&dn->dn_dbufs, db); + mutex_exit(&dn->dn_dbufs_mtx); + + dnode_rele(dn, db); + db->db_dnode = NULL; + } + dbuf_hash_remove(db); + } + db->db_parent = NULL; + db->db_buf = NULL; + + ASSERT(!list_link_active(&db->db_link)); + ASSERT(db->db.db_data == NULL); + ASSERT(db->db_hash_next == NULL); + ASSERT(db->db_blkptr == NULL); + ASSERT(db->db_data_pending == NULL); + + kmem_cache_free(dbuf_cache, db); + arc_space_return(sizeof (dmu_buf_impl_t)); +} + +void +dbuf_prefetch(dnode_t *dn, uint64_t blkid) +{ + dmu_buf_impl_t *db = NULL; + blkptr_t *bp = NULL; + + ASSERT(blkid != DB_BONUS_BLKID); + ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); + + if (dnode_block_freed(dn, blkid)) + return; + + /* dbuf_find() returns with db_mtx held */ + if (db = dbuf_find(dn, 0, blkid)) { + if (refcount_count(&db->db_holds) > 0) { + /* + * This dbuf is active. We assume that it is + * already CACHED, or else about to be either + * read or filled. + */ + mutex_exit(&db->db_mtx); + return; + } + mutex_exit(&db->db_mtx); + db = NULL; + } + + if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) { + if (bp && !BP_IS_HOLE(bp)) { + arc_buf_t *pbuf; + uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH; + zbookmark_t zb; + zb.zb_objset = dn->dn_objset->os_dsl_dataset ?
+ dn->dn_objset->os_dsl_dataset->ds_object : 0; + zb.zb_object = dn->dn_object; + zb.zb_level = 0; + zb.zb_blkid = blkid; + + if (db) + pbuf = db->db_buf; + else + pbuf = dn->dn_objset->os_phys_buf; + + (void) arc_read(NULL, dn->dn_objset->os_spa, + bp, pbuf, NULL, NULL, ZIO_PRIORITY_ASYNC_READ, + ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, + &aflags, &zb); + } + if (db) + dbuf_rele(db, NULL); + } +} + +/* + * Returns with db_holds incremented, and db_mtx not held. + * Note: dn_struct_rwlock must be held. + */ +int +dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, + void *tag, dmu_buf_impl_t **dbp) +{ + dmu_buf_impl_t *db, *parent = NULL; + + ASSERT(blkid != DB_BONUS_BLKID); + ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); + ASSERT3U(dn->dn_nlevels, >, level); + + *dbp = NULL; +top: + /* dbuf_find() returns with db_mtx held */ + db = dbuf_find(dn, level, blkid); + + if (db == NULL) { + blkptr_t *bp = NULL; + int err; + + ASSERT3P(parent, ==, NULL); + err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); + if (fail_sparse) { + if (err == 0 && bp && BP_IS_HOLE(bp)) + err = ENOENT; + if (err) { + if (parent) + dbuf_rele(parent, NULL); + return (err); + } + } + if (err && err != ENOENT) + return (err); + db = dbuf_create(dn, level, blkid, parent, bp); + } + + if (db->db_buf && refcount_is_zero(&db->db_holds)) { + arc_buf_add_ref(db->db_buf, db); + if (db->db_buf->b_data == NULL) { + dbuf_clear(db); + if (parent) { + dbuf_rele(parent, NULL); + parent = NULL; + } + goto top; + } + ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); + } + + ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); + + /* + * If this buffer is currently syncing out, and we are are + * still referencing it from db_data, we need to make a copy + * of it in case we decide we want to dirty it again in this txg. + */ + if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID && + dn->dn_object != DMU_META_DNODE_OBJECT && + db->db_state == DB_CACHED && db->db_data_pending) { + dbuf_dirty_record_t *dr = db->db_data_pending; + + if (dr->dt.dl.dr_data == db->db_buf) { + arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); + + dbuf_set_data(db, + arc_buf_alloc(db->db_dnode->dn_objset->os_spa, + db->db.db_size, db, type)); + bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, + db->db.db_size); + } + } + + (void) refcount_add(&db->db_holds, tag); + dbuf_update_data(db); + DBUF_VERIFY(db); + mutex_exit(&db->db_mtx); + + /* NOTE: we can't rele the parent until after we drop the db_mtx */ + if (parent) + dbuf_rele(parent, NULL); + + ASSERT3P(db->db_dnode, ==, dn); + ASSERT3U(db->db_blkid, ==, blkid); + ASSERT3U(db->db_level, ==, level); + *dbp = db; + + return (0); +} + +dmu_buf_impl_t * +dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) +{ + dmu_buf_impl_t *db; + int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db); + return (err ? NULL : db); +} + +dmu_buf_impl_t * +dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) +{ + dmu_buf_impl_t *db; + int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db); + return (err ? 
NULL : db); +} + +void +dbuf_create_bonus(dnode_t *dn) +{ + ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); + + ASSERT(dn->dn_bonus == NULL); + dn->dn_bonus = dbuf_create(dn, 0, DB_BONUS_BLKID, dn->dn_dbuf, NULL); +} + +#pragma weak dmu_buf_add_ref = dbuf_add_ref +void +dbuf_add_ref(dmu_buf_impl_t *db, void *tag) +{ + int64_t holds = refcount_add(&db->db_holds, tag); + ASSERT(holds > 1); +} + +#pragma weak dmu_buf_rele = dbuf_rele +void +dbuf_rele(dmu_buf_impl_t *db, void *tag) +{ + int64_t holds; + + mutex_enter(&db->db_mtx); + DBUF_VERIFY(db); + + holds = refcount_remove(&db->db_holds, tag); + ASSERT(holds >= 0); + + /* + * We can't freeze indirects if there is a possibility that they + * may be modified in the current syncing context. + */ + if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) + arc_buf_freeze(db->db_buf); + + if (holds == db->db_dirtycnt && + db->db_level == 0 && db->db_immediate_evict) + dbuf_evict_user(db); + + if (holds == 0) { + if (db->db_blkid == DB_BONUS_BLKID) { + mutex_exit(&db->db_mtx); + dnode_rele(db->db_dnode, db); + } else if (db->db_buf == NULL) { + /* + * This is a special case: we never associated this + * dbuf with any data allocated from the ARC. + */ + ASSERT(db->db_state == DB_UNCACHED || + db->db_state == DB_NOFILL); + dbuf_evict(db); + } else if (arc_released(db->db_buf)) { + arc_buf_t *buf = db->db_buf; + /* + * This dbuf has anonymous data associated with it. + */ + dbuf_set_data(db, NULL); + VERIFY(arc_buf_remove_ref(buf, db) == 1); + dbuf_evict(db); + } else { + VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0); + if (!DBUF_IS_CACHEABLE(db)) + dbuf_clear(db); + else + mutex_exit(&db->db_mtx); + } + } else { + mutex_exit(&db->db_mtx); + } +} + +#pragma weak dmu_buf_refcount = dbuf_refcount +uint64_t +dbuf_refcount(dmu_buf_impl_t *db) +{ + return (refcount_count(&db->db_holds)); +} + +void * +dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr, + dmu_buf_evict_func_t *evict_func) +{ + return (dmu_buf_update_user(db_fake, NULL, user_ptr, + user_data_ptr_ptr, evict_func)); +} + +void * +dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr, + dmu_buf_evict_func_t *evict_func) +{ + dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; + + db->db_immediate_evict = TRUE; + return (dmu_buf_update_user(db_fake, NULL, user_ptr, + user_data_ptr_ptr, evict_func)); +} + +void * +dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr, + void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func) +{ + dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; + ASSERT(db->db_level == 0); + + ASSERT((user_ptr == NULL) == (evict_func == NULL)); + + mutex_enter(&db->db_mtx); + + if (db->db_user_ptr == old_user_ptr) { + db->db_user_ptr = user_ptr; + db->db_user_data_ptr_ptr = user_data_ptr_ptr; + db->db_evict_func = evict_func; + + dbuf_update_data(db); + } else { + old_user_ptr = db->db_user_ptr; + } + + mutex_exit(&db->db_mtx); + return (old_user_ptr); +} + +void * +dmu_buf_get_user(dmu_buf_t *db_fake) +{ + dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; + ASSERT(!refcount_is_zero(&db->db_holds)); + + return (db->db_user_ptr); +} + +static void +dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) +{ + /* ASSERT(dmu_tx_is_syncing(tx) */ + ASSERT(MUTEX_HELD(&db->db_mtx)); + + if (db->db_blkptr != NULL) + return; + + if (db->db_level == dn->dn_phys->dn_nlevels-1) { + /* + * This buffer was allocated at a time when there was + * no available blkptrs from the dnode, or it was + * inappropriate to 
hook it in (i.e., nlevels mis-match). + */ + ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); + ASSERT(db->db_parent == NULL); + db->db_parent = dn->dn_dbuf; + db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; + DBUF_VERIFY(db); + } else { + dmu_buf_impl_t *parent = db->db_parent; + int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; + + ASSERT(dn->dn_phys->dn_nlevels > 1); + if (parent == NULL) { + mutex_exit(&db->db_mtx); + rw_enter(&dn->dn_struct_rwlock, RW_READER); + (void) dbuf_hold_impl(dn, db->db_level+1, + db->db_blkid >> epbs, FALSE, db, &parent); + rw_exit(&dn->dn_struct_rwlock); + mutex_enter(&db->db_mtx); + db->db_parent = parent; + } + db->db_blkptr = (blkptr_t *)parent->db.db_data + + (db->db_blkid & ((1ULL << epbs) - 1)); + DBUF_VERIFY(db); + } +} + +static void +dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) +{ + dmu_buf_impl_t *db = dr->dr_dbuf; + dnode_t *dn = db->db_dnode; + zio_t *zio; + + ASSERT(dmu_tx_is_syncing(tx)); + + dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); + + mutex_enter(&db->db_mtx); + + ASSERT(db->db_level > 0); + DBUF_VERIFY(db); + + if (db->db_buf == NULL) { + mutex_exit(&db->db_mtx); + (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); + mutex_enter(&db->db_mtx); + } + ASSERT3U(db->db_state, ==, DB_CACHED); + ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); + ASSERT(db->db_buf != NULL); + + dbuf_check_blkptr(dn, db); + + db->db_data_pending = dr; + + mutex_exit(&db->db_mtx); + dbuf_write(dr, db->db_buf, tx); + + zio = dr->dr_zio; + mutex_enter(&dr->dt.di.dr_mtx); + dbuf_sync_list(&dr->dt.di.dr_children, tx); + ASSERT(list_head(&dr->dt.di.dr_children) == NULL); + mutex_exit(&dr->dt.di.dr_mtx); + zio_nowait(zio); +} + +static void +dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) +{ + arc_buf_t **datap = &dr->dt.dl.dr_data; + dmu_buf_impl_t *db = dr->dr_dbuf; + dnode_t *dn = db->db_dnode; + objset_impl_t *os = dn->dn_objset; + uint64_t txg = tx->tx_txg; + int blksz; + + ASSERT(dmu_tx_is_syncing(tx)); + + dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); + + mutex_enter(&db->db_mtx); + /* + * To be synced, we must be dirtied. But we + * might have been freed after the dirty. + */ + if (db->db_state == DB_UNCACHED) { + /* This buffer has been freed since it was dirtied */ + ASSERT(db->db.db_data == NULL); + } else if (db->db_state == DB_FILL) { + /* This buffer was freed and is now being re-filled */ + ASSERT(db->db.db_data != dr->dt.dl.dr_data); + } else { + ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); + } + DBUF_VERIFY(db); + + /* + * If this is a bonus buffer, simply copy the bonus data into the + * dnode. It will be written out when the dnode is synced (and it + * will be synced, since it must have been dirty for dbuf_sync to + * be called).
+ */ + if (db->db_blkid == DB_BONUS_BLKID) { + dbuf_dirty_record_t **drp; + + ASSERT(*datap != NULL); + ASSERT3U(db->db_level, ==, 0); + ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN); + bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen); + if (*datap != db->db.db_data) { + zio_buf_free(*datap, DN_MAX_BONUSLEN); + arc_space_return(DN_MAX_BONUSLEN); + } + db->db_data_pending = NULL; + drp = &db->db_last_dirty; + while (*drp != dr) + drp = &(*drp)->dr_next; + ASSERT(dr->dr_next == NULL); + *drp = dr->dr_next; + kmem_free(dr, sizeof (dbuf_dirty_record_t)); + ASSERT(db->db_dirtycnt > 0); + db->db_dirtycnt -= 1; + mutex_exit(&db->db_mtx); + dbuf_rele(db, (void *)(uintptr_t)txg); + return; + } + + /* + * This function may have dropped the db_mtx lock allowing a dmu_sync + * operation to sneak in. As a result, we need to ensure that we + * don't check the dr_override_state until we have returned from + * dbuf_check_blkptr. + */ + dbuf_check_blkptr(dn, db); + + /* + * If this buffer is in the middle of an immdiate write, + * wait for the synchronous IO to complete. + */ + while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { + ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); + cv_wait(&db->db_changed, &db->db_mtx); + ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); + } + + /* + * If this dbuf has already been written out via an immediate write, + * just complete the write by copying over the new block pointer and + * updating the accounting via the write-completion functions. + */ + if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { + zio_t zio_fake; + + zio_fake.io_private = &db; + zio_fake.io_error = 0; + zio_fake.io_bp = db->db_blkptr; + zio_fake.io_bp_orig = *db->db_blkptr; + zio_fake.io_txg = txg; + zio_fake.io_flags = 0; + + *db->db_blkptr = dr->dt.dl.dr_overridden_by; + dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; + db->db_data_pending = dr; + dr->dr_zio = &zio_fake; + mutex_exit(&db->db_mtx); + + ASSERT(!DVA_EQUAL(BP_IDENTITY(zio_fake.io_bp), + BP_IDENTITY(&zio_fake.io_bp_orig)) || + BP_IS_HOLE(zio_fake.io_bp)); + + if (BP_IS_OLDER(&zio_fake.io_bp_orig, txg)) + (void) dsl_dataset_block_kill(os->os_dsl_dataset, + &zio_fake.io_bp_orig, dn->dn_zio, tx); + + dbuf_write_ready(&zio_fake, db->db_buf, db); + dbuf_write_done(&zio_fake, db->db_buf, db); + + return; + } + + if (db->db_state != DB_NOFILL) { + blksz = arc_buf_size(*datap); + + if (dn->dn_object != DMU_META_DNODE_OBJECT) { + /* + * If this buffer is currently "in use" (i.e., there + * are active holds and db_data still references it), + * then make a copy before we start the write so that + * any modifications from the open txg will not leak + * into this write. + * + * NOTE: this copy does not need to be made for + * objects only modified in the syncing context (e.g. + * DNONE_DNODE blocks). 
+ */ + if (refcount_count(&db->db_holds) > 1 && + *datap == db->db_buf) { + arc_buf_contents_t type = + DBUF_GET_BUFC_TYPE(db); + *datap = + arc_buf_alloc(os->os_spa, blksz, db, type); + bcopy(db->db.db_data, (*datap)->b_data, blksz); + } + } + + ASSERT(*datap != NULL); + } + db->db_data_pending = dr; + + mutex_exit(&db->db_mtx); + + dbuf_write(dr, *datap, tx); + + ASSERT(!list_link_active(&dr->dr_dirty_node)); + if (dn->dn_object == DMU_META_DNODE_OBJECT) + list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); + else + zio_nowait(dr->dr_zio); +} + +void +dbuf_sync_list(list_t *list, dmu_tx_t *tx) +{ + dbuf_dirty_record_t *dr; + + while (dr = list_head(list)) { + if (dr->dr_zio != NULL) { + /* + * If we find an already initialized zio then we + * are processing the meta-dnode, and we have finished. + * The dbufs for all dnodes are put back on the list + * during processing, so that we can zio_wait() + * these IOs after initiating all child IOs. + */ + ASSERT3U(dr->dr_dbuf->db.db_object, ==, + DMU_META_DNODE_OBJECT); + break; + } + list_remove(list, dr); + if (dr->dr_dbuf->db_level > 0) + dbuf_sync_indirect(dr, tx); + else + dbuf_sync_leaf(dr, tx); + } +} + +static void +dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) +{ + dmu_buf_impl_t *db = dr->dr_dbuf; + dnode_t *dn = db->db_dnode; + objset_impl_t *os = dn->dn_objset; + dmu_buf_impl_t *parent = db->db_parent; + uint64_t txg = tx->tx_txg; + zbookmark_t zb; + writeprops_t wp = { 0 }; + zio_t *zio; + + if (!BP_IS_HOLE(db->db_blkptr) && + (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE)) { + /* + * Private object buffers are released here rather + * than in dbuf_dirty() since they are only modified + * in the syncing context and we don't want the + * overhead of making multiple copies of the data. + */ + arc_release(data, db); + } else if (db->db_state != DB_NOFILL) { + ASSERT(arc_released(data)); + /* XXX why do we need to thaw here? */ + arc_buf_thaw(data); + } + + if (parent != dn->dn_dbuf) { + ASSERT(parent && parent->db_data_pending); + ASSERT(db->db_level == parent->db_level-1); + ASSERT(arc_released(parent->db_buf)); + zio = parent->db_data_pending->dr_zio; + } else { + ASSERT(db->db_level == dn->dn_phys->dn_nlevels-1); + ASSERT3P(db->db_blkptr, ==, + &dn->dn_phys->dn_blkptr[db->db_blkid]); + zio = dn->dn_zio; + } + + ASSERT(db->db_level == 0 || data == db->db_buf); + ASSERT3U(db->db_blkptr->blk_birth, <=, txg); + ASSERT(zio); + + zb.zb_objset = os->os_dsl_dataset ? 
os->os_dsl_dataset->ds_object : 0; + zb.zb_object = db->db.db_object; + zb.zb_level = db->db_level; + zb.zb_blkid = db->db_blkid; + + wp.wp_type = dn->dn_type; + wp.wp_level = db->db_level; + wp.wp_copies = os->os_copies; + wp.wp_dncompress = dn->dn_compress; + wp.wp_oscompress = os->os_compress; + wp.wp_dnchecksum = dn->dn_checksum; + wp.wp_oschecksum = os->os_checksum; + + if (BP_IS_OLDER(db->db_blkptr, txg)) + (void) dsl_dataset_block_kill( + os->os_dsl_dataset, db->db_blkptr, zio, tx); + + if (db->db_state == DB_NOFILL) { + zio_prop_t zp = { 0 }; + + write_policy(os->os_spa, &wp, &zp); + dr->dr_zio = zio_write(zio, os->os_spa, + txg, db->db_blkptr, NULL, + db->db.db_size, &zp, dbuf_skip_write_ready, + dbuf_skip_write_done, db, ZIO_PRIORITY_ASYNC_WRITE, + ZIO_FLAG_MUSTSUCCEED, &zb); + } else { + dr->dr_zio = arc_write(zio, os->os_spa, &wp, + DBUF_IS_L2CACHEABLE(db), txg, db->db_blkptr, + data, dbuf_write_ready, dbuf_write_done, db, + ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); + } +} + +/* wrapper function for dbuf_write_ready bypassing ARC */ +static void +dbuf_skip_write_ready(zio_t *zio) +{ + blkptr_t *bp = zio->io_bp; + + if (!BP_IS_GANG(bp)) + zio_skip_write(zio); + + dbuf_write_ready(zio, NULL, zio->io_private); +} + +/* wrapper function for dbuf_write_done bypassing ARC */ +static void +dbuf_skip_write_done(zio_t *zio) +{ + dbuf_write_done(zio, NULL, zio->io_private); +} + +/* ARGSUSED */ +static void +dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) +{ + dmu_buf_impl_t *db = vdb; + dnode_t *dn = db->db_dnode; + objset_impl_t *os = dn->dn_objset; + blkptr_t *bp = zio->io_bp; + blkptr_t *bp_orig = &zio->io_bp_orig; + uint64_t fill = 0; + int old_size, new_size, i; + + ASSERT(db->db_blkptr == bp); + + dprintf_dbuf_bp(db, bp_orig, "bp_orig: %s", ""); + + old_size = bp_get_dasize(os->os_spa, bp_orig); + new_size = bp_get_dasize(os->os_spa, bp); + + dnode_diduse_space(dn, new_size - old_size); + + if (BP_IS_HOLE(bp)) { + dsl_dataset_t *ds = os->os_dsl_dataset; + dmu_tx_t *tx = os->os_synctx; + + if (bp_orig->blk_birth == tx->tx_txg) + (void) dsl_dataset_block_kill(ds, bp_orig, zio, tx); + ASSERT3U(bp->blk_fill, ==, 0); + return; + } + + ASSERT(BP_GET_TYPE(bp) == dn->dn_type); + ASSERT(BP_GET_LEVEL(bp) == db->db_level); + + mutex_enter(&db->db_mtx); + + if (db->db_level == 0) { + mutex_enter(&dn->dn_mtx); + if (db->db_blkid > dn->dn_phys->dn_maxblkid) + dn->dn_phys->dn_maxblkid = db->db_blkid; + mutex_exit(&dn->dn_mtx); + + if (dn->dn_type == DMU_OT_DNODE) { + dnode_phys_t *dnp = db->db.db_data; + for (i = db->db.db_size >> DNODE_SHIFT; i > 0; + i--, dnp++) { + if (dnp->dn_type != DMU_OT_NONE) + fill++; + } + } else { + fill = 1; + } + } else { + blkptr_t *ibp = db->db.db_data; + ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); + for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { + if (BP_IS_HOLE(ibp)) + continue; + ASSERT3U(BP_GET_LSIZE(ibp), ==, + db->db_level == 1 ?
dn->dn_datablksz : + (1<<dn->dn_phys->dn_indblkshift)); + fill += ibp->blk_fill; + } + } + + bp->blk_fill = fill; + + mutex_exit(&db->db_mtx); + + if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { + ASSERT(DVA_EQUAL(BP_IDENTITY(bp), BP_IDENTITY(bp_orig))); + } else { + dsl_dataset_t *ds = os->os_dsl_dataset; + dmu_tx_t *tx = os->os_synctx; + + if (bp_orig->blk_birth == tx->tx_txg) + (void) dsl_dataset_block_kill(ds, bp_orig, zio, tx); + dsl_dataset_block_born(ds, bp, tx); + } +} + +/* ARGSUSED */ +static void +dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) +{ + dmu_buf_impl_t *db = vdb; + uint64_t txg = zio->io_txg; + dbuf_dirty_record_t **drp, *dr; + + ASSERT3U(zio->io_error, ==, 0); + + mutex_enter(&db->db_mtx); + + drp = &db->db_last_dirty; + while ((dr = *drp) != db->db_data_pending) + drp = &dr->dr_next; + ASSERT(!list_link_active(&dr->dr_dirty_node)); + ASSERT(dr->dr_txg == txg); + ASSERT(dr->dr_next == NULL); + *drp = dr->dr_next; + + if (db->db_level == 0) { + ASSERT(db->db_blkid != DB_BONUS_BLKID); + ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); + + if (db->db_state != DB_NOFILL) { + if (dr->dt.dl.dr_data != db->db_buf) + VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, + db) == 1); + else if (!BP_IS_HOLE(db->db_blkptr)) + arc_set_callback(db->db_buf, dbuf_do_evict, db); + else + ASSERT(arc_released(db->db_buf)); + } + } else { + dnode_t *dn = db->db_dnode; + + ASSERT(list_head(&dr->dt.di.dr_children) == NULL); + ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); + if (!BP_IS_HOLE(db->db_blkptr)) { + int epbs = + dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; + ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, + db->db.db_size); + ASSERT3U(dn->dn_phys->dn_maxblkid + >> (db->db_level * epbs), >=, db->db_blkid); + arc_set_callback(db->db_buf, dbuf_do_evict, db); + } + mutex_destroy(&dr->dt.di.dr_mtx); + list_destroy(&dr->dt.di.dr_children); + } + kmem_free(dr, sizeof (dbuf_dirty_record_t)); + + cv_broadcast(&db->db_changed); + ASSERT(db->db_dirtycnt > 0); + db->db_dirtycnt -= 1; + db->db_data_pending = NULL; + mutex_exit(&db->db_mtx); + + dprintf_dbuf_bp(db, zio->io_bp, "bp: %s", ""); + + dbuf_rele(db, (void *)(uintptr_t)txg); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dmu.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu.c new file mode 100644 index 000000000000..b6205bd500a8 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu.c @@ -0,0 +1,1227 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef _KERNEL +#include +#include +#endif + +const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = { + { byteswap_uint8_array, TRUE, "unallocated" }, + { zap_byteswap, TRUE, "object directory" }, + { byteswap_uint64_array, TRUE, "object array" }, + { byteswap_uint8_array, TRUE, "packed nvlist" }, + { byteswap_uint64_array, TRUE, "packed nvlist size" }, + { byteswap_uint64_array, TRUE, "bplist" }, + { byteswap_uint64_array, TRUE, "bplist header" }, + { byteswap_uint64_array, TRUE, "SPA space map header" }, + { byteswap_uint64_array, TRUE, "SPA space map" }, + { byteswap_uint64_array, TRUE, "ZIL intent log" }, + { dnode_buf_byteswap, TRUE, "DMU dnode" }, + { dmu_objset_byteswap, TRUE, "DMU objset" }, + { byteswap_uint64_array, TRUE, "DSL directory" }, + { zap_byteswap, TRUE, "DSL directory child map"}, + { zap_byteswap, TRUE, "DSL dataset snap map" }, + { zap_byteswap, TRUE, "DSL props" }, + { byteswap_uint64_array, TRUE, "DSL dataset" }, + { zfs_znode_byteswap, TRUE, "ZFS znode" }, + { zfs_oldacl_byteswap, TRUE, "ZFS V0 ACL" }, + { byteswap_uint8_array, FALSE, "ZFS plain file" }, + { zap_byteswap, TRUE, "ZFS directory" }, + { zap_byteswap, TRUE, "ZFS master node" }, + { zap_byteswap, TRUE, "ZFS delete queue" }, + { byteswap_uint8_array, FALSE, "zvol object" }, + { zap_byteswap, TRUE, "zvol prop" }, + { byteswap_uint8_array, FALSE, "other uint8[]" }, + { byteswap_uint64_array, FALSE, "other uint64[]" }, + { zap_byteswap, TRUE, "other ZAP" }, + { zap_byteswap, TRUE, "persistent error log" }, + { byteswap_uint8_array, TRUE, "SPA history" }, + { byteswap_uint64_array, TRUE, "SPA history offsets" }, + { zap_byteswap, TRUE, "Pool properties" }, + { zap_byteswap, TRUE, "DSL permissions" }, + { zfs_acl_byteswap, TRUE, "ZFS ACL" }, + { byteswap_uint8_array, TRUE, "ZFS SYSACL" }, + { byteswap_uint8_array, TRUE, "FUID table" }, + { byteswap_uint64_array, TRUE, "FUID table size" }, + { zap_byteswap, TRUE, "DSL dataset next clones"}, + { zap_byteswap, TRUE, "scrub work queue" }, +}; + +int +dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset, + void *tag, dmu_buf_t **dbp) +{ + dnode_t *dn; + uint64_t blkid; + dmu_buf_impl_t *db; + int err; + + err = dnode_hold(os->os, object, FTAG, &dn); + if (err) + return (err); + blkid = dbuf_whichblock(dn, offset); + rw_enter(&dn->dn_struct_rwlock, RW_READER); + db = dbuf_hold(dn, blkid, tag); + rw_exit(&dn->dn_struct_rwlock); + if (db == NULL) { + err = EIO; + } else { + err = dbuf_read(db, NULL, DB_RF_CANFAIL); + if (err) { + dbuf_rele(db, tag); + db = NULL; + } + } + + dnode_rele(dn, FTAG); + *dbp = &db->db; + return (err); +} + +int +dmu_bonus_max(void) +{ + return (DN_MAX_BONUSLEN); +} + +int +dmu_set_bonus(dmu_buf_t *db, int newsize, dmu_tx_t *tx) +{ + dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode; + + if (dn->dn_bonus != (dmu_buf_impl_t *)db) + return (EINVAL); + if (newsize < 0 || newsize > db->db_size) + return (EINVAL); + dnode_setbonuslen(dn, newsize, tx); + return (0); +} + +/* + * returns ENOENT, EIO, or 0. 
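+ * + * On success the bonus buffer is returned held in *dbp; the caller + * releases it with dmu_buf_rele() when finished.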
+ */ +int +dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp) +{ + dnode_t *dn; + dmu_buf_impl_t *db; + int error; + + error = dnode_hold(os->os, object, FTAG, &dn); + if (error) + return (error); + + rw_enter(&dn->dn_struct_rwlock, RW_READER); + if (dn->dn_bonus == NULL) { + rw_exit(&dn->dn_struct_rwlock); + rw_enter(&dn->dn_struct_rwlock, RW_WRITER); + if (dn->dn_bonus == NULL) + dbuf_create_bonus(dn); + } + db = dn->dn_bonus; + rw_exit(&dn->dn_struct_rwlock); + + /* as long as the bonus buf is held, the dnode will be held */ + if (refcount_add(&db->db_holds, tag) == 1) + VERIFY(dnode_add_ref(dn, db)); + + dnode_rele(dn, FTAG); + + VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED)); + + *dbp = &db->db; + return (0); +} + +/* + * Note: longer-term, we should modify all of the dmu_buf_*() interfaces + * to take a held dnode rather than <os, object> -- the lookup is wasteful, + * and can induce severe lock contention when writing to several files + * whose dnodes are in the same block. + */ +static int +dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, + uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp) +{ + dsl_pool_t *dp = NULL; + dmu_buf_t **dbp; + uint64_t blkid, nblks, i; + uint32_t flags; + int err; + zio_t *zio; + hrtime_t start; + + ASSERT(length <= DMU_MAX_ACCESS); + + flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT; + if (length > zfetch_array_rd_sz) + flags |= DB_RF_NOPREFETCH; + + rw_enter(&dn->dn_struct_rwlock, RW_READER); + if (dn->dn_datablkshift) { + int blkshift = dn->dn_datablkshift; + nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) - P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift; + } else { + if (offset + length > dn->dn_datablksz) { + zfs_panic_recover("zfs: accessing past end of object " + "%llx/%llx (size=%u access=%llu+%llu)", + (longlong_t)dn->dn_objset-> + os_dsl_dataset->ds_object, + (longlong_t)dn->dn_object, dn->dn_datablksz, + (longlong_t)offset, (longlong_t)length); + return (EIO); + } + nblks = 1; + } + dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP); + + if (dn->dn_objset->os_dsl_dataset) + dp = dn->dn_objset->os_dsl_dataset->ds_dir->dd_pool; + if (dp && dsl_pool_sync_context(dp)) + start = gethrtime(); + zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL); + blkid = dbuf_whichblock(dn, offset); + for (i = 0; i < nblks; i++) { + dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag); + if (db == NULL) { + rw_exit(&dn->dn_struct_rwlock); + dmu_buf_rele_array(dbp, nblks, tag); + zio_nowait(zio); + return (EIO); + } + /* initiate async i/o */ + if (read) { + rw_exit(&dn->dn_struct_rwlock); + (void) dbuf_read(db, zio, flags); + rw_enter(&dn->dn_struct_rwlock, RW_READER); + } + dbp[i] = &db->db; + } + rw_exit(&dn->dn_struct_rwlock); + + /* wait for async i/o */ + err = zio_wait(zio); + /* track read overhead when we are in sync context */ + if (dp && dsl_pool_sync_context(dp)) + dp->dp_read_overhead += gethrtime() - start; + if (err) { + dmu_buf_rele_array(dbp, nblks, tag); + return (err); + } + + /* wait for other io to complete */ + if (read) { + for (i = 0; i < nblks; i++) { + dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i]; + mutex_enter(&db->db_mtx); + while (db->db_state == DB_READ || + db->db_state == DB_FILL) + cv_wait(&db->db_changed, &db->db_mtx); + if (db->db_state == DB_UNCACHED) + err = EIO; + mutex_exit(&db->db_mtx); + if (err) { + dmu_buf_rele_array(dbp, nblks, tag); + return (err); + } + } + } + + *numbufsp = nblks; + *dbpp = dbp; + return (0); +} + +static int +dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset, + uint64_t
length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp) +{ + dnode_t *dn; + int err; + + err = dnode_hold(os->os, object, FTAG, &dn); + if (err) + return (err); + + err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag, + numbufsp, dbpp); + + dnode_rele(dn, FTAG); + + return (err); +} + +int +dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset, + uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp) +{ + dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode; + int err; + + err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag, + numbufsp, dbpp); + + return (err); +} + +void +dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag) +{ + int i; + dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake; + + if (numbufs == 0) + return; + + for (i = 0; i < numbufs; i++) { + if (dbp[i]) + dbuf_rele(dbp[i], tag); + } + + kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs); +} + +void +dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len) +{ + dnode_t *dn; + uint64_t blkid; + int nblks, i, err; + + if (zfs_prefetch_disable) + return; + + if (len == 0) { /* they're interested in the bonus buffer */ + dn = os->os->os_meta_dnode; + + if (object == 0 || object >= DN_MAX_OBJECT) + return; + + rw_enter(&dn->dn_struct_rwlock, RW_READER); + blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t)); + dbuf_prefetch(dn, blkid); + rw_exit(&dn->dn_struct_rwlock); + return; + } + + /* + * XXX - Note, if the dnode for the requested object is not + * already cached, we will do a *synchronous* read in the + * dnode_hold() call. The same is true for any indirects. + */ + err = dnode_hold(os->os, object, FTAG, &dn); + if (err != 0) + return; + + rw_enter(&dn->dn_struct_rwlock, RW_READER); + if (dn->dn_datablkshift) { + int blkshift = dn->dn_datablkshift; + nblks = (P2ROUNDUP(offset+len, 1<> blkshift; + } else { + nblks = (offset < dn->dn_datablksz); + } + + if (nblks != 0) { + blkid = dbuf_whichblock(dn, offset); + for (i = 0; i < nblks; i++) + dbuf_prefetch(dn, blkid+i); + } + + rw_exit(&dn->dn_struct_rwlock); + + dnode_rele(dn, FTAG); +} + +static int +get_next_chunk(dnode_t *dn, uint64_t *offset, uint64_t limit) +{ + uint64_t len = *offset - limit; + uint64_t chunk_len = dn->dn_datablksz * DMU_MAX_DELETEBLKCNT; + uint64_t subchunk = + dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT); + + ASSERT(limit <= *offset); + + if (len <= chunk_len) { + *offset = limit; + return (0); + } + + ASSERT(ISP2(subchunk)); + + while (*offset > limit) { + uint64_t initial_offset = P2ROUNDUP(*offset, subchunk); + uint64_t delta; + int err; + + /* skip over allocated data */ + err = dnode_next_offset(dn, + DNODE_FIND_HOLE|DNODE_FIND_BACKWARDS, offset, 1, 1, 0); + if (err == ESRCH) + *offset = limit; + else if (err) + return (err); + + ASSERT3U(*offset, <=, initial_offset); + *offset = P2ALIGN(*offset, subchunk); + delta = initial_offset - *offset; + if (delta >= chunk_len) { + *offset += delta - chunk_len; + return (0); + } + chunk_len -= delta; + + /* skip over unallocated data */ + err = dnode_next_offset(dn, + DNODE_FIND_BACKWARDS, offset, 1, 1, 0); + if (err == ESRCH) + *offset = limit; + else if (err) + return (err); + + if (*offset < limit) + *offset = limit; + ASSERT3U(*offset, <, initial_offset); + } + return (0); +} + +static int +dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset, + uint64_t length, boolean_t free_dnode) +{ + dmu_tx_t *tx; + uint64_t object_size, start, end, len; + boolean_t trunc = (length == DMU_OBJECT_END); + 
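+	/*
+	 * get_next_chunk() above bounds how much any single transaction
+	 * frees: each pass covers at most dn_datablksz *
+	 * DMU_MAX_DELETEBLKCNT bytes, walking backwards from the end of
+	 * the range, so a huge object can be truncated without assigning
+	 * one enormous tx.  A hedged usage sketch, truncating an object
+	 * down to new_size bytes:
+	 *
+	 *	err = dmu_free_long_range(os, object, new_size,
+	 *	    DMU_OBJECT_END);
+	 */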
int align, err; + + align = 1 << dn->dn_datablkshift; + ASSERT(align > 0); + object_size = align == 1 ? dn->dn_datablksz : + (dn->dn_maxblkid + 1) << dn->dn_datablkshift; + + if (trunc || (end = offset + length) > object_size) + end = object_size; + if (end <= offset) + return (0); + length = end - offset; + + while (length) { + start = end; + err = get_next_chunk(dn, &start, offset); + if (err) + return (err); + len = trunc ? DMU_OBJECT_END : end - start; + + tx = dmu_tx_create(os); + dmu_tx_hold_free(tx, dn->dn_object, start, len); + err = dmu_tx_assign(tx, TXG_WAIT); + if (err) { + dmu_tx_abort(tx); + return (err); + } + + dnode_free_range(dn, start, trunc ? -1 : len, tx); + + if (start == 0 && free_dnode) { + ASSERT(trunc); + dnode_free(dn, tx); + } + + length -= end - start; + + dmu_tx_commit(tx); + end = start; + } + return (0); +} + +int +dmu_free_long_range(objset_t *os, uint64_t object, + uint64_t offset, uint64_t length) +{ + dnode_t *dn; + int err; + + err = dnode_hold(os->os, object, FTAG, &dn); + if (err != 0) + return (err); + err = dmu_free_long_range_impl(os, dn, offset, length, FALSE); + dnode_rele(dn, FTAG); + return (err); +} + +int +dmu_free_object(objset_t *os, uint64_t object) +{ + dnode_t *dn; + dmu_tx_t *tx; + int err; + + err = dnode_hold_impl(os->os, object, DNODE_MUST_BE_ALLOCATED, + FTAG, &dn); + if (err != 0) + return (err); + if (dn->dn_nlevels == 1) { + tx = dmu_tx_create(os); + dmu_tx_hold_bonus(tx, object); + dmu_tx_hold_free(tx, dn->dn_object, 0, DMU_OBJECT_END); + err = dmu_tx_assign(tx, TXG_WAIT); + if (err == 0) { + dnode_free_range(dn, 0, DMU_OBJECT_END, tx); + dnode_free(dn, tx); + dmu_tx_commit(tx); + } else { + dmu_tx_abort(tx); + } + } else { + err = dmu_free_long_range_impl(os, dn, 0, DMU_OBJECT_END, TRUE); + } + dnode_rele(dn, FTAG); + return (err); +} + +int +dmu_free_range(objset_t *os, uint64_t object, uint64_t offset, + uint64_t size, dmu_tx_t *tx) +{ + dnode_t *dn; + int err = dnode_hold(os->os, object, FTAG, &dn); + if (err) + return (err); + ASSERT(offset < UINT64_MAX); + ASSERT(size == -1ULL || size <= UINT64_MAX - offset); + dnode_free_range(dn, offset, size, tx); + dnode_rele(dn, FTAG); + return (0); +} + +int +dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + void *buf) +{ + dnode_t *dn; + dmu_buf_t **dbp; + int numbufs, i, err; + + err = dnode_hold(os->os, object, FTAG, &dn); + if (err) + return (err); + + /* + * Deal with odd block sizes, where there can't be data past the first + * block. If we ever do the tail block optimization, we will need to + * handle that here as well. + */ + if (dn->dn_datablkshift == 0) { + int newsz = offset > dn->dn_datablksz ? 0 : + MIN(size, dn->dn_datablksz - offset); + bzero((char *)buf + newsz, size - newsz); + size = newsz; + } + + while (size > 0) { + uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2); + + /* + * NB: we could do this block-at-a-time, but it's nice + * to be reading in parallel. 
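+ * Each pass holds at most DMU_MAX_ACCESS / 2 bytes worth of dbufs, so a
+ * single large read cannot pin an unbounded amount of buffer space.
+ *
+ * A hedged caller sketch (synchronous read into a flat kernel buffer):
+ *
+ *	char *buf = kmem_alloc(len, KM_SLEEP);
+ *	err = dmu_read(os, object, off, len, buf);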
+ */ + err = dmu_buf_hold_array_by_dnode(dn, offset, mylen, + TRUE, FTAG, &numbufs, &dbp); + if (err) + break; + + for (i = 0; i < numbufs; i++) { + int tocpy; + int bufoff; + dmu_buf_t *db = dbp[i]; + + ASSERT(size > 0); + + bufoff = offset - db->db_offset; + tocpy = (int)MIN(db->db_size - bufoff, size); + + bcopy((char *)db->db_data + bufoff, buf, tocpy); + + offset += tocpy; + size -= tocpy; + buf = (char *)buf + tocpy; + } + dmu_buf_rele_array(dbp, numbufs, FTAG); + } + dnode_rele(dn, FTAG); + return (err); +} + +void +dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + const void *buf, dmu_tx_t *tx) +{ + dmu_buf_t **dbp; + int numbufs, i; + + if (size == 0) + return; + + VERIFY(0 == dmu_buf_hold_array(os, object, offset, size, + FALSE, FTAG, &numbufs, &dbp)); + + for (i = 0; i < numbufs; i++) { + int tocpy; + int bufoff; + dmu_buf_t *db = dbp[i]; + + ASSERT(size > 0); + + bufoff = offset - db->db_offset; + tocpy = (int)MIN(db->db_size - bufoff, size); + + ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); + + if (tocpy == db->db_size) + dmu_buf_will_fill(db, tx); + else + dmu_buf_will_dirty(db, tx); + + bcopy(buf, (char *)db->db_data + bufoff, tocpy); + + if (tocpy == db->db_size) + dmu_buf_fill_done(db, tx); + + offset += tocpy; + size -= tocpy; + buf = (char *)buf + tocpy; + } + dmu_buf_rele_array(dbp, numbufs, FTAG); +} + +void +dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + dmu_tx_t *tx) +{ + dmu_buf_t **dbp; + int numbufs, i; + + if (size == 0) + return; + + VERIFY(0 == dmu_buf_hold_array(os, object, offset, size, + FALSE, FTAG, &numbufs, &dbp)); + + for (i = 0; i < numbufs; i++) { + dmu_buf_t *db = dbp[i]; + + dmu_buf_will_not_fill(db, tx); + } + dmu_buf_rele_array(dbp, numbufs, FTAG); +} + +#ifdef _KERNEL +int +dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size) +{ + dmu_buf_t **dbp; + int numbufs, i, err; + + /* + * NB: we could do this block-at-a-time, but it's nice + * to be reading in parallel. + */ + err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG, + &numbufs, &dbp); + if (err) + return (err); + + for (i = 0; i < numbufs; i++) { + int tocpy; + int bufoff; + dmu_buf_t *db = dbp[i]; + + ASSERT(size > 0); + + bufoff = uio->uio_loffset - db->db_offset; + tocpy = (int)MIN(db->db_size - bufoff, size); + + err = uiomove((char *)db->db_data + bufoff, tocpy, + UIO_READ, uio); + if (err) + break; + + size -= tocpy; + } + dmu_buf_rele_array(dbp, numbufs, FTAG); + + return (err); +} + +int +dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size, + dmu_tx_t *tx) +{ + dmu_buf_t **dbp; + int numbufs, i; + int err = 0; + + if (size == 0) + return (0); + + err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, + FALSE, FTAG, &numbufs, &dbp); + if (err) + return (err); + + for (i = 0; i < numbufs; i++) { + int tocpy; + int bufoff; + dmu_buf_t *db = dbp[i]; + + ASSERT(size > 0); + + bufoff = uio->uio_loffset - db->db_offset; + tocpy = (int)MIN(db->db_size - bufoff, size); + + ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); + + if (tocpy == db->db_size) + dmu_buf_will_fill(db, tx); + else + dmu_buf_will_dirty(db, tx); + + /* + * XXX uiomove could block forever (eg. nfs-backed + * pages). There needs to be a uiolockdown() function + * to lock the pages in memory, so that uiomove won't + * block. 
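+ *
+ * As above, dmu_buf_will_fill() is taken when an entire block is about
+ * to be overwritten, so its previous contents never need to be read in;
+ * dmu_buf_will_dirty() is the read-modify-write path for partial blocks.
+ *
+ * A hedged sketch of the transaction pattern a typical caller wraps
+ * around these write interfaces:
+ *
+ *	tx = dmu_tx_create(os);
+ *	dmu_tx_hold_write(tx, object, off, len);
+ *	err = dmu_tx_assign(tx, TXG_WAIT);
+ *	if (err != 0) {
+ *		dmu_tx_abort(tx);
+ *		return (err);
+ *	}
+ *	dmu_write(os, object, off, len, buf, tx);
+ *	dmu_tx_commit(tx);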
+ */ + err = uiomove((char *)db->db_data + bufoff, tocpy, + UIO_WRITE, uio); + + if (tocpy == db->db_size) + dmu_buf_fill_done(db, tx); + + if (err) + break; + + size -= tocpy; + } + dmu_buf_rele_array(dbp, numbufs, FTAG); + return (err); +} + +int +dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + page_t *pp, dmu_tx_t *tx) +{ + dmu_buf_t **dbp; + int numbufs, i; + int err; + + if (size == 0) + return (0); + + err = dmu_buf_hold_array(os, object, offset, size, + FALSE, FTAG, &numbufs, &dbp); + if (err) + return (err); + + for (i = 0; i < numbufs; i++) { + int tocpy, copied, thiscpy; + int bufoff; + dmu_buf_t *db = dbp[i]; + caddr_t va; + + ASSERT(size > 0); + ASSERT3U(db->db_size, >=, PAGESIZE); + + bufoff = offset - db->db_offset; + tocpy = (int)MIN(db->db_size - bufoff, size); + + ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size); + + if (tocpy == db->db_size) + dmu_buf_will_fill(db, tx); + else + dmu_buf_will_dirty(db, tx); + + for (copied = 0; copied < tocpy; copied += PAGESIZE) { + ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff); + thiscpy = MIN(PAGESIZE, tocpy - copied); + va = zfs_map_page(pp, S_READ); + bcopy(va, (char *)db->db_data + bufoff, thiscpy); + zfs_unmap_page(pp, va); + pp = pp->p_next; + bufoff += PAGESIZE; + } + + if (tocpy == db->db_size) + dmu_buf_fill_done(db, tx); + + if (err) + break; + + offset += tocpy; + size -= tocpy; + } + dmu_buf_rele_array(dbp, numbufs, FTAG); + return (err); +} +#endif + +typedef struct { + dbuf_dirty_record_t *dr; + dmu_sync_cb_t *done; + void *arg; +} dmu_sync_arg_t; + +/* ARGSUSED */ +static void +dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg) +{ + blkptr_t *bp = zio->io_bp; + + if (!BP_IS_HOLE(bp)) { + dmu_sync_arg_t *in = varg; + dbuf_dirty_record_t *dr = in->dr; + dmu_buf_impl_t *db = dr->dr_dbuf; + ASSERT(BP_GET_TYPE(bp) == db->db_dnode->dn_type); + ASSERT(BP_GET_LEVEL(bp) == 0); + bp->blk_fill = 1; + } +} + +/* ARGSUSED */ +static void +dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg) +{ + dmu_sync_arg_t *in = varg; + dbuf_dirty_record_t *dr = in->dr; + dmu_buf_impl_t *db = dr->dr_dbuf; + dmu_sync_cb_t *done = in->done; + + mutex_enter(&db->db_mtx); + ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC); + dr->dt.dl.dr_overridden_by = *zio->io_bp; /* structure assignment */ + dr->dt.dl.dr_override_state = DR_OVERRIDDEN; + cv_broadcast(&db->db_changed); + mutex_exit(&db->db_mtx); + + if (done) + done(&(db->db), in->arg); + + kmem_free(in, sizeof (dmu_sync_arg_t)); +} + +/* + * Intent log support: sync the block associated with db to disk. + * N.B. and XXX: the caller is responsible for making sure that the + * data isn't changing while dmu_sync() is writing it. + * + * Return values: + * + * EEXIST: this txg has already been synced, so there's nothing to to. + * The caller should not log the write. + * + * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do. + * The caller should not log the write. + * + * EALREADY: this block is already in the process of being synced. + * The caller should track its progress (somehow). + * + * EINPROGRESS: the IO has been initiated. + * The caller should log this blkptr in the callback. + * + * 0: completed. Sets *bp to the blkptr just written. + * The caller should log this blkptr immediately. 
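+ *
+ * A hedged dispatch sketch, modelled on an intent-log get-data caller
+ * (everything except dmu_sync() itself is illustrative):
+ *
+ *	error = dmu_sync(pio, db, &lr->lr_blkptr, txg, my_sync_cb, zgd);
+ *	if (error == 0)
+ *		... lr_blkptr is valid; write the log record now ...
+ *	else if (error == EINPROGRESS)
+ *		... my_sync_cb() will see the final blkptr ...
+ *	else if (error == EALREADY)
+ *		... track the already in-flight sync of this block ...
+ *	else
+ *		... EEXIST or ENOENT: do not log this write ...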
+ */ +int +dmu_sync(zio_t *pio, dmu_buf_t *db_fake, + blkptr_t *bp, uint64_t txg, dmu_sync_cb_t *done, void *arg) +{ + dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; + objset_impl_t *os = db->db_objset; + dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool; + tx_state_t *tx = &dp->dp_tx; + dbuf_dirty_record_t *dr; + dmu_sync_arg_t *in; + zbookmark_t zb; + writeprops_t wp = { 0 }; + zio_t *zio; + int err; + + ASSERT(BP_IS_HOLE(bp)); + ASSERT(txg != 0); + + dprintf("dmu_sync txg=%llu, s,o,q %llu %llu %llu\n", + txg, tx->tx_synced_txg, tx->tx_open_txg, tx->tx_quiesced_txg); + + /* + * XXX - would be nice if we could do this without suspending... + */ + txg_suspend(dp); + + /* + * If this txg already synced, there's nothing to do. + */ + if (txg <= tx->tx_synced_txg) { + txg_resume(dp); + /* + * If we're running ziltest, we need the blkptr regardless. + */ + if (txg > spa_freeze_txg(dp->dp_spa)) { + /* if db_blkptr == NULL, this was an empty write */ + if (db->db_blkptr) + *bp = *db->db_blkptr; /* structure assignment */ + return (0); + } + return (EEXIST); + } + + mutex_enter(&db->db_mtx); + + if (txg == tx->tx_syncing_txg) { + while (db->db_data_pending) { + /* + * IO is in-progress. Wait for it to finish. + * XXX - would be nice to be able to somehow "attach" + * this zio to the parent zio passed in. + */ + cv_wait(&db->db_changed, &db->db_mtx); + if (!db->db_data_pending && + db->db_blkptr && BP_IS_HOLE(db->db_blkptr)) { + /* + * IO was compressed away + */ + *bp = *db->db_blkptr; /* structure assignment */ + mutex_exit(&db->db_mtx); + txg_resume(dp); + return (0); + } + ASSERT(db->db_data_pending || + (db->db_blkptr && db->db_blkptr->blk_birth == txg)); + } + + if (db->db_blkptr && db->db_blkptr->blk_birth == txg) { + /* + * IO is already completed. + */ + *bp = *db->db_blkptr; /* structure assignment */ + mutex_exit(&db->db_mtx); + txg_resume(dp); + return (0); + } + } + + dr = db->db_last_dirty; + while (dr && dr->dr_txg > txg) + dr = dr->dr_next; + if (dr == NULL || dr->dr_txg < txg) { + /* + * This dbuf isn't dirty, must have been free_range'd. + * There's no need to log writes to freed blocks, so we're done. + */ + mutex_exit(&db->db_mtx); + txg_resume(dp); + return (ENOENT); + } + + ASSERT(dr->dr_txg == txg); + if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { + /* + * We have already issued a sync write for this buffer. + */ + mutex_exit(&db->db_mtx); + txg_resume(dp); + return (EALREADY); + } else if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { + /* + * This buffer has already been synced. It could not + * have been dirtied since, or we would have cleared the state. 
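+ * (The level-0 dirty record's override state moves from DR_NOT_OVERRIDDEN
+ * to DR_IN_DMU_SYNC while the sync write is in flight, then to
+ * DR_OVERRIDDEN in dmu_sync_done(); re-dirtying the buffer clears the
+ * override again, which is why DR_OVERRIDDEN implies the data is stable.)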
+ */ + *bp = dr->dt.dl.dr_overridden_by; /* structure assignment */ + mutex_exit(&db->db_mtx); + txg_resume(dp); + return (0); + } + + dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC; + in = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); + in->dr = dr; + in->done = done; + in->arg = arg; + mutex_exit(&db->db_mtx); + txg_resume(dp); + + zb.zb_objset = os->os_dsl_dataset->ds_object; + zb.zb_object = db->db.db_object; + zb.zb_level = db->db_level; + zb.zb_blkid = db->db_blkid; + + wp.wp_type = db->db_dnode->dn_type; + wp.wp_level = db->db_level; + wp.wp_copies = os->os_copies; + wp.wp_dnchecksum = db->db_dnode->dn_checksum; + wp.wp_oschecksum = os->os_checksum; + wp.wp_dncompress = db->db_dnode->dn_compress; + wp.wp_oscompress = os->os_compress; + + ASSERT(BP_IS_HOLE(bp)); + + zio = arc_write(pio, os->os_spa, &wp, DBUF_IS_L2CACHEABLE(db), + txg, bp, dr->dt.dl.dr_data, dmu_sync_ready, dmu_sync_done, in, + ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); + if (pio) { + zio_nowait(zio); + err = EINPROGRESS; + } else { + err = zio_wait(zio); + ASSERT(err == 0); + } + return (err); +} + +int +dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs, + dmu_tx_t *tx) +{ + dnode_t *dn; + int err; + + err = dnode_hold(os->os, object, FTAG, &dn); + if (err) + return (err); + err = dnode_set_blksz(dn, size, ibs, tx); + dnode_rele(dn, FTAG); + return (err); +} + +void +dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, + dmu_tx_t *tx) +{ + dnode_t *dn; + + /* XXX assumes dnode_hold will not get an i/o error */ + (void) dnode_hold(os->os, object, FTAG, &dn); + ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS); + dn->dn_checksum = checksum; + dnode_setdirty(dn, tx); + dnode_rele(dn, FTAG); +} + +void +dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, + dmu_tx_t *tx) +{ + dnode_t *dn; + + /* XXX assumes dnode_hold will not get an i/o error */ + (void) dnode_hold(os->os, object, FTAG, &dn); + ASSERT(compress < ZIO_COMPRESS_FUNCTIONS); + dn->dn_compress = compress; + dnode_setdirty(dn, tx); + dnode_rele(dn, FTAG); +} + +int +dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off) +{ + dnode_t *dn; + int i, err; + + err = dnode_hold(os->os, object, FTAG, &dn); + if (err) + return (err); + /* + * Sync any current changes before + * we go trundling through the block pointers. + */ + for (i = 0; i < TXG_SIZE; i++) { + if (list_link_active(&dn->dn_dirty_link[i])) + break; + } + if (i != TXG_SIZE) { + dnode_rele(dn, FTAG); + txg_wait_synced(dmu_objset_pool(os), 0); + err = dnode_hold(os->os, object, FTAG, &dn); + if (err) + return (err); + } + + err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0); + dnode_rele(dn, FTAG); + + return (err); +} + +void +dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) +{ + rw_enter(&dn->dn_struct_rwlock, RW_READER); + mutex_enter(&dn->dn_mtx); + + doi->doi_data_block_size = dn->dn_datablksz; + doi->doi_metadata_block_size = dn->dn_indblkshift ? + 1ULL << dn->dn_indblkshift : 0; + doi->doi_indirection = dn->dn_nlevels; + doi->doi_checksum = dn->dn_checksum; + doi->doi_compress = dn->dn_compress; + doi->doi_physical_blks = (DN_USED_BYTES(dn->dn_phys) + + SPA_MINBLOCKSIZE/2) >> SPA_MINBLOCKSHIFT; + doi->doi_max_block_offset = dn->dn_phys->dn_maxblkid; + doi->doi_type = dn->dn_type; + doi->doi_bonus_size = dn->dn_bonuslen; + doi->doi_bonus_type = dn->dn_bonustype; + + mutex_exit(&dn->dn_mtx); + rw_exit(&dn->dn_struct_rwlock); +} + +/* + * Get information on a DMU object. 
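+ * A hedged usage sketch:
+ *
+ *	dmu_object_info_t doi;
+ *
+ *	if (dmu_object_info(os, object, &doi) == 0)
+ *		... doi.doi_data_block_size, doi.doi_bonus_size, etc.
+ *		    now describe the object ...
+ *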
+ * If doi is NULL, just indicates whether the object exists. + */ +int +dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi) +{ + dnode_t *dn; + int err = dnode_hold(os->os, object, FTAG, &dn); + + if (err) + return (err); + + if (doi != NULL) + dmu_object_info_from_dnode(dn, doi); + + dnode_rele(dn, FTAG); + return (0); +} + +/* + * As above, but faster; can be used when you have a held dbuf in hand. + */ +void +dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi) +{ + dmu_object_info_from_dnode(((dmu_buf_impl_t *)db)->db_dnode, doi); +} + +/* + * Faster still when you only care about the size. + * This is specifically optimized for zfs_getattr(). + */ +void +dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, u_longlong_t *nblk512) +{ + dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode; + + *blksize = dn->dn_datablksz; + /* add 1 for dnode space */ + *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >> + SPA_MINBLOCKSHIFT) + 1; +} + +void +byteswap_uint64_array(void *vbuf, size_t size) +{ + uint64_t *buf = vbuf; + size_t count = size >> 3; + int i; + + ASSERT((size & 7) == 0); + + for (i = 0; i < count; i++) + buf[i] = BSWAP_64(buf[i]); +} + +void +byteswap_uint32_array(void *vbuf, size_t size) +{ + uint32_t *buf = vbuf; + size_t count = size >> 2; + int i; + + ASSERT((size & 3) == 0); + + for (i = 0; i < count; i++) + buf[i] = BSWAP_32(buf[i]); +} + +void +byteswap_uint16_array(void *vbuf, size_t size) +{ + uint16_t *buf = vbuf; + size_t count = size >> 1; + int i; + + ASSERT((size & 1) == 0); + + for (i = 0; i < count; i++) + buf[i] = BSWAP_16(buf[i]); +} + +/* ARGSUSED */ +void +byteswap_uint8_array(void *vbuf, size_t size) +{ +} + +void +dmu_init(void) +{ + dbuf_init(); + dnode_init(); + arc_init(); + l2arc_init(); +} + +void +dmu_fini(void) +{ + arc_fini(); + dnode_fini(); + dbuf_fini(); + l2arc_fini(); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_object.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_object.c new file mode 100644 index 000000000000..1b9247d66e65 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_object.c @@ -0,0 +1,162 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include + +uint64_t +dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx) +{ + objset_impl_t *osi = os->os; + uint64_t object; + uint64_t L2_dnode_count = DNODES_PER_BLOCK << + (osi->os_meta_dnode->dn_indblkshift - SPA_BLKPTRSHIFT); + dnode_t *dn = NULL; + int restarted = B_FALSE; + + mutex_enter(&osi->os_obj_lock); + for (;;) { + object = osi->os_obj_next; + /* + * Each time we polish off an L2 bp worth of dnodes + * (2^13 objects), move to another L2 bp that's still + * reasonably sparse (at most 1/4 full). Look from the + * beginning once, but after that keep looking from here. + * If we can't find one, just keep going from here. + */ + if (P2PHASE(object, L2_dnode_count) == 0) { + uint64_t offset = restarted ? object << DNODE_SHIFT : 0; + int error = dnode_next_offset(osi->os_meta_dnode, + DNODE_FIND_HOLE, + &offset, 2, DNODES_PER_BLOCK >> 2, 0); + restarted = B_TRUE; + if (error == 0) + object = offset >> DNODE_SHIFT; + } + osi->os_obj_next = ++object; + + /* + * XXX We should check for an i/o error here and return + * up to our caller. Actually we should pre-read it in + * dmu_tx_assign(), but there is currently no mechanism + * to do so. + */ + (void) dnode_hold_impl(os->os, object, DNODE_MUST_BE_FREE, + FTAG, &dn); + if (dn) + break; + + if (dmu_object_next(os, &object, B_TRUE, 0) == 0) + osi->os_obj_next = object - 1; + } + + dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx); + dnode_rele(dn, FTAG); + + mutex_exit(&osi->os_obj_lock); + + dmu_tx_add_new_object(tx, os, object); + return (object); +} + +int +dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot, + int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx) +{ + dnode_t *dn; + int err; + + if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx)) + return (EBADF); + + err = dnode_hold_impl(os->os, object, DNODE_MUST_BE_FREE, FTAG, &dn); + if (err) + return (err); + dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx); + dnode_rele(dn, FTAG); + + dmu_tx_add_new_object(tx, os, object); + return (0); +} + +int +dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot, + int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx) +{ + dnode_t *dn; + int err; + + if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx)) + return (EBADF); + + err = dnode_hold_impl(os->os, object, DNODE_MUST_BE_ALLOCATED, + FTAG, &dn); + if (err) + return (err); + dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, tx); + dnode_rele(dn, FTAG); + + return (0); +} + +int +dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx) +{ + dnode_t *dn; + int err; + + ASSERT(object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx)); + + err = dnode_hold_impl(os->os, object, DNODE_MUST_BE_ALLOCATED, + FTAG, &dn); + if (err) + return (err); + + ASSERT(dn->dn_type != DMU_OT_NONE); + dnode_free_range(dn, 0, DMU_OBJECT_END, tx); + dnode_free(dn, tx); + dnode_rele(dn, FTAG); + + return (0); +} + +int +dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg) +{ + uint64_t offset = (*objectp + 1) << DNODE_SHIFT; + int error; + + error = dnode_next_offset(os->os->os_meta_dnode, + (hole ? 
DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg); + + *objectp = offset >> DNODE_SHIFT; + + return (error); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_objset.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_objset.c new file mode 100644 index 000000000000..f37cc2fc56cb --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_objset.c @@ -0,0 +1,1260 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +spa_t * +dmu_objset_spa(objset_t *os) +{ + return (os->os->os_spa); +} + +zilog_t * +dmu_objset_zil(objset_t *os) +{ + return (os->os->os_zil); +} + +dsl_pool_t * +dmu_objset_pool(objset_t *os) +{ + dsl_dataset_t *ds; + + if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir) + return (ds->ds_dir->dd_pool); + else + return (spa_get_dsl(os->os->os_spa)); +} + +dsl_dataset_t * +dmu_objset_ds(objset_t *os) +{ + return (os->os->os_dsl_dataset); +} + +dmu_objset_type_t +dmu_objset_type(objset_t *os) +{ + return (os->os->os_phys->os_type); +} + +void +dmu_objset_name(objset_t *os, char *buf) +{ + dsl_dataset_name(os->os->os_dsl_dataset, buf); +} + +uint64_t +dmu_objset_id(objset_t *os) +{ + dsl_dataset_t *ds = os->os->os_dsl_dataset; + + return (ds ? ds->ds_object : 0); +} + +static void +checksum_changed_cb(void *arg, uint64_t newval) +{ + objset_impl_t *osi = arg; + + /* + * Inheritance should have been done by now. + */ + ASSERT(newval != ZIO_CHECKSUM_INHERIT); + + osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE); +} + +static void +compression_changed_cb(void *arg, uint64_t newval) +{ + objset_impl_t *osi = arg; + + /* + * Inheritance and range checking should have been done by now. + */ + ASSERT(newval != ZIO_COMPRESS_INHERIT); + + osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE); +} + +static void +copies_changed_cb(void *arg, uint64_t newval) +{ + objset_impl_t *osi = arg; + + /* + * Inheritance and range checking should have been done by now. + */ + ASSERT(newval > 0); + ASSERT(newval <= spa_max_replication(osi->os_spa)); + + osi->os_copies = newval; +} + +static void +primary_cache_changed_cb(void *arg, uint64_t newval) +{ + objset_impl_t *osi = arg; + + /* + * Inheritance and range checking should have been done by now. 
+ */ + ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE || + newval == ZFS_CACHE_METADATA); + + osi->os_primary_cache = newval; +} + +static void +secondary_cache_changed_cb(void *arg, uint64_t newval) +{ + objset_impl_t *osi = arg; + + /* + * Inheritance and range checking should have been done by now. + */ + ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE || + newval == ZFS_CACHE_METADATA); + + osi->os_secondary_cache = newval; +} + +void +dmu_objset_byteswap(void *buf, size_t size) +{ + objset_phys_t *osp = buf; + + ASSERT(size == sizeof (objset_phys_t)); + dnode_byteswap(&osp->os_meta_dnode); + byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t)); + osp->os_type = BSWAP_64(osp->os_type); +} + +int +dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, + objset_impl_t **osip) +{ + objset_impl_t *osi; + int i, err; + + ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock)); + + osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP); + osi->os.os = osi; + osi->os_dsl_dataset = ds; + osi->os_spa = spa; + osi->os_rootbp = bp; + if (!BP_IS_HOLE(osi->os_rootbp)) { + uint32_t aflags = ARC_WAIT; + zbookmark_t zb; + zb.zb_objset = ds ? ds->ds_object : 0; + zb.zb_object = 0; + zb.zb_level = -1; + zb.zb_blkid = 0; + if (DMU_OS_IS_L2CACHEABLE(osi)) + aflags |= ARC_L2CACHE; + + dprintf_bp(osi->os_rootbp, "reading %s", ""); + /* + * NB: when bprewrite scrub can change the bp, + * and this is called from dmu_objset_open_ds_os, the bp + * could change, and we'll need a lock. + */ + err = arc_read_nolock(NULL, spa, osi->os_rootbp, + arc_getbuf_func, &osi->os_phys_buf, + ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb); + if (err) { + kmem_free(osi, sizeof (objset_impl_t)); + /* convert checksum errors into IO errors */ + if (err == ECKSUM) + err = EIO; + return (err); + } + osi->os_phys = osi->os_phys_buf->b_data; + } else { + osi->os_phys_buf = arc_buf_alloc(spa, sizeof (objset_phys_t), + &osi->os_phys_buf, ARC_BUFC_METADATA); + osi->os_phys = osi->os_phys_buf->b_data; + bzero(osi->os_phys, sizeof (objset_phys_t)); + } + + /* + * Note: the changed_cb will be called once before the register + * func returns, thus changing the checksum/compression from the + * default (fletcher2/off). Snapshots don't need to know about + * checksum/compression/copies. + */ + if (ds) { + err = dsl_prop_register(ds, "primarycache", + primary_cache_changed_cb, osi); + if (err == 0) + err = dsl_prop_register(ds, "secondarycache", + secondary_cache_changed_cb, osi); + if (!dsl_dataset_is_snapshot(ds)) { + if (err == 0) + err = dsl_prop_register(ds, "checksum", + checksum_changed_cb, osi); + if (err == 0) + err = dsl_prop_register(ds, "compression", + compression_changed_cb, osi); + if (err == 0) + err = dsl_prop_register(ds, "copies", + copies_changed_cb, osi); + } + if (err) { + VERIFY(arc_buf_remove_ref(osi->os_phys_buf, + &osi->os_phys_buf) == 1); + kmem_free(osi, sizeof (objset_impl_t)); + return (err); + } + } else if (ds == NULL) { + /* It's the meta-objset. 
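+ * It has no DSL dataset to inherit properties from, so it gets fixed
+ * defaults here: fletcher4 checksums, lzjb compression, the maximum
+ * replication the pool supports, and caching enabled for both the
+ * primary and secondary (L2) caches.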
*/ + osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4; + osi->os_compress = ZIO_COMPRESS_LZJB; + osi->os_copies = spa_max_replication(spa); + osi->os_primary_cache = ZFS_CACHE_ALL; + osi->os_secondary_cache = ZFS_CACHE_ALL; + } + + osi->os_zil_header = osi->os_phys->os_zil_header; + osi->os_zil = zil_alloc(&osi->os, &osi->os_zil_header); + + for (i = 0; i < TXG_SIZE; i++) { + list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t), + offsetof(dnode_t, dn_dirty_link[i])); + list_create(&osi->os_free_dnodes[i], sizeof (dnode_t), + offsetof(dnode_t, dn_dirty_link[i])); + } + list_create(&osi->os_dnodes, sizeof (dnode_t), + offsetof(dnode_t, dn_link)); + list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t), + offsetof(dmu_buf_impl_t, db_link)); + + mutex_init(&osi->os_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&osi->os_obj_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&osi->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL); + + osi->os_meta_dnode = dnode_special_open(osi, + &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT); + + /* + * We should be the only thread trying to do this because we + * have ds_opening_lock + */ + if (ds) { + VERIFY(NULL == dsl_dataset_set_user_ptr(ds, osi, + dmu_objset_evict)); + } + + *osip = osi; + return (0); +} + +static int +dmu_objset_open_ds_os(dsl_dataset_t *ds, objset_t *os, dmu_objset_type_t type) +{ + objset_impl_t *osi; + + mutex_enter(&ds->ds_opening_lock); + osi = dsl_dataset_get_user_ptr(ds); + if (osi == NULL) { + int err; + + err = dmu_objset_open_impl(dsl_dataset_get_spa(ds), + ds, &ds->ds_phys->ds_bp, &osi); + if (err) { + mutex_exit(&ds->ds_opening_lock); + return (err); + } + } + mutex_exit(&ds->ds_opening_lock); + + os->os = osi; + os->os_mode = DS_MODE_NOHOLD; + + if (type != DMU_OST_ANY && type != os->os->os_phys->os_type) + return (EINVAL); + return (0); +} + +int +dmu_objset_open_ds(dsl_dataset_t *ds, dmu_objset_type_t type, objset_t **osp) +{ + objset_t *os; + int err; + + os = kmem_alloc(sizeof (objset_t), KM_SLEEP); + err = dmu_objset_open_ds_os(ds, os, type); + if (err) + kmem_free(os, sizeof (objset_t)); + else + *osp = os; + return (err); +} + +/* called from zpl */ +int +dmu_objset_open(const char *name, dmu_objset_type_t type, int mode, + objset_t **osp) +{ + objset_t *os; + dsl_dataset_t *ds; + int err; + + ASSERT(DS_MODE_TYPE(mode) == DS_MODE_USER || + DS_MODE_TYPE(mode) == DS_MODE_OWNER); + + os = kmem_alloc(sizeof (objset_t), KM_SLEEP); + if (DS_MODE_TYPE(mode) == DS_MODE_USER) + err = dsl_dataset_hold(name, os, &ds); + else + err = dsl_dataset_own(name, mode, os, &ds); + if (err) { + kmem_free(os, sizeof (objset_t)); + return (err); + } + + err = dmu_objset_open_ds_os(ds, os, type); + if (err) { + if (DS_MODE_TYPE(mode) == DS_MODE_USER) + dsl_dataset_rele(ds, os); + else + dsl_dataset_disown(ds, os); + kmem_free(os, sizeof (objset_t)); + } else { + os->os_mode = mode; + *osp = os; + } + return (err); +} + +void +dmu_objset_close(objset_t *os) +{ + ASSERT(DS_MODE_TYPE(os->os_mode) == DS_MODE_USER || + DS_MODE_TYPE(os->os_mode) == DS_MODE_OWNER || + DS_MODE_TYPE(os->os_mode) == DS_MODE_NOHOLD); + + if (DS_MODE_TYPE(os->os_mode) == DS_MODE_USER) + dsl_dataset_rele(os->os->os_dsl_dataset, os); + else if (DS_MODE_TYPE(os->os_mode) == DS_MODE_OWNER) + dsl_dataset_disown(os->os->os_dsl_dataset, os); + kmem_free(os, sizeof (objset_t)); +} + +int +dmu_objset_evict_dbufs(objset_t *os) +{ + objset_impl_t *osi = os->os; + dnode_t *dn; + + mutex_enter(&osi->os_lock); + + /* process the mdn last, since the other dnodes have holds on it 
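+ * (moving it to the tail of os_dnodes simply makes the eviction walk
+ * below visit the meta-dnode only after every other dnode has been
+ * handled)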
*/ + list_remove(&osi->os_dnodes, osi->os_meta_dnode); + list_insert_tail(&osi->os_dnodes, osi->os_meta_dnode); + + /* + * Find the first dnode with holds. We have to do this dance + * because dnode_add_ref() only works if you already have a + * hold. If there are no holds then it has no dbufs so OK to + * skip. + */ + for (dn = list_head(&osi->os_dnodes); + dn && !dnode_add_ref(dn, FTAG); + dn = list_next(&osi->os_dnodes, dn)) + continue; + + while (dn) { + dnode_t *next_dn = dn; + + do { + next_dn = list_next(&osi->os_dnodes, next_dn); + } while (next_dn && !dnode_add_ref(next_dn, FTAG)); + + mutex_exit(&osi->os_lock); + dnode_evict_dbufs(dn); + dnode_rele(dn, FTAG); + mutex_enter(&osi->os_lock); + dn = next_dn; + } + mutex_exit(&osi->os_lock); + return (list_head(&osi->os_dnodes) != osi->os_meta_dnode); +} + +void +dmu_objset_evict(dsl_dataset_t *ds, void *arg) +{ + objset_impl_t *osi = arg; + objset_t os; + int i; + + for (i = 0; i < TXG_SIZE; i++) { + ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL); + ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL); + } + + if (ds) { + if (!dsl_dataset_is_snapshot(ds)) { + VERIFY(0 == dsl_prop_unregister(ds, "checksum", + checksum_changed_cb, osi)); + VERIFY(0 == dsl_prop_unregister(ds, "compression", + compression_changed_cb, osi)); + VERIFY(0 == dsl_prop_unregister(ds, "copies", + copies_changed_cb, osi)); + } + VERIFY(0 == dsl_prop_unregister(ds, "primarycache", + primary_cache_changed_cb, osi)); + VERIFY(0 == dsl_prop_unregister(ds, "secondarycache", + secondary_cache_changed_cb, osi)); + } + + /* + * We should need only a single pass over the dnode list, since + * nothing can be added to the list at this point. + */ + os.os = osi; + (void) dmu_objset_evict_dbufs(&os); + + ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode); + ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode); + ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL); + + dnode_special_close(osi->os_meta_dnode); + zil_free(osi->os_zil); + + VERIFY(arc_buf_remove_ref(osi->os_phys_buf, &osi->os_phys_buf) == 1); + mutex_destroy(&osi->os_lock); + mutex_destroy(&osi->os_obj_lock); + mutex_destroy(&osi->os_user_ptr_lock); + kmem_free(osi, sizeof (objset_impl_t)); +} + +/* called from dsl for meta-objset */ +objset_impl_t * +dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, + dmu_objset_type_t type, dmu_tx_t *tx) +{ + objset_impl_t *osi; + dnode_t *mdn; + + ASSERT(dmu_tx_is_syncing(tx)); + if (ds) + mutex_enter(&ds->ds_opening_lock); + VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &osi)); + if (ds) + mutex_exit(&ds->ds_opening_lock); + mdn = osi->os_meta_dnode; + + dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT, + DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx); + + /* + * We don't want to have to increase the meta-dnode's nlevels + * later, because then we could do it in quescing context while + * we are also accessing it in open context. + * + * This precaution is not necessary for the MOS (ds == NULL), + * because the MOS is only updated in syncing context. + * This is most fortunate: the MOS is the only objset that + * needs to be synced multiple times as spa_sync() iterates + * to convergence, so minimizing its dn_nlevels matters. + */ + if (ds != NULL) { + int levels = 1; + + /* + * Determine the number of levels necessary for the meta-dnode + * to contain DN_MAX_OBJECT dnodes. 
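+ *
+ * A hedged worked example, assuming the usual constants (16K meta-dnode
+ * data blocks, 16K indirect blocks holding 128 block pointers of 128
+ * bytes each, 3 block pointers in the meta-dnode, 512-byte dnodes, and
+ * a 2^48 object limit): the capacity at a given depth is
+ * 3 << (14 + (levels - 1) * 7) bytes, and levels = 7 is the first depth
+ * whose capacity reaches 2^48 * 512 = 2^57 bytes.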
+ */ + while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift + + (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) < + DN_MAX_OBJECT * sizeof (dnode_phys_t)) + levels++; + + mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] = + mdn->dn_nlevels = levels; + } + + ASSERT(type != DMU_OST_NONE); + ASSERT(type != DMU_OST_ANY); + ASSERT(type < DMU_OST_NUMTYPES); + osi->os_phys->os_type = type; + + dsl_dataset_dirty(ds, tx); + + return (osi); +} + +struct oscarg { + void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx); + void *userarg; + dsl_dataset_t *clone_parent; + const char *lastname; + dmu_objset_type_t type; + uint64_t flags; +}; + +/*ARGSUSED*/ +static int +dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + struct oscarg *oa = arg2; + objset_t *mos = dd->dd_pool->dp_meta_objset; + int err; + uint64_t ddobj; + + err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj, + oa->lastname, sizeof (uint64_t), 1, &ddobj); + if (err != ENOENT) + return (err ? err : EEXIST); + + if (oa->clone_parent != NULL) { + /* + * You can't clone across pools. + */ + if (oa->clone_parent->ds_dir->dd_pool != dd->dd_pool) + return (EXDEV); + + /* + * You can only clone snapshots, not the head datasets. + */ + if (oa->clone_parent->ds_phys->ds_num_children == 0) + return (EINVAL); + } + + return (0); +} + +static void +dmu_objset_create_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + struct oscarg *oa = arg2; + dsl_dataset_t *ds; + blkptr_t *bp; + uint64_t dsobj; + + ASSERT(dmu_tx_is_syncing(tx)); + + dsobj = dsl_dataset_create_sync(dd, oa->lastname, + oa->clone_parent, oa->flags, cr, tx); + + VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj, FTAG, &ds)); + bp = dsl_dataset_get_blkptr(ds); + if (BP_IS_HOLE(bp)) { + objset_impl_t *osi; + + /* This is an empty dmu_objset; not a clone. */ + osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds), + ds, bp, oa->type, tx); + + if (oa->userfunc) + oa->userfunc(&osi->os, oa->userarg, cr, tx); + } + + spa_history_internal_log(LOG_DS_CREATE, dd->dd_pool->dp_spa, + tx, cr, "dataset = %llu", dsobj); + + dsl_dataset_rele(ds, FTAG); +} + +int +dmu_objset_create(const char *name, dmu_objset_type_t type, + objset_t *clone_parent, uint64_t flags, + void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg) +{ + dsl_dir_t *pdd; + const char *tail; + int err = 0; + struct oscarg oa = { 0 }; + + ASSERT(strchr(name, '@') == NULL); + err = dsl_dir_open(name, FTAG, &pdd, &tail); + if (err) + return (err); + if (tail == NULL) { + dsl_dir_close(pdd, FTAG); + return (EEXIST); + } + + dprintf("name=%s\n", name); + + oa.userfunc = func; + oa.userarg = arg; + oa.lastname = tail; + oa.type = type; + oa.flags = flags; + + if (clone_parent != NULL) { + /* + * You can't clone to a different type. + */ + if (clone_parent->os->os_phys->os_type != type) { + dsl_dir_close(pdd, FTAG); + return (EINVAL); + } + oa.clone_parent = clone_parent->os->os_dsl_dataset; + } + err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check, + dmu_objset_create_sync, pdd, &oa, 5); + dsl_dir_close(pdd, FTAG); + return (err); +} + +int +dmu_objset_destroy(const char *name) +{ + objset_t *os; + int error; + + /* + * If it looks like we'll be able to destroy it, and there's + * an unplayed replay log sitting around, destroy the log. + * It would be nicer to do this in dsl_dataset_destroy_sync(), + * but the replay log objset is modified in open context. 
+ */ + error = dmu_objset_open(name, DMU_OST_ANY, + DS_MODE_OWNER|DS_MODE_READONLY|DS_MODE_INCONSISTENT, &os); + if (error == 0) { + dsl_dataset_t *ds = os->os->os_dsl_dataset; + zil_destroy(dmu_objset_zil(os), B_FALSE); + + error = dsl_dataset_destroy(ds, os); + /* + * dsl_dataset_destroy() closes the ds. + */ + kmem_free(os, sizeof (objset_t)); + } + + return (error); +} + +/* + * This will close the objset. + */ +int +dmu_objset_rollback(objset_t *os) +{ + int err; + dsl_dataset_t *ds; + + ds = os->os->os_dsl_dataset; + + if (!dsl_dataset_tryown(ds, TRUE, os)) { + dmu_objset_close(os); + return (EBUSY); + } + + err = dsl_dataset_rollback(ds, os->os->os_phys->os_type); + + /* + * NB: we close the objset manually because the rollback + * actually implicitly called dmu_objset_evict(), thus freeing + * the objset_impl_t. + */ + dsl_dataset_disown(ds, os); + kmem_free(os, sizeof (objset_t)); + return (err); +} + +struct snaparg { + dsl_sync_task_group_t *dstg; + char *snapname; + char failed[MAXPATHLEN]; + boolean_t checkperms; + list_t objsets; +}; + +struct osnode { + list_node_t node; + objset_t *os; +}; + +static int +dmu_objset_snapshot_one(char *name, void *arg) +{ + struct snaparg *sn = arg; + objset_t *os; + int err; + + (void) strcpy(sn->failed, name); + + /* + * Check permissions only when requested. This only applies when + * doing a recursive snapshot. The permission checks for the starting + * dataset have already been performed in zfs_secpolicy_snapshot() + */ + if (sn->checkperms == B_TRUE && + (err = zfs_secpolicy_snapshot_perms(name, CRED()))) + return (err); + + err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_USER, &os); + if (err != 0) + return (err); + + /* If the objset is in an inconsistent state, return busy */ + if (os->os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) { + dmu_objset_close(os); + return (EBUSY); + } + + /* + * NB: we need to wait for all in-flight changes to get to disk, + * so that we snapshot those changes. zil_suspend does this as + * a side effect. 
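+ * The matching zil_resume() happens only after the whole snapshot sync
+ * task group has completed, in the cleanup loop at the end of
+ * dmu_objset_snapshot().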
+ */ + err = zil_suspend(dmu_objset_zil(os)); + if (err == 0) { + struct osnode *osn; + dsl_sync_task_create(sn->dstg, dsl_dataset_snapshot_check, + dsl_dataset_snapshot_sync, os->os->os_dsl_dataset, + sn->snapname, 3); + osn = kmem_alloc(sizeof (struct osnode), KM_SLEEP); + osn->os = os; + list_insert_tail(&sn->objsets, osn); + } else { + dmu_objset_close(os); + } + + return (err); +} + +int +dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive) +{ + dsl_sync_task_t *dst; + struct osnode *osn; + struct snaparg sn = { 0 }; + spa_t *spa; + int err; + + (void) strcpy(sn.failed, fsname); + + err = spa_open(fsname, &spa, FTAG); + if (err) + return (err); + + sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); + sn.snapname = snapname; + list_create(&sn.objsets, sizeof (struct osnode), + offsetof(struct osnode, node)); + + if (recursive) { + sn.checkperms = B_TRUE; + err = dmu_objset_find(fsname, + dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN); + } else { + sn.checkperms = B_FALSE; + err = dmu_objset_snapshot_one(fsname, &sn); + } + + if (err) + goto out; + + err = dsl_sync_task_group_wait(sn.dstg); + + for (dst = list_head(&sn.dstg->dstg_tasks); dst; + dst = list_next(&sn.dstg->dstg_tasks, dst)) { + dsl_dataset_t *ds = dst->dst_arg1; + if (dst->dst_err) + dsl_dataset_name(ds, sn.failed); + } + +out: + while (osn = list_head(&sn.objsets)) { + list_remove(&sn.objsets, osn); + zil_resume(dmu_objset_zil(osn->os)); + dmu_objset_close(osn->os); + kmem_free(osn, sizeof (struct osnode)); + } + list_destroy(&sn.objsets); + + if (err) + (void) strcpy(fsname, sn.failed); + dsl_sync_task_group_destroy(sn.dstg); + spa_close(spa, FTAG); + return (err); +} + +static void +dmu_objset_sync_dnodes(list_t *list, dmu_tx_t *tx) +{ + dnode_t *dn; + + while (dn = list_head(list)) { + ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); + ASSERT(dn->dn_dbuf->db_data_pending); + /* + * Initialize dn_zio outside dnode_sync() + * to accomodate meta-dnode + */ + dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio; + ASSERT(dn->dn_zio); + + ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS); + list_remove(list, dn); + dnode_sync(dn, tx); + } +} + +/* ARGSUSED */ +static void +ready(zio_t *zio, arc_buf_t *abuf, void *arg) +{ + blkptr_t *bp = zio->io_bp; + blkptr_t *bp_orig = &zio->io_bp_orig; + objset_impl_t *os = arg; + dnode_phys_t *dnp = &os->os_phys->os_meta_dnode; + + ASSERT(bp == os->os_rootbp); + ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET); + ASSERT(BP_GET_LEVEL(bp) == 0); + + /* + * Update rootbp fill count. + */ + bp->blk_fill = 1; /* count the meta-dnode */ + for (int i = 0; i < dnp->dn_nblkptr; i++) + bp->blk_fill += dnp->dn_blkptr[i].blk_fill; + + if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { + ASSERT(DVA_EQUAL(BP_IDENTITY(bp), BP_IDENTITY(bp_orig))); + } else { + if (zio->io_bp_orig.blk_birth == os->os_synctx->tx_txg) + (void) dsl_dataset_block_kill(os->os_dsl_dataset, + &zio->io_bp_orig, zio, os->os_synctx); + dsl_dataset_block_born(os->os_dsl_dataset, bp, os->os_synctx); + } +} + +/* called from dsl */ +void +dmu_objset_sync(objset_impl_t *os, zio_t *pio, dmu_tx_t *tx) +{ + int txgoff; + zbookmark_t zb; + writeprops_t wp = { 0 }; + zio_t *zio; + list_t *list; + dbuf_dirty_record_t *dr; + + dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg); + + ASSERT(dmu_tx_is_syncing(tx)); + /* XXX the write_done callback should really give us the tx... */ + os->os_synctx = tx; + + if (os->os_dsl_dataset == NULL) { + /* + * This is the MOS. 
If we have upgraded, + * spa_max_replication() could change, so reset + * os_copies here. + */ + os->os_copies = spa_max_replication(os->os_spa); + } + + /* + * Create the root block IO + */ + zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0; + zb.zb_object = 0; + zb.zb_level = -1; /* for block ordering; it's level 0 on disk */ + zb.zb_blkid = 0; + + wp.wp_type = DMU_OT_OBJSET; + wp.wp_level = 0; /* on-disk BP level; see above */ + wp.wp_copies = os->os_copies; + wp.wp_oschecksum = os->os_checksum; + wp.wp_oscompress = os->os_compress; + + if (BP_IS_OLDER(os->os_rootbp, tx->tx_txg)) { + (void) dsl_dataset_block_kill(os->os_dsl_dataset, + os->os_rootbp, pio, tx); + } + + arc_release(os->os_phys_buf, &os->os_phys_buf); + zio = arc_write(pio, os->os_spa, &wp, DMU_OS_IS_L2CACHEABLE(os), + tx->tx_txg, os->os_rootbp, os->os_phys_buf, ready, NULL, os, + ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); + + /* + * Sync meta-dnode - the parent IO for the sync is the root block + */ + os->os_meta_dnode->dn_zio = zio; + dnode_sync(os->os_meta_dnode, tx); + + txgoff = tx->tx_txg & TXG_MASK; + + dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], tx); + dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], tx); + + list = &os->os_meta_dnode->dn_dirty_records[txgoff]; + while (dr = list_head(list)) { + ASSERT(dr->dr_dbuf->db_level == 0); + list_remove(list, dr); + if (dr->dr_zio) + zio_nowait(dr->dr_zio); + } + /* + * Free intent log blocks up to this tx. + */ + zil_sync(os->os_zil, tx); + os->os_phys->os_zil_header = os->os_zil_header; + zio_nowait(zio); +} + +void +dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp, + uint64_t *usedobjsp, uint64_t *availobjsp) +{ + dsl_dataset_space(os->os->os_dsl_dataset, refdbytesp, availbytesp, + usedobjsp, availobjsp); +} + +uint64_t +dmu_objset_fsid_guid(objset_t *os) +{ + return (dsl_dataset_fsid_guid(os->os->os_dsl_dataset)); +} + +void +dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat) +{ + stat->dds_type = os->os->os_phys->os_type; + if (os->os->os_dsl_dataset) + dsl_dataset_fast_stat(os->os->os_dsl_dataset, stat); +} + +void +dmu_objset_stats(objset_t *os, nvlist_t *nv) +{ + ASSERT(os->os->os_dsl_dataset || + os->os->os_phys->os_type == DMU_OST_META); + + if (os->os->os_dsl_dataset != NULL) + dsl_dataset_stats(os->os->os_dsl_dataset, nv); + + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE, + os->os->os_phys->os_type); +} + +int +dmu_objset_is_snapshot(objset_t *os) +{ + if (os->os->os_dsl_dataset != NULL) + return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset)); + else + return (B_FALSE); +} + +int +dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen, + boolean_t *conflict) +{ + dsl_dataset_t *ds = os->os->os_dsl_dataset; + uint64_t ignored; + + if (ds->ds_phys->ds_snapnames_zapobj == 0) + return (ENOENT); + + return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset, + ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST, + real, maxlen, conflict)); +} + +int +dmu_snapshot_list_next(objset_t *os, int namelen, char *name, + uint64_t *idp, uint64_t *offp, boolean_t *case_conflict) +{ + dsl_dataset_t *ds = os->os->os_dsl_dataset; + zap_cursor_t cursor; + zap_attribute_t attr; + + if (ds->ds_phys->ds_snapnames_zapobj == 0) + return (ENOENT); + + zap_cursor_init_serialized(&cursor, + ds->ds_dir->dd_pool->dp_meta_objset, + ds->ds_phys->ds_snapnames_zapobj, *offp); + + if (zap_cursor_retrieve(&cursor, &attr) != 0) { + zap_cursor_fini(&cursor); + return (ENOENT); + } + + if 
(strlen(attr.za_name) + 1 > namelen) { + zap_cursor_fini(&cursor); + return (ENAMETOOLONG); + } + + (void) strcpy(name, attr.za_name); + if (idp) + *idp = attr.za_first_integer; + if (case_conflict) + *case_conflict = attr.za_normalization_conflict; + zap_cursor_advance(&cursor); + *offp = zap_cursor_serialize(&cursor); + zap_cursor_fini(&cursor); + + return (0); +} + +int +dmu_dir_list_next(objset_t *os, int namelen, char *name, + uint64_t *idp, uint64_t *offp) +{ + dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir; + zap_cursor_t cursor; + zap_attribute_t attr; + + /* there is no next dir on a snapshot! */ + if (os->os->os_dsl_dataset->ds_object != + dd->dd_phys->dd_head_dataset_obj) + return (ENOENT); + + zap_cursor_init_serialized(&cursor, + dd->dd_pool->dp_meta_objset, + dd->dd_phys->dd_child_dir_zapobj, *offp); + + if (zap_cursor_retrieve(&cursor, &attr) != 0) { + zap_cursor_fini(&cursor); + return (ENOENT); + } + + if (strlen(attr.za_name) + 1 > namelen) { + zap_cursor_fini(&cursor); + return (ENAMETOOLONG); + } + + (void) strcpy(name, attr.za_name); + if (idp) + *idp = attr.za_first_integer; + zap_cursor_advance(&cursor); + *offp = zap_cursor_serialize(&cursor); + zap_cursor_fini(&cursor); + + return (0); +} + +struct findarg { + int (*func)(char *, void *); + void *arg; +}; + +/* ARGSUSED */ +static int +findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg) +{ + struct findarg *fa = arg; + return (fa->func((char *)dsname, fa->arg)); +} + +/* + * Find all objsets under name, and for each, call 'func(child_name, arg)'. + * Perhaps change all callers to use dmu_objset_find_spa()? + */ +int +dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags) +{ + struct findarg fa; + fa.func = func; + fa.arg = arg; + return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags)); +} + +/* + * Find all objsets under name, call func on each + */ +int +dmu_objset_find_spa(spa_t *spa, const char *name, + int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags) +{ + dsl_dir_t *dd; + dsl_pool_t *dp; + dsl_dataset_t *ds; + zap_cursor_t zc; + zap_attribute_t *attr; + char *child; + uint64_t thisobj; + int err; + + if (name == NULL) + name = spa_name(spa); + err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL); + if (err) + return (err); + + /* Don't visit hidden ($MOS & $ORIGIN) objsets. */ + if (dd->dd_myname[0] == '$') { + dsl_dir_close(dd, FTAG); + return (0); + } + + thisobj = dd->dd_phys->dd_head_dataset_obj; + attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); + dp = dd->dd_pool; + + /* + * Iterate over all children. + */ + if (flags & DS_FIND_CHILDREN) { + for (zap_cursor_init(&zc, dp->dp_meta_objset, + dd->dd_phys->dd_child_dir_zapobj); + zap_cursor_retrieve(&zc, attr) == 0; + (void) zap_cursor_advance(&zc)) { + ASSERT(attr->za_integer_length == sizeof (uint64_t)); + ASSERT(attr->za_num_integers == 1); + + child = kmem_alloc(MAXPATHLEN, KM_SLEEP); + (void) strcpy(child, name); + (void) strcat(child, "/"); + (void) strcat(child, attr->za_name); + err = dmu_objset_find_spa(spa, child, func, arg, flags); + kmem_free(child, MAXPATHLEN); + if (err) + break; + } + zap_cursor_fini(&zc); + + if (err) { + dsl_dir_close(dd, FTAG); + kmem_free(attr, sizeof (zap_attribute_t)); + return (err); + } + } + + /* + * Iterate over all snapshots. 
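+ * Snapshots are handed to the callback as dataset@snapname, just as
+ * children above were built up as parent/child.  A hedged sketch of a
+ * dmu_objset_find() caller that sees both forms:
+ *
+ *	static int
+ *	visit_cb(char *name, void *arg)
+ *	{
+ *		... name is "pool/fs", "pool/fs/child" or "pool/fs@snap" ...
+ *		return (0);
+ *	}
+ *
+ *	err = dmu_objset_find(fsname, visit_cb, arg,
+ *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);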
+ */ + if (flags & DS_FIND_SNAPSHOTS) { + if (!dsl_pool_sync_context(dp)) + rw_enter(&dp->dp_config_rwlock, RW_READER); + err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); + if (!dsl_pool_sync_context(dp)) + rw_exit(&dp->dp_config_rwlock); + + if (err == 0) { + uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; + dsl_dataset_rele(ds, FTAG); + + for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj); + zap_cursor_retrieve(&zc, attr) == 0; + (void) zap_cursor_advance(&zc)) { + ASSERT(attr->za_integer_length == + sizeof (uint64_t)); + ASSERT(attr->za_num_integers == 1); + + child = kmem_alloc(MAXPATHLEN, KM_SLEEP); + (void) strcpy(child, name); + (void) strcat(child, "@"); + (void) strcat(child, attr->za_name); + err = func(spa, attr->za_first_integer, + child, arg); + kmem_free(child, MAXPATHLEN); + if (err) + break; + } + zap_cursor_fini(&zc); + } + } + + dsl_dir_close(dd, FTAG); + kmem_free(attr, sizeof (zap_attribute_t)); + + if (err) + return (err); + + /* + * Apply to self if appropriate. + */ + err = func(spa, thisobj, name, arg); + return (err); +} + +/* ARGSUSED */ +int +dmu_objset_prefetch(char *name, void *arg) +{ + objset_t *os; + dsl_dataset_t *ds; + + os = kmem_alloc(sizeof (objset_t), KM_SLEEP); + if (dsl_dataset_hold(name, os, &ds)) { + kmem_free(os, sizeof (objset_t)); + return (0); + } + + if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) { + uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH; + zbookmark_t zb; + + zb.zb_objset = ds->ds_object; + zb.zb_object = 0; + zb.zb_level = -1; + zb.zb_blkid = 0; + + (void) arc_read_nolock(NULL, dsl_dataset_get_spa(ds), + &ds->ds_phys->ds_bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ, + ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, &zb); + } + + dsl_dataset_rele(ds, os); + kmem_free(os, sizeof (objset_t)); + return (0); +} + +void +dmu_objset_set_user(objset_t *os, void *user_ptr) +{ + ASSERT(MUTEX_HELD(&os->os->os_user_ptr_lock)); + os->os->os_user_ptr = user_ptr; +} + +void * +dmu_objset_get_user(objset_t *os) +{ + ASSERT(MUTEX_HELD(&os->os->os_user_ptr_lock)); + return (os->os->os_user_ptr); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_send.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_send.c new file mode 100644 index 000000000000..857b9a343fd2 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_send.c @@ -0,0 +1,1181 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char *dmu_recv_tag = "dmu_recv_tag"; + +struct backuparg { + dmu_replay_record_t *drr; + vnode_t *vp; + offset_t *off; + objset_t *os; + zio_cksum_t zc; + int err; +}; + +static int +dump_bytes(struct backuparg *ba, void *buf, int len) +{ + ssize_t resid; /* have to get resid to get detailed errno */ + ASSERT3U(len % 8, ==, 0); + + fletcher_4_incremental_native(buf, len, &ba->zc); + ba->err = vn_rdwr(UIO_WRITE, ba->vp, + (caddr_t)buf, len, + 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid); + *ba->off += len; + return (ba->err); +} + +static int +dump_free(struct backuparg *ba, uint64_t object, uint64_t offset, + uint64_t length) +{ + /* write a FREE record */ + bzero(ba->drr, sizeof (dmu_replay_record_t)); + ba->drr->drr_type = DRR_FREE; + ba->drr->drr_u.drr_free.drr_object = object; + ba->drr->drr_u.drr_free.drr_offset = offset; + ba->drr->drr_u.drr_free.drr_length = length; + + if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t))) + return (EINTR); + return (0); +} + +static int +dump_data(struct backuparg *ba, dmu_object_type_t type, + uint64_t object, uint64_t offset, int blksz, void *data) +{ + /* write a DATA record */ + bzero(ba->drr, sizeof (dmu_replay_record_t)); + ba->drr->drr_type = DRR_WRITE; + ba->drr->drr_u.drr_write.drr_object = object; + ba->drr->drr_u.drr_write.drr_type = type; + ba->drr->drr_u.drr_write.drr_offset = offset; + ba->drr->drr_u.drr_write.drr_length = blksz; + + if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t))) + return (EINTR); + if (dump_bytes(ba, data, blksz)) + return (EINTR); + return (0); +} + +static int +dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs) +{ + /* write a FREEOBJECTS record */ + bzero(ba->drr, sizeof (dmu_replay_record_t)); + ba->drr->drr_type = DRR_FREEOBJECTS; + ba->drr->drr_u.drr_freeobjects.drr_firstobj = firstobj; + ba->drr->drr_u.drr_freeobjects.drr_numobjs = numobjs; + + if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t))) + return (EINTR); + return (0); +} + +static int +dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp) +{ + if (dnp == NULL || dnp->dn_type == DMU_OT_NONE) + return (dump_freeobjects(ba, object, 1)); + + /* write an OBJECT record */ + bzero(ba->drr, sizeof (dmu_replay_record_t)); + ba->drr->drr_type = DRR_OBJECT; + ba->drr->drr_u.drr_object.drr_object = object; + ba->drr->drr_u.drr_object.drr_type = dnp->dn_type; + ba->drr->drr_u.drr_object.drr_bonustype = dnp->dn_bonustype; + ba->drr->drr_u.drr_object.drr_blksz = + dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT; + ba->drr->drr_u.drr_object.drr_bonuslen = dnp->dn_bonuslen; + ba->drr->drr_u.drr_object.drr_checksum = dnp->dn_checksum; + ba->drr->drr_u.drr_object.drr_compress = dnp->dn_compress; + + if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t))) + return (EINTR); + + if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8))) + return (EINTR); + + /* free anything past the end of the file */ + if (dump_free(ba, object, (dnp->dn_maxblkid + 1) * + (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL)) + return (EINTR); + if (ba->err) + return (EINTR); + return (0); +} + +#define BP_SPAN(dnp, level) \ + (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \ + (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) + +static int +backup_cb(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb, + const dnode_phys_t *dnp, void 
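/*
 * A minimal worked example of BP_SPAN(), assuming 128K data blocks and
 * 16K indirect blocks (dn_datablkszsec = 256, dn_indblkshift = 14,
 * SPA_MINBLOCKSHIFT = 9, SPA_BLKPTRSHIFT = 7, so each indirect block
 * maps 2^7 = 128 children).  This is what lets backup_cb() below turn a
 * single hole blkptr at level > 0 into one FREE record covering the
 * whole span:
 *
 *	BP_SPAN(dnp, 0) = 256 << 9		= 128K (one data block)
 *	BP_SPAN(dnp, 1) = 256 << (9 + 1*7)	= 16M  (128 data blocks)
 *	BP_SPAN(dnp, 2) = 256 << (9 + 2*7)	= 2G   (128^2 data blocks)
 */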
*arg) +{ + struct backuparg *ba = arg; + dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE; + int err = 0; + + if (issig(JUSTLOOKING) && issig(FORREAL)) + return (EINTR); + + if (bp == NULL && zb->zb_object == 0) { + uint64_t span = BP_SPAN(dnp, zb->zb_level); + uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT; + err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT); + } else if (bp == NULL) { + uint64_t span = BP_SPAN(dnp, zb->zb_level); + err = dump_free(ba, zb->zb_object, zb->zb_blkid * span, span); + } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) { + return (0); + } else if (type == DMU_OT_DNODE) { + dnode_phys_t *blk; + int i; + int blksz = BP_GET_LSIZE(bp); + uint32_t aflags = ARC_WAIT; + arc_buf_t *abuf; + + if (arc_read_nolock(NULL, spa, bp, + arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ, + ZIO_FLAG_CANFAIL, &aflags, zb) != 0) + return (EIO); + + blk = abuf->b_data; + for (i = 0; i < blksz >> DNODE_SHIFT; i++) { + uint64_t dnobj = (zb->zb_blkid << + (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i; + err = dump_dnode(ba, dnobj, blk+i); + if (err) + break; + } + (void) arc_buf_remove_ref(abuf, &abuf); + } else { /* it's a level-0 block of a regular object */ + uint32_t aflags = ARC_WAIT; + arc_buf_t *abuf; + int blksz = BP_GET_LSIZE(bp); + + if (arc_read_nolock(NULL, spa, bp, + arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ, + ZIO_FLAG_CANFAIL, &aflags, zb) != 0) + return (EIO); + + err = dump_data(ba, type, zb->zb_object, zb->zb_blkid * blksz, + blksz, abuf->b_data); + (void) arc_buf_remove_ref(abuf, &abuf); + } + + ASSERT(err == 0 || err == EINTR); + return (err); +} + +int +dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin, + vnode_t *vp, offset_t *off) +{ + dsl_dataset_t *ds = tosnap->os->os_dsl_dataset; + dsl_dataset_t *fromds = fromsnap ? 
fromsnap->os->os_dsl_dataset : NULL; + dmu_replay_record_t *drr; + struct backuparg ba; + int err; + uint64_t fromtxg = 0; + + /* tosnap must be a snapshot */ + if (ds->ds_phys->ds_next_snap_obj == 0) + return (EINVAL); + + /* fromsnap must be an earlier snapshot from the same fs as tosnap */ + if (fromds && (ds->ds_dir != fromds->ds_dir || + fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg)) + return (EXDEV); + + if (fromorigin) { + dsl_pool_t *dp = ds->ds_dir->dd_pool; + + if (fromsnap) + return (EINVAL); + + if (dsl_dir_is_clone(ds->ds_dir)) { + rw_enter(&dp->dp_config_rwlock, RW_READER); + err = dsl_dataset_hold_obj(dp, + ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds); + rw_exit(&dp->dp_config_rwlock); + if (err) + return (err); + } else { + fromorigin = B_FALSE; + } + } + + + drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP); + drr->drr_type = DRR_BEGIN; + drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC; + drr->drr_u.drr_begin.drr_version = DMU_BACKUP_STREAM_VERSION; + drr->drr_u.drr_begin.drr_creation_time = + ds->ds_phys->ds_creation_time; + drr->drr_u.drr_begin.drr_type = tosnap->os->os_phys->os_type; + if (fromorigin) + drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE; + drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid; + if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) + drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA; + + if (fromds) + drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid; + dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname); + + if (fromds) + fromtxg = fromds->ds_phys->ds_creation_txg; + if (fromorigin) + dsl_dataset_rele(fromds, FTAG); + + ba.drr = drr; + ba.vp = vp; + ba.os = tosnap; + ba.off = off; + ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0); + + if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t))) { + kmem_free(drr, sizeof (dmu_replay_record_t)); + return (ba.err); + } + + err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH, + backup_cb, &ba); + + if (err) { + if (err == EINTR && ba.err) + err = ba.err; + kmem_free(drr, sizeof (dmu_replay_record_t)); + return (err); + } + + bzero(drr, sizeof (dmu_replay_record_t)); + drr->drr_type = DRR_END; + drr->drr_u.drr_end.drr_checksum = ba.zc; + + if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t))) { + kmem_free(drr, sizeof (dmu_replay_record_t)); + return (ba.err); + } + + kmem_free(drr, sizeof (dmu_replay_record_t)); + + return (0); +} + +struct recvbeginsyncarg { + const char *tofs; + const char *tosnap; + dsl_dataset_t *origin; + uint64_t fromguid; + dmu_objset_type_t type; + void *tag; + boolean_t force; + uint64_t dsflags; + char clonelastname[MAXNAMELEN]; + dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */ +}; + +static dsl_dataset_t * +recv_full_sync_impl(dsl_pool_t *dp, uint64_t dsobj, dmu_objset_type_t type, + cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds; + + /* This should always work, since we just created it */ + /* XXX - create should return an owned ds */ + VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, + DS_MODE_INCONSISTENT, dmu_recv_tag, &ds)); + + if (type != DMU_OST_NONE) { + (void) dmu_objset_create_impl(dp->dp_spa, + ds, &ds->ds_phys->ds_bp, type, tx); + } + + spa_history_internal_log(LOG_DS_REPLAY_FULL_SYNC, + dp->dp_spa, tx, cr, "dataset = %lld", dsobj); + + return (ds); +} + +/* ARGSUSED */ +static int +recv_full_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + struct recvbeginsyncarg *rbsa = arg2; + objset_t *mos = dd->dd_pool->dp_meta_objset; + uint64_t val; + int err; + + err = 
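/*
 * A sketch of the stream layout produced by dmu_sendbackup() above:
 *
 *	DRR_BEGIN	magic, stream version, creation time, type, flags,
 *			toguid, fromguid, toname
 *	DRR_OBJECT / DRR_FREEOBJECTS / DRR_WRITE / DRR_FREE ...
 *			emitted by backup_cb() for the dnode blocks, holes
 *			and level-0 data blocks visited by
 *			traverse_dataset() in TRAVERSE_PRE order, each
 *			followed by its payload (bonus buffer or data
 *			block) where applicable
 *	DRR_END		drr_checksum = running fletcher-4 of every byte
 *			written before this record
 *
 * The receive side (dmu_recv_stream) recomputes the same running checksum
 * and compares it against DRR_END, which is why dump_bytes() folds each
 * buffer into ba.zc before writing it.
 */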
zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj, + strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val); + + if (err != ENOENT) + return (err ? err : EEXIST); + + if (rbsa->origin) { + /* make sure it's a snap in the same pool */ + if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool) + return (EXDEV); + if (rbsa->origin->ds_phys->ds_num_children == 0) + return (EINVAL); + if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid) + return (ENODEV); + } + + return (0); +} + +static void +recv_full_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + struct recvbeginsyncarg *rbsa = arg2; + uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags; + uint64_t dsobj; + + dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1, + rbsa->origin, flags, cr, tx); + + rbsa->ds = recv_full_sync_impl(dd->dd_pool, dsobj, + rbsa->origin ? DMU_OST_NONE : rbsa->type, cr, tx); +} + +static int +recv_full_existing_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + struct recvbeginsyncarg *rbsa = arg2; + int err; + + /* must be a head ds */ + if (ds->ds_phys->ds_next_snap_obj != 0) + return (EINVAL); + + /* must not be a clone ds */ + if (dsl_dir_is_clone(ds->ds_dir)) + return (EINVAL); + + err = dsl_dataset_destroy_check(ds, rbsa->tag, tx); + if (err) + return (err); + + if (rbsa->origin) { + /* make sure it's a snap in the same pool */ + if (rbsa->origin->ds_dir->dd_pool != ds->ds_dir->dd_pool) + return (EXDEV); + if (rbsa->origin->ds_phys->ds_num_children == 0) + return (EINVAL); + if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid) + return (ENODEV); + } + + return (0); +} + +static void +recv_full_existing_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + struct recvbeginsyncarg *rbsa = arg2; + dsl_dir_t *dd = ds->ds_dir; + uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags; + uint64_t dsobj; + + /* + * NB: caller must provide an extra hold on the dsl_dir_t, so it + * won't go away when dsl_dataset_destroy_sync() closes the + * dataset. + */ + dsl_dataset_destroy_sync(ds, rbsa->tag, cr, tx); + + dsobj = dsl_dataset_create_sync_dd(dd, rbsa->origin, flags, tx); + + rbsa->ds = recv_full_sync_impl(dd->dd_pool, dsobj, + rbsa->origin ? 
DMU_OST_NONE : rbsa->type, cr, tx); +} + +/* ARGSUSED */ +static int +recv_incremental_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + struct recvbeginsyncarg *rbsa = arg2; + int err; + uint64_t val; + + /* must not have any changes since most recent snapshot */ + if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds)) + return (ETXTBSY); + + /* must already be a snapshot of this fs */ + if (ds->ds_phys->ds_prev_snap_obj == 0) + return (ENODEV); + + /* most recent snapshot must match fromguid */ + if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) + return (ENODEV); + + /* temporary clone name must not exist */ + err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, + ds->ds_dir->dd_phys->dd_child_dir_zapobj, + rbsa->clonelastname, 8, 1, &val); + if (err == 0) + return (EEXIST); + if (err != ENOENT) + return (err); + + /* new snapshot name must not exist */ + err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, + ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val); + if (err == 0) + return (EEXIST); + if (err != ENOENT) + return (err); + return (0); +} + +/* ARGSUSED */ +static void +recv_online_incremental_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ohds = arg1; + struct recvbeginsyncarg *rbsa = arg2; + dsl_pool_t *dp = ohds->ds_dir->dd_pool; + dsl_dataset_t *ods, *cds; + uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags; + uint64_t dsobj; + + /* create the temporary clone */ + VERIFY(0 == dsl_dataset_hold_obj(dp, ohds->ds_phys->ds_prev_snap_obj, + FTAG, &ods)); + dsobj = dsl_dataset_create_sync(ohds->ds_dir, + rbsa->clonelastname, ods, flags, cr, tx); + dsl_dataset_rele(ods, FTAG); + + /* open the temporary clone */ + VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, + DS_MODE_INCONSISTENT, dmu_recv_tag, &cds)); + + /* copy the refquota from the target fs to the clone */ + if (ohds->ds_quota > 0) + dsl_dataset_set_quota_sync(cds, &ohds->ds_quota, cr, tx); + + rbsa->ds = cds; + + spa_history_internal_log(LOG_DS_REPLAY_INC_SYNC, + dp->dp_spa, tx, cr, "dataset = %lld", dsobj); +} + +/* ARGSUSED */ +static void +recv_offline_incremental_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + + dmu_buf_will_dirty(ds->ds_dbuf, tx); + ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT; + + spa_history_internal_log(LOG_DS_REPLAY_INC_SYNC, + ds->ds_dir->dd_pool->dp_spa, tx, cr, "dataset = %lld", + ds->ds_object); +} + +/* + * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin() + * succeeds; otherwise we will leak the holds on the datasets. + */ +int +dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb, + boolean_t force, objset_t *origin, boolean_t online, dmu_recv_cookie_t *drc) +{ + int err = 0; + boolean_t byteswap; + struct recvbeginsyncarg rbsa; + uint64_t version; + int flags; + dsl_dataset_t *ds; + + if (drrb->drr_magic == DMU_BACKUP_MAGIC) + byteswap = FALSE; + else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) + byteswap = TRUE; + else + return (EINVAL); + + rbsa.tofs = tofs; + rbsa.tosnap = tosnap; + rbsa.origin = origin ? 
origin->os->os_dsl_dataset : NULL; + rbsa.fromguid = drrb->drr_fromguid; + rbsa.type = drrb->drr_type; + rbsa.tag = FTAG; + rbsa.dsflags = 0; + version = drrb->drr_version; + flags = drrb->drr_flags; + + if (byteswap) { + rbsa.type = BSWAP_32(rbsa.type); + rbsa.fromguid = BSWAP_64(rbsa.fromguid); + version = BSWAP_64(version); + flags = BSWAP_32(flags); + } + + if (version != DMU_BACKUP_STREAM_VERSION || + rbsa.type >= DMU_OST_NUMTYPES || + ((flags & DRR_FLAG_CLONE) && origin == NULL)) + return (EINVAL); + + if (flags & DRR_FLAG_CI_DATA) + rbsa.dsflags = DS_FLAG_CI_DATASET; + + bzero(drc, sizeof (dmu_recv_cookie_t)); + drc->drc_drrb = drrb; + drc->drc_tosnap = tosnap; + drc->drc_force = force; + + /* + * Process the begin in syncing context. + */ + if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE) && !online) { + /* offline incremental receive */ + err = dsl_dataset_own(tofs, 0, dmu_recv_tag, &ds); + if (err) + return (err); + + /* + * Only do the rollback if the most recent snapshot + * matches the incremental source + */ + if (force) { + if (ds->ds_prev == NULL || + ds->ds_prev->ds_phys->ds_guid != + rbsa.fromguid) { + dsl_dataset_disown(ds, dmu_recv_tag); + return (ENODEV); + } + (void) dsl_dataset_rollback(ds, DMU_OST_NONE); + } + rbsa.force = B_FALSE; + err = dsl_sync_task_do(ds->ds_dir->dd_pool, + recv_incremental_check, + recv_offline_incremental_sync, ds, &rbsa, 1); + if (err) { + dsl_dataset_disown(ds, dmu_recv_tag); + return (err); + } + drc->drc_logical_ds = drc->drc_real_ds = ds; + } else if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE)) { + /* online incremental receive */ + + /* tmp clone name is: tofs/%tosnap" */ + (void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname), + "%%%s", tosnap); + + /* open the dataset we are logically receiving into */ + err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds); + if (err) + return (err); + + rbsa.force = force; + err = dsl_sync_task_do(ds->ds_dir->dd_pool, + recv_incremental_check, + recv_online_incremental_sync, ds, &rbsa, 5); + if (err) { + dsl_dataset_rele(ds, dmu_recv_tag); + return (err); + } + drc->drc_logical_ds = ds; + drc->drc_real_ds = rbsa.ds; + } else { + /* create new fs -- full backup or clone */ + dsl_dir_t *dd = NULL; + const char *tail; + + err = dsl_dir_open(tofs, FTAG, &dd, &tail); + if (err) + return (err); + if (tail == NULL) { + if (!force) { + dsl_dir_close(dd, FTAG); + return (EEXIST); + } + + rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); + err = dsl_dataset_own_obj(dd->dd_pool, + dd->dd_phys->dd_head_dataset_obj, + DS_MODE_INCONSISTENT, FTAG, &ds); + rw_exit(&dd->dd_pool->dp_config_rwlock); + if (err) { + dsl_dir_close(dd, FTAG); + return (err); + } + + dsl_dataset_make_exclusive(ds, FTAG); + err = dsl_sync_task_do(dd->dd_pool, + recv_full_existing_check, + recv_full_existing_sync, ds, &rbsa, 5); + dsl_dataset_disown(ds, FTAG); + } else { + err = dsl_sync_task_do(dd->dd_pool, recv_full_check, + recv_full_sync, dd, &rbsa, 5); + } + dsl_dir_close(dd, FTAG); + if (err) + return (err); + drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds; + drc->drc_newfs = B_TRUE; + } + + return (0); +} + +struct restorearg { + int err; + int byteswap; + vnode_t *vp; + char *buf; + uint64_t voff; + int bufsize; /* amount of memory allocated for buf */ + zio_cksum_t cksum; +}; + +static void * +restore_read(struct restorearg *ra, int len) +{ + void *rv; + int done = 0; + + /* some things will require 8-byte alignment, so everything must */ + ASSERT3U(len % 8, ==, 0); + + while (done < len) { + ssize_t resid; + + ra->err = 
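/*
 * Recap, in sketch form, of the three setups dmu_recv_begin() above can
 * leave behind (drc_real_ds is what the records are restored into,
 * drc_logical_ds is what the caller ultimately sees):
 *
 *	offline incremental:	the target fs itself is owned and marked
 *				DS_FLAG_INCONSISTENT; real == logical.
 *	online incremental:	a temporary clone "tofs/%tosnap" of the most
 *				recent snapshot is created; real = clone,
 *				logical = target fs, swapped by
 *				dsl_dataset_clone_swap() in dmu_recv_end().
 *	full / clone:		a new (or replacement) dataset is created in
 *				the inconsistent state; real == logical and
 *				drc_newfs is set.
 */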
vn_rdwr(UIO_READ, ra->vp, + (caddr_t)ra->buf + done, len - done, + ra->voff, UIO_SYSSPACE, FAPPEND, + RLIM64_INFINITY, CRED(), &resid); + + if (resid == len - done) + ra->err = EINVAL; + ra->voff += len - done - resid; + done = len - resid; + if (ra->err) + return (NULL); + } + + ASSERT3U(done, ==, len); + rv = ra->buf; + if (ra->byteswap) + fletcher_4_incremental_byteswap(rv, len, &ra->cksum); + else + fletcher_4_incremental_native(rv, len, &ra->cksum); + return (rv); +} + +static void +backup_byteswap(dmu_replay_record_t *drr) +{ +#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X)) +#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X)) + drr->drr_type = BSWAP_32(drr->drr_type); + drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen); + switch (drr->drr_type) { + case DRR_BEGIN: + DO64(drr_begin.drr_magic); + DO64(drr_begin.drr_version); + DO64(drr_begin.drr_creation_time); + DO32(drr_begin.drr_type); + DO32(drr_begin.drr_flags); + DO64(drr_begin.drr_toguid); + DO64(drr_begin.drr_fromguid); + break; + case DRR_OBJECT: + DO64(drr_object.drr_object); + /* DO64(drr_object.drr_allocation_txg); */ + DO32(drr_object.drr_type); + DO32(drr_object.drr_bonustype); + DO32(drr_object.drr_blksz); + DO32(drr_object.drr_bonuslen); + break; + case DRR_FREEOBJECTS: + DO64(drr_freeobjects.drr_firstobj); + DO64(drr_freeobjects.drr_numobjs); + break; + case DRR_WRITE: + DO64(drr_write.drr_object); + DO32(drr_write.drr_type); + DO64(drr_write.drr_offset); + DO64(drr_write.drr_length); + break; + case DRR_FREE: + DO64(drr_free.drr_object); + DO64(drr_free.drr_offset); + DO64(drr_free.drr_length); + break; + case DRR_END: + DO64(drr_end.drr_checksum.zc_word[0]); + DO64(drr_end.drr_checksum.zc_word[1]); + DO64(drr_end.drr_checksum.zc_word[2]); + DO64(drr_end.drr_checksum.zc_word[3]); + break; + } +#undef DO64 +#undef DO32 +} + +static int +restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro) +{ + int err; + dmu_tx_t *tx; + void *data = NULL; + + err = dmu_object_info(os, drro->drr_object, NULL); + + if (err != 0 && err != ENOENT) + return (EINVAL); + + if (drro->drr_type == DMU_OT_NONE || + drro->drr_type >= DMU_OT_NUMTYPES || + drro->drr_bonustype >= DMU_OT_NUMTYPES || + drro->drr_checksum >= ZIO_CHECKSUM_FUNCTIONS || + drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS || + P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) || + drro->drr_blksz < SPA_MINBLOCKSIZE || + drro->drr_blksz > SPA_MAXBLOCKSIZE || + drro->drr_bonuslen > DN_MAX_BONUSLEN) { + return (EINVAL); + } + + if (drro->drr_bonuslen) { + data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8)); + if (ra->err) + return (ra->err); + } + + tx = dmu_tx_create(os); + + if (err == ENOENT) { + /* currently free, want to be allocated */ + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1); + err = dmu_tx_assign(tx, TXG_WAIT); + if (err) { + dmu_tx_abort(tx); + return (err); + } + err = dmu_object_claim(os, drro->drr_object, + drro->drr_type, drro->drr_blksz, + drro->drr_bonustype, drro->drr_bonuslen, tx); + } else { + /* currently allocated, want to be allocated */ + dmu_tx_hold_bonus(tx, drro->drr_object); + /* + * We may change blocksize, so need to + * hold_write + */ + dmu_tx_hold_write(tx, drro->drr_object, 0, 1); + err = dmu_tx_assign(tx, TXG_WAIT); + if (err) { + dmu_tx_abort(tx); + return (err); + } + + err = dmu_object_reclaim(os, drro->drr_object, + drro->drr_type, drro->drr_blksz, + drro->drr_bonustype, drro->drr_bonuslen, tx); + } + if (err) { + dmu_tx_commit(tx); + return (EINVAL); + } + + 
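/*
 * Endianness note: backup_byteswap() above swaps only the fixed-layout
 * record headers (DO64/DO32 on each field).  Variable payloads, i.e. the
 * bonus buffer copied in just below and the data blocks in
 * restore_write(), are swapped with the per-type
 * dmu_ot[type].ot_byteswap() routine, since their layout depends on the
 * DMU object type.
 */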
dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksum, tx); + dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx); + + if (data != NULL) { + dmu_buf_t *db; + + VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db)); + dmu_buf_will_dirty(db, tx); + + ASSERT3U(db->db_size, >=, drro->drr_bonuslen); + bcopy(data, db->db_data, drro->drr_bonuslen); + if (ra->byteswap) { + dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data, + drro->drr_bonuslen); + } + dmu_buf_rele(db, FTAG); + } + dmu_tx_commit(tx); + return (0); +} + +/* ARGSUSED */ +static int +restore_freeobjects(struct restorearg *ra, objset_t *os, + struct drr_freeobjects *drrfo) +{ + uint64_t obj; + + if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj) + return (EINVAL); + + for (obj = drrfo->drr_firstobj; + obj < drrfo->drr_firstobj + drrfo->drr_numobjs; + (void) dmu_object_next(os, &obj, FALSE, 0)) { + int err; + + if (dmu_object_info(os, obj, NULL) != 0) + continue; + + err = dmu_free_object(os, obj); + if (err) + return (err); + } + return (0); +} + +static int +restore_write(struct restorearg *ra, objset_t *os, + struct drr_write *drrw) +{ + dmu_tx_t *tx; + void *data; + int err; + + if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset || + drrw->drr_type >= DMU_OT_NUMTYPES) + return (EINVAL); + + data = restore_read(ra, drrw->drr_length); + if (data == NULL) + return (ra->err); + + if (dmu_object_info(os, drrw->drr_object, NULL) != 0) + return (EINVAL); + + tx = dmu_tx_create(os); + + dmu_tx_hold_write(tx, drrw->drr_object, + drrw->drr_offset, drrw->drr_length); + err = dmu_tx_assign(tx, TXG_WAIT); + if (err) { + dmu_tx_abort(tx); + return (err); + } + if (ra->byteswap) + dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length); + dmu_write(os, drrw->drr_object, + drrw->drr_offset, drrw->drr_length, data, tx); + dmu_tx_commit(tx); + return (0); +} + +/* ARGSUSED */ +static int +restore_free(struct restorearg *ra, objset_t *os, + struct drr_free *drrf) +{ + int err; + + if (drrf->drr_length != -1ULL && + drrf->drr_offset + drrf->drr_length < drrf->drr_offset) + return (EINVAL); + + if (dmu_object_info(os, drrf->drr_object, NULL) != 0) + return (EINVAL); + + err = dmu_free_long_range(os, drrf->drr_object, + drrf->drr_offset, drrf->drr_length); + return (err); +} + +void +dmu_recv_abort_cleanup(dmu_recv_cookie_t *drc) +{ + if (drc->drc_newfs || drc->drc_real_ds != drc->drc_logical_ds) { + /* + * online incremental or new fs: destroy the fs (which + * may be a clone) that we created + */ + (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag); + if (drc->drc_real_ds != drc->drc_logical_ds) + dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag); + } else { + /* + * offline incremental: rollback to most recent snapshot. + */ + (void) dsl_dataset_rollback(drc->drc_real_ds, DMU_OST_NONE); + dsl_dataset_disown(drc->drc_real_ds, dmu_recv_tag); + } +} + +/* + * NB: callers *must* call dmu_recv_end() if this succeeds. 
+ */ +int +dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp) +{ + struct restorearg ra = { 0 }; + dmu_replay_record_t *drr; + objset_t *os; + zio_cksum_t pcksum; + + if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) + ra.byteswap = TRUE; + + { + /* compute checksum of drr_begin record */ + dmu_replay_record_t *drr; + drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP); + + drr->drr_type = DRR_BEGIN; + drr->drr_u.drr_begin = *drc->drc_drrb; + if (ra.byteswap) { + fletcher_4_incremental_byteswap(drr, + sizeof (dmu_replay_record_t), &ra.cksum); + } else { + fletcher_4_incremental_native(drr, + sizeof (dmu_replay_record_t), &ra.cksum); + } + kmem_free(drr, sizeof (dmu_replay_record_t)); + } + + if (ra.byteswap) { + struct drr_begin *drrb = drc->drc_drrb; + drrb->drr_magic = BSWAP_64(drrb->drr_magic); + drrb->drr_version = BSWAP_64(drrb->drr_version); + drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time); + drrb->drr_type = BSWAP_32(drrb->drr_type); + drrb->drr_toguid = BSWAP_64(drrb->drr_toguid); + drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid); + } + + ra.vp = vp; + ra.voff = *voffp; + ra.bufsize = 1<<20; + ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP); + + /* these were verified in dmu_recv_begin */ + ASSERT(drc->drc_drrb->drr_version == DMU_BACKUP_STREAM_VERSION); + ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES); + + /* + * Open the objset we are modifying. + */ + VERIFY(dmu_objset_open_ds(drc->drc_real_ds, DMU_OST_ANY, &os) == 0); + + ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT); + + /* + * Read records and process them. + */ + pcksum = ra.cksum; + while (ra.err == 0 && + NULL != (drr = restore_read(&ra, sizeof (*drr)))) { + if (issig(JUSTLOOKING) && issig(FORREAL)) { + ra.err = EINTR; + goto out; + } + + if (ra.byteswap) + backup_byteswap(drr); + + switch (drr->drr_type) { + case DRR_OBJECT: + { + /* + * We need to make a copy of the record header, + * because restore_{object,write} may need to + * restore_read(), which will invalidate drr. + */ + struct drr_object drro = drr->drr_u.drr_object; + ra.err = restore_object(&ra, os, &drro); + break; + } + case DRR_FREEOBJECTS: + { + struct drr_freeobjects drrfo = + drr->drr_u.drr_freeobjects; + ra.err = restore_freeobjects(&ra, os, &drrfo); + break; + } + case DRR_WRITE: + { + struct drr_write drrw = drr->drr_u.drr_write; + ra.err = restore_write(&ra, os, &drrw); + break; + } + case DRR_FREE: + { + struct drr_free drrf = drr->drr_u.drr_free; + ra.err = restore_free(&ra, os, &drrf); + break; + } + case DRR_END: + { + struct drr_end drre = drr->drr_u.drr_end; + /* + * We compare against the *previous* checksum + * value, because the stored checksum is of + * everything before the DRR_END record. + */ + if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum)) + ra.err = ECKSUM; + goto out; + } + default: + ra.err = EINVAL; + goto out; + } + pcksum = ra.cksum; + } + ASSERT(ra.err != 0); + +out: + dmu_objset_close(os); + + if (ra.err != 0) { + /* + * rollback or destroy what we created, so we don't + * leave it in the restoring state. 
+ */ + txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0); + dmu_recv_abort_cleanup(drc); + } + + kmem_free(ra.buf, ra.bufsize); + *voffp = ra.voff; + return (ra.err); +} + +struct recvendsyncarg { + char *tosnap; + uint64_t creation_time; + uint64_t toguid; +}; + +static int +recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + struct recvendsyncarg *resa = arg2; + + return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx)); +} + +static void +recv_end_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + struct recvendsyncarg *resa = arg2; + + dsl_dataset_snapshot_sync(ds, resa->tosnap, cr, tx); + + /* set snapshot's creation time and guid */ + dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); + ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time; + ds->ds_prev->ds_phys->ds_guid = resa->toguid; + ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT; + + dmu_buf_will_dirty(ds->ds_dbuf, tx); + ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT; +} + +int +dmu_recv_end(dmu_recv_cookie_t *drc) +{ + struct recvendsyncarg resa; + dsl_dataset_t *ds = drc->drc_logical_ds; + int err; + + /* + * XXX hack; seems the ds is still dirty and + * dsl_pool_zil_clean() expects it to have a ds_user_ptr + * (and zil), but clone_swap() can close it. + */ + txg_wait_synced(ds->ds_dir->dd_pool, 0); + + if (ds != drc->drc_real_ds) { + /* we are doing an online recv */ + if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) { + err = dsl_dataset_clone_swap(drc->drc_real_ds, ds, + drc->drc_force); + if (err) + dsl_dataset_disown(ds, dmu_recv_tag); + } else { + err = EBUSY; + dsl_dataset_rele(ds, dmu_recv_tag); + } + /* dsl_dataset_destroy() will disown the ds */ + (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag); + if (err) + return (err); + } + + resa.creation_time = drc->drc_drrb->drr_creation_time; + resa.toguid = drc->drc_drrb->drr_toguid; + resa.tosnap = drc->drc_tosnap; + + err = dsl_sync_task_do(ds->ds_dir->dd_pool, + recv_end_check, recv_end_sync, ds, &resa, 3); + if (err) { + if (drc->drc_newfs) { + ASSERT(ds == drc->drc_real_ds); + (void) dsl_dataset_destroy(ds, dmu_recv_tag); + return (err); + } else { + (void) dsl_dataset_rollback(ds, DMU_OST_NONE); + } + } + + /* release the hold from dmu_recv_begin */ + dsl_dataset_disown(ds, dmu_recv_tag); + return (err); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_traverse.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_traverse.c new file mode 100644 index 000000000000..512401470773 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_traverse.c @@ -0,0 +1,406 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. 
All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SET_BOOKMARK(zb, objset, object, level, blkid) \ +{ \ + (zb)->zb_objset = objset; \ + (zb)->zb_object = object; \ + (zb)->zb_level = level; \ + (zb)->zb_blkid = blkid; \ +} + +struct prefetch_data { + kmutex_t pd_mtx; + kcondvar_t pd_cv; + int pd_blks_max; + int pd_blks_fetched; + int pd_flags; + boolean_t pd_cancel; + boolean_t pd_exited; +}; + +struct traverse_data { + spa_t *td_spa; + uint64_t td_objset; + blkptr_t *td_rootbp; + uint64_t td_min_txg; + int td_flags; + struct prefetch_data *td_pfd; + blkptr_cb_t *td_func; + void *td_arg; +}; + +/* ARGSUSED */ +static void +traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) +{ + struct traverse_data *td = arg; + zbookmark_t zb; + + if (bp->blk_birth == 0) + return; + + if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa)) + return; + + zb.zb_objset = td->td_objset; + zb.zb_object = 0; + zb.zb_level = -1; + zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ]; + VERIFY(0 == td->td_func(td->td_spa, bp, &zb, NULL, td->td_arg)); +} + +/* ARGSUSED */ +static void +traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg) +{ + struct traverse_data *td = arg; + + if (lrc->lrc_txtype == TX_WRITE) { + lr_write_t *lr = (lr_write_t *)lrc; + blkptr_t *bp = &lr->lr_blkptr; + zbookmark_t zb; + + if (bp->blk_birth == 0) + return; + + if (claim_txg == 0 || bp->blk_birth < claim_txg) + return; + + zb.zb_objset = td->td_objset; + zb.zb_object = lr->lr_foid; + zb.zb_level = BP_GET_LEVEL(bp); + zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp); + VERIFY(0 == td->td_func(td->td_spa, bp, &zb, NULL, td->td_arg)); + } +} + +static void +traverse_zil(struct traverse_data *td, zil_header_t *zh) +{ + uint64_t claim_txg = zh->zh_claim_txg; + zilog_t *zilog; + + /* + * We only want to visit blocks that have been claimed but not yet + * replayed (or, in read-only mode, blocks that *would* be claimed). 
+ */ + if (claim_txg == 0 && (spa_mode & FWRITE)) + return; + + zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh); + + (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td, + claim_txg); + + zil_free(zilog); +} + +static int +traverse_visitbp(struct traverse_data *td, const dnode_phys_t *dnp, + arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb) +{ + zbookmark_t czb; + int err = 0; + arc_buf_t *buf = NULL; + struct prefetch_data *pd = td->td_pfd; + + if (bp->blk_birth == 0) { + err = td->td_func(td->td_spa, NULL, zb, dnp, td->td_arg); + return (err); + } + + if (bp->blk_birth <= td->td_min_txg) + return (0); + + if (pd && !pd->pd_exited && + ((pd->pd_flags & TRAVERSE_PREFETCH_DATA) || + BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0)) { + mutex_enter(&pd->pd_mtx); + ASSERT(pd->pd_blks_fetched >= 0); + while (pd->pd_blks_fetched == 0 && !pd->pd_exited) + cv_wait(&pd->pd_cv, &pd->pd_mtx); + pd->pd_blks_fetched--; + cv_broadcast(&pd->pd_cv); + mutex_exit(&pd->pd_mtx); + } + + if (td->td_flags & TRAVERSE_PRE) { + err = td->td_func(td->td_spa, bp, zb, dnp, td->td_arg); + if (err) + return (err); + } + + if (BP_GET_LEVEL(bp) > 0) { + uint32_t flags = ARC_WAIT; + int i; + blkptr_t *cbp; + int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; + + err = arc_read(NULL, td->td_spa, bp, pbuf, + arc_getbuf_func, &buf, + ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); + if (err) + return (err); + + /* recursively visitbp() blocks below this */ + cbp = buf->b_data; + for (i = 0; i < epb; i++, cbp++) { + SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, + zb->zb_level - 1, + zb->zb_blkid * epb + i); + err = traverse_visitbp(td, dnp, buf, cbp, &czb); + if (err) + break; + } + } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { + uint32_t flags = ARC_WAIT; + int i, j; + int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; + + err = arc_read(NULL, td->td_spa, bp, pbuf, + arc_getbuf_func, &buf, + ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); + if (err) + return (err); + + /* recursively visitbp() blocks below this */ + dnp = buf->b_data; + for (i = 0; i < epb && err == 0; i++, dnp++) { + for (j = 0; j < dnp->dn_nblkptr; j++) { + SET_BOOKMARK(&czb, zb->zb_objset, + zb->zb_blkid * epb + i, + dnp->dn_nlevels - 1, j); + err = traverse_visitbp(td, dnp, buf, + (blkptr_t *)&dnp->dn_blkptr[j], &czb); + if (err) + break; + } + } + } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { + uint32_t flags = ARC_WAIT; + objset_phys_t *osp; + int j; + + err = arc_read_nolock(NULL, td->td_spa, bp, + arc_getbuf_func, &buf, + ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); + if (err) + return (err); + + osp = buf->b_data; + /* + * traverse_zil is just here for zdb's leak checking. + * For other consumers, there will be no ZIL blocks. 
+ */ + traverse_zil(td, &osp->os_zil_header); + + for (j = 0; j < osp->os_meta_dnode.dn_nblkptr; j++) { + SET_BOOKMARK(&czb, zb->zb_objset, 0, + osp->os_meta_dnode.dn_nlevels - 1, j); + err = traverse_visitbp(td, &osp->os_meta_dnode, buf, + (blkptr_t *)&osp->os_meta_dnode.dn_blkptr[j], + &czb); + if (err) + break; + } + } + + if (buf) + (void) arc_buf_remove_ref(buf, &buf); + + if (err == 0 && (td->td_flags & TRAVERSE_POST)) + err = td->td_func(td->td_spa, bp, zb, dnp, td->td_arg); + + return (err); +} + +/* ARGSUSED */ +static int +traverse_prefetcher(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb, + const dnode_phys_t *dnp, void *arg) +{ + struct prefetch_data *pfd = arg; + uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH; + + ASSERT(pfd->pd_blks_fetched >= 0); + if (pfd->pd_cancel) + return (EINTR); + + if (bp == NULL || !((pfd->pd_flags & TRAVERSE_PREFETCH_DATA) || + BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0)) + return (0); + + mutex_enter(&pfd->pd_mtx); + while (!pfd->pd_cancel && pfd->pd_blks_fetched >= pfd->pd_blks_max) + cv_wait(&pfd->pd_cv, &pfd->pd_mtx); + pfd->pd_blks_fetched++; + cv_broadcast(&pfd->pd_cv); + mutex_exit(&pfd->pd_mtx); + + (void) arc_read_nolock(NULL, spa, bp, NULL, NULL, + ZIO_PRIORITY_ASYNC_READ, + ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, + &aflags, zb); + + return (0); +} + +static void +traverse_prefetch_thread(void *arg) +{ + struct traverse_data *td_main = arg; + struct traverse_data td = *td_main; + zbookmark_t czb; + + td.td_func = traverse_prefetcher; + td.td_arg = td_main->td_pfd; + td.td_pfd = NULL; + + SET_BOOKMARK(&czb, td.td_objset, 0, -1, 0); + (void) traverse_visitbp(&td, NULL, NULL, td.td_rootbp, &czb); + + mutex_enter(&td_main->td_pfd->pd_mtx); + td_main->td_pfd->pd_exited = B_TRUE; + cv_broadcast(&td_main->td_pfd->pd_cv); + mutex_exit(&td_main->td_pfd->pd_mtx); +} + +/* + * NB: dataset must not be changing on-disk (eg, is a snapshot or we are + * in syncing context). + */ +static int +traverse_impl(spa_t *spa, uint64_t objset, blkptr_t *rootbp, + uint64_t txg_start, int flags, blkptr_cb_t func, void *arg) +{ + struct traverse_data td; + struct prefetch_data pd = { 0 }; + zbookmark_t czb; + int err; + + td.td_spa = spa; + td.td_objset = objset; + td.td_rootbp = rootbp; + td.td_min_txg = txg_start; + td.td_func = func; + td.td_arg = arg; + td.td_pfd = &pd; + td.td_flags = flags; + + pd.pd_blks_max = 100; + pd.pd_flags = flags; + mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL); + cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL); + + if (!(flags & TRAVERSE_PREFETCH) || + 0 == taskq_dispatch(system_taskq, traverse_prefetch_thread, + &td, TQ_NOQUEUE)) + pd.pd_exited = B_TRUE; + + SET_BOOKMARK(&czb, objset, 0, -1, 0); + err = traverse_visitbp(&td, NULL, NULL, rootbp, &czb); + + mutex_enter(&pd.pd_mtx); + pd.pd_cancel = B_TRUE; + cv_broadcast(&pd.pd_cv); + while (!pd.pd_exited) + cv_wait(&pd.pd_cv, &pd.pd_mtx); + mutex_exit(&pd.pd_mtx); + + mutex_destroy(&pd.pd_mtx); + cv_destroy(&pd.pd_cv); + + return (err); +} + +/* + * NB: dataset must not be changing on-disk (eg, is a snapshot or we are + * in syncing context). + */ +int +traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags, + blkptr_cb_t func, void *arg) +{ + return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds->ds_object, + &ds->ds_phys->ds_bp, txg_start, flags, func, arg)); +} + +/* + * NB: pool must not be changing on-disk (eg, from zdb or sync context). 
+ */ +int +traverse_pool(spa_t *spa, blkptr_cb_t func, void *arg) +{ + int err; + uint64_t obj; + dsl_pool_t *dp = spa_get_dsl(spa); + objset_t *mos = dp->dp_meta_objset; + + /* visit the MOS */ + err = traverse_impl(spa, 0, spa_get_rootblkptr(spa), + 0, TRAVERSE_PRE, func, arg); + if (err) + return (err); + + /* visit each dataset */ + for (obj = 1; err == 0; err = dmu_object_next(mos, &obj, FALSE, 0)) { + dmu_object_info_t doi; + + err = dmu_object_info(mos, obj, &doi); + if (err) + return (err); + + if (doi.doi_type == DMU_OT_DSL_DATASET) { + dsl_dataset_t *ds; + rw_enter(&dp->dp_config_rwlock, RW_READER); + err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds); + rw_exit(&dp->dp_config_rwlock); + if (err) + return (err); + err = traverse_dataset(ds, + ds->ds_phys->ds_prev_snap_txg, TRAVERSE_PRE, + func, arg); + dsl_dataset_rele(ds, FTAG); + if (err) + return (err); + } + } + if (err == ESRCH) + err = 0; + return (err); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_tx.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_tx.c new file mode 100644 index 000000000000..bf560e5657c1 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_tx.c @@ -0,0 +1,1068 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include /* for dsl_dataset_block_freeable() */ +#include /* for dsl_dir_tempreserve_*() */ +#include +#include /* for fzap_default_block_shift */ +#include +#include + +typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn, + uint64_t arg1, uint64_t arg2); + + +dmu_tx_t * +dmu_tx_create_dd(dsl_dir_t *dd) +{ + dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP); + tx->tx_dir = dd; + if (dd) + tx->tx_pool = dd->dd_pool; + list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t), + offsetof(dmu_tx_hold_t, txh_node)); +#ifdef ZFS_DEBUG + refcount_create(&tx->tx_space_written); + refcount_create(&tx->tx_space_freed); +#endif + return (tx); +} + +dmu_tx_t * +dmu_tx_create(objset_t *os) +{ + dmu_tx_t *tx = dmu_tx_create_dd(os->os->os_dsl_dataset->ds_dir); + tx->tx_objset = os; + tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os->os_dsl_dataset); + return (tx); +} + +dmu_tx_t * +dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg) +{ + dmu_tx_t *tx = dmu_tx_create_dd(NULL); + + ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg); + tx->tx_pool = dp; + tx->tx_txg = txg; + tx->tx_anyobj = TRUE; + + return (tx); +} + +int +dmu_tx_is_syncing(dmu_tx_t *tx) +{ + return (tx->tx_anyobj); +} + +int +dmu_tx_private_ok(dmu_tx_t *tx) +{ + return (tx->tx_anyobj); +} + +static dmu_tx_hold_t * +dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object, + enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2) +{ + dmu_tx_hold_t *txh; + dnode_t *dn = NULL; + int err; + + if (object != DMU_NEW_OBJECT) { + err = dnode_hold(os->os, object, tx, &dn); + if (err) { + tx->tx_err = err; + return (NULL); + } + + if (err == 0 && tx->tx_txg != 0) { + mutex_enter(&dn->dn_mtx); + /* + * dn->dn_assigned_txg == tx->tx_txg doesn't pose a + * problem, but there's no way for it to happen (for + * now, at least). + */ + ASSERT(dn->dn_assigned_txg == 0); + dn->dn_assigned_txg = tx->tx_txg; + (void) refcount_add(&dn->dn_tx_holds, tx); + mutex_exit(&dn->dn_mtx); + } + } + + txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP); + txh->txh_tx = tx; + txh->txh_dnode = dn; +#ifdef ZFS_DEBUG + txh->txh_type = type; + txh->txh_arg1 = arg1; + txh->txh_arg2 = arg2; +#endif + list_insert_tail(&tx->tx_holds, txh); + + return (txh); +} + +void +dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object) +{ + /* + * If we're syncing, they can manipulate any object anyhow, and + * the hold on the dnode_t can cause problems. + */ + if (!dmu_tx_is_syncing(tx)) { + (void) dmu_tx_hold_object_impl(tx, os, + object, THT_NEWOBJECT, 0, 0); + } +} + +static int +dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid) +{ + int err; + dmu_buf_impl_t *db; + + rw_enter(&dn->dn_struct_rwlock, RW_READER); + db = dbuf_hold_level(dn, level, blkid, FTAG); + rw_exit(&dn->dn_struct_rwlock); + if (db == NULL) + return (EIO); + err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH); + dbuf_rele(db, FTAG); + return (err); +} + +/* ARGSUSED */ +static void +dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len) +{ + dnode_t *dn = txh->txh_dnode; + uint64_t start, end, i; + int min_bs, max_bs, min_ibs, max_ibs, epbs, bits; + int err = 0; + + if (len == 0) + return; + + min_bs = SPA_MINBLOCKSHIFT; + max_bs = SPA_MAXBLOCKSHIFT; + min_ibs = DN_MIN_INDBLKSHIFT; + max_ibs = DN_MAX_INDBLKSHIFT; + + /* + * For i/o error checking, read the first and last level-0 + * blocks (if they are not aligned), and all the level-1 blocks. 
+ */ + + if (dn) { + if (dn->dn_maxblkid == 0) { + if ((off > 0 || len < dn->dn_datablksz) && + off < dn->dn_datablksz) { + err = dmu_tx_check_ioerr(NULL, dn, 0, 0); + if (err) + goto out; + } + } else { + zio_t *zio = zio_root(dn->dn_objset->os_spa, + NULL, NULL, ZIO_FLAG_CANFAIL); + + /* first level-0 block */ + start = off >> dn->dn_datablkshift; + if (P2PHASE(off, dn->dn_datablksz) || + len < dn->dn_datablksz) { + err = dmu_tx_check_ioerr(zio, dn, 0, start); + if (err) + goto out; + } + + /* last level-0 block */ + end = (off+len-1) >> dn->dn_datablkshift; + if (end != start && end <= dn->dn_maxblkid && + P2PHASE(off+len, dn->dn_datablksz)) { + err = dmu_tx_check_ioerr(zio, dn, 0, end); + if (err) + goto out; + } + + /* level-1 blocks */ + if (dn->dn_nlevels > 1) { + start >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT; + end >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT; + for (i = start+1; i < end; i++) { + err = dmu_tx_check_ioerr(zio, dn, 1, i); + if (err) + goto out; + } + } + + err = zio_wait(zio); + if (err) + goto out; + } + } + + /* + * If there's more than one block, the blocksize can't change, + * so we can make a more precise estimate. Alternatively, + * if the dnode's ibs is larger than max_ibs, always use that. + * This ensures that if we reduce DN_MAX_INDBLKSHIFT, + * the code will still work correctly on existing pools. + */ + if (dn && (dn->dn_maxblkid != 0 || dn->dn_indblkshift > max_ibs)) { + min_ibs = max_ibs = dn->dn_indblkshift; + if (dn->dn_datablkshift != 0) + min_bs = max_bs = dn->dn_datablkshift; + } + + /* + * 'end' is the last thing we will access, not one past. + * This way we won't overflow when accessing the last byte. + */ + start = P2ALIGN(off, 1ULL << max_bs); + end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1; + txh->txh_space_towrite += end - start + 1; + + start >>= min_bs; + end >>= min_bs; + + epbs = min_ibs - SPA_BLKPTRSHIFT; + + /* + * The object contains at most 2^(64 - min_bs) blocks, + * and each indirect level maps 2^epbs. + */ + for (bits = 64 - min_bs; bits >= 0; bits -= epbs) { + start >>= epbs; + end >>= epbs; + /* + * If we increase the number of levels of indirection, + * we'll need new blkid=0 indirect blocks. If start == 0, + * we're already accounting for that blocks; and if end == 0, + * we can't increase the number of levels beyond that. 
+ */ + if (start != 0 && end != 0) + txh->txh_space_towrite += 1ULL << max_ibs; + txh->txh_space_towrite += (end - start + 1) << max_ibs; + } + + ASSERT(txh->txh_space_towrite < 2 * DMU_MAX_ACCESS); + +out: + if (err) + txh->txh_tx->tx_err = err; +} + +static void +dmu_tx_count_dnode(dmu_tx_hold_t *txh) +{ + dnode_t *dn = txh->txh_dnode; + dnode_t *mdn = txh->txh_tx->tx_objset->os->os_meta_dnode; + uint64_t space = mdn->dn_datablksz + + ((mdn->dn_nlevels-1) << mdn->dn_indblkshift); + + if (dn && dn->dn_dbuf->db_blkptr && + dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset, + dn->dn_dbuf->db_blkptr->blk_birth)) { + txh->txh_space_tooverwrite += space; + } else { + txh->txh_space_towrite += space; + if (dn && dn->dn_dbuf->db_blkptr) + txh->txh_space_tounref += space; + } +} + +void +dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len) +{ + dmu_tx_hold_t *txh; + + ASSERT(tx->tx_txg == 0); + ASSERT(len < DMU_MAX_ACCESS); + ASSERT(len == 0 || UINT64_MAX - off >= len - 1); + + txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, + object, THT_WRITE, off, len); + if (txh == NULL) + return; + + dmu_tx_count_write(txh, off, len); + dmu_tx_count_dnode(txh); +} + +static void +dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len) +{ + uint64_t blkid, nblks, lastblk; + uint64_t space = 0, unref = 0, skipped = 0; + dnode_t *dn = txh->txh_dnode; + dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; + spa_t *spa = txh->txh_tx->tx_pool->dp_spa; + int epbs; + + if (dn->dn_nlevels == 0) + return; + + /* + * The struct_rwlock protects us against dn_nlevels + * changing, in case (against all odds) we manage to dirty & + * sync out the changes after we check for being dirty. + * Also, dbuf_hold_level() wants us to have the struct_rwlock. + */ + rw_enter(&dn->dn_struct_rwlock, RW_READER); + epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; + if (dn->dn_maxblkid == 0) { + if (off == 0 && len >= dn->dn_datablksz) { + blkid = 0; + nblks = 1; + } else { + rw_exit(&dn->dn_struct_rwlock); + return; + } + } else { + blkid = off >> dn->dn_datablkshift; + nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift; + + if (blkid >= dn->dn_maxblkid) { + rw_exit(&dn->dn_struct_rwlock); + return; + } + if (blkid + nblks > dn->dn_maxblkid) + nblks = dn->dn_maxblkid - blkid; + + } + if (dn->dn_nlevels == 1) { + int i; + for (i = 0; i < nblks; i++) { + blkptr_t *bp = dn->dn_phys->dn_blkptr; + ASSERT3U(blkid + i, <, dn->dn_nblkptr); + bp += blkid + i; + if (dsl_dataset_block_freeable(ds, bp->blk_birth)) { + dprintf_bp(bp, "can free old%s", ""); + space += bp_get_dasize(spa, bp); + } + unref += BP_GET_ASIZE(bp); + } + nblks = 0; + } + + /* + * Add in memory requirements of higher-level indirects. + * This assumes a worst-possible scenario for dn_nlevels. + */ + { + uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs); + int level = (dn->dn_nlevels > 1) ? 
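/*
 * A rough worked example of the estimate dmu_tx_count_write() above
 * produces, assuming a multi-block object with 128K data blocks
 * (max_bs = 17) and 16K indirect blocks (max_ibs = 14, epbs = 7): even a
 * one-byte dmu_tx_hold_write() charges the whole 128K-aligned range
 * containing the write (end - start + 1 = 128K), plus at least one 16K
 * indirect block per iteration of the bits loop (bits = 64 - 17 = 47,
 * stepping by epbs = 7, i.e. 7 levels), so txh_space_towrite comes out
 * around 128K + 7 * 16K.  The estimate is deliberately pessimistic; the
 * temporary reservation is dropped again in dmu_tx_commit().
 */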
2 : 1; + + while (level++ < DN_MAX_LEVELS) { + txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift; + blkcnt = 1 + (blkcnt >> epbs); + } + ASSERT(blkcnt <= dn->dn_nblkptr); + } + + lastblk = blkid + nblks - 1; + while (nblks) { + dmu_buf_impl_t *dbuf; + uint64_t ibyte, new_blkid; + int epb = 1 << epbs; + int err, i, blkoff, tochk; + blkptr_t *bp; + + ibyte = blkid << dn->dn_datablkshift; + err = dnode_next_offset(dn, + DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0); + new_blkid = ibyte >> dn->dn_datablkshift; + if (err == ESRCH) { + skipped += (lastblk >> epbs) - (blkid >> epbs) + 1; + break; + } + if (err) { + txh->txh_tx->tx_err = err; + break; + } + if (new_blkid > lastblk) { + skipped += (lastblk >> epbs) - (blkid >> epbs) + 1; + break; + } + + if (new_blkid > blkid) { + ASSERT((new_blkid >> epbs) > (blkid >> epbs)); + skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1; + nblks -= new_blkid - blkid; + blkid = new_blkid; + } + blkoff = P2PHASE(blkid, epb); + tochk = MIN(epb - blkoff, nblks); + + dbuf = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG); + + txh->txh_memory_tohold += dbuf->db.db_size; + if (txh->txh_memory_tohold > DMU_MAX_ACCESS) { + txh->txh_tx->tx_err = E2BIG; + dbuf_rele(dbuf, FTAG); + break; + } + err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL); + if (err != 0) { + txh->txh_tx->tx_err = err; + dbuf_rele(dbuf, FTAG); + break; + } + + bp = dbuf->db.db_data; + bp += blkoff; + + for (i = 0; i < tochk; i++) { + if (dsl_dataset_block_freeable(ds, bp[i].blk_birth)) { + dprintf_bp(&bp[i], "can free old%s", ""); + space += bp_get_dasize(spa, &bp[i]); + } + unref += BP_GET_ASIZE(bp); + } + dbuf_rele(dbuf, FTAG); + + blkid += tochk; + nblks -= tochk; + } + rw_exit(&dn->dn_struct_rwlock); + + /* account for new level 1 indirect blocks that might show up */ + if (skipped > 0) { + txh->txh_fudge += skipped << dn->dn_indblkshift; + skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs); + txh->txh_memory_tohold += skipped << dn->dn_indblkshift; + } + txh->txh_space_tofree += space; + txh->txh_space_tounref += unref; +} + +void +dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len) +{ + dmu_tx_hold_t *txh; + dnode_t *dn; + uint64_t start, end, i; + int err, shift; + zio_t *zio; + + ASSERT(tx->tx_txg == 0); + + txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, + object, THT_FREE, off, len); + if (txh == NULL) + return; + dn = txh->txh_dnode; + + /* first block */ + if (off != 0) + dmu_tx_count_write(txh, off, 1); + /* last block */ + if (len != DMU_OBJECT_END) + dmu_tx_count_write(txh, off+len, 1); + + if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz) + return; + if (len == DMU_OBJECT_END) + len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off; + + /* + * For i/o error checking, read the first and last level-0 + * blocks, and all the level-1 blocks. The above count_write's + * have already taken care of the level-0 blocks. + */ + if (dn->dn_nlevels > 1) { + shift = dn->dn_datablkshift + dn->dn_indblkshift - + SPA_BLKPTRSHIFT; + start = off >> shift; + end = dn->dn_datablkshift ? 
((off+len) >> shift) : 0; + + zio = zio_root(tx->tx_pool->dp_spa, + NULL, NULL, ZIO_FLAG_CANFAIL); + for (i = start; i <= end; i++) { + uint64_t ibyte = i << shift; + err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0); + i = ibyte >> shift; + if (err == ESRCH) + break; + if (err) { + tx->tx_err = err; + return; + } + + err = dmu_tx_check_ioerr(zio, dn, 1, i); + if (err) { + tx->tx_err = err; + return; + } + } + err = zio_wait(zio); + if (err) { + tx->tx_err = err; + return; + } + } + + dmu_tx_count_dnode(txh); + dmu_tx_count_free(txh, off, len); +} + +void +dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, char *name) +{ + dmu_tx_hold_t *txh; + dnode_t *dn; + uint64_t nblocks; + int epbs, err; + + ASSERT(tx->tx_txg == 0); + + txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, + object, THT_ZAP, add, (uintptr_t)name); + if (txh == NULL) + return; + dn = txh->txh_dnode; + + dmu_tx_count_dnode(txh); + + if (dn == NULL) { + /* + * We will be able to fit a new object's entries into one leaf + * block. So there will be at most 2 blocks total, + * including the header block. + */ + dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift); + return; + } + + ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap); + + if (dn->dn_maxblkid == 0 && !add) { + /* + * If there is only one block (i.e. this is a micro-zap) + * and we are not adding anything, the accounting is simple. + */ + err = dmu_tx_check_ioerr(NULL, dn, 0, 0); + if (err) { + tx->tx_err = err; + return; + } + + /* + * Use max block size here, since we don't know how much + * the size will change between now and the dbuf dirty call. + */ + if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset, + dn->dn_phys->dn_blkptr[0].blk_birth)) { + txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE; + } else { + txh->txh_space_towrite += SPA_MAXBLOCKSIZE; + txh->txh_space_tounref += + BP_GET_ASIZE(dn->dn_phys->dn_blkptr); + } + return; + } + + if (dn->dn_maxblkid > 0 && name) { + /* + * access the name in this fat-zap so that we'll check + * for i/o errors to the leaf blocks, etc. + */ + err = zap_lookup(&dn->dn_objset->os, dn->dn_object, name, + 8, 0, NULL); + if (err == EIO) { + tx->tx_err = err; + return; + } + } + + /* + * 3 blocks overwritten: target leaf, ptrtbl block, header block + * 3 new blocks written if adding: new split leaf, 2 grown ptrtbl blocks + */ + dmu_tx_count_write(txh, dn->dn_maxblkid * dn->dn_datablksz, + (3 + (add ? 3 : 0)) << dn->dn_datablkshift); + + /* + * If the modified blocks are scattered to the four winds, + * we'll have to modify an indirect twig for each. + */ + epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; + for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs) + txh->txh_space_towrite += 3 << dn->dn_indblkshift; +} + +void +dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object) +{ + dmu_tx_hold_t *txh; + + ASSERT(tx->tx_txg == 0); + + txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, + object, THT_BONUS, 0, 0); + if (txh) + dmu_tx_count_dnode(txh); +} + +void +dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space) +{ + dmu_tx_hold_t *txh; + ASSERT(tx->tx_txg == 0); + + txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, + DMU_NEW_OBJECT, THT_SPACE, space, 0); + + txh->txh_space_towrite += space; +} + +int +dmu_tx_holds(dmu_tx_t *tx, uint64_t object) +{ + dmu_tx_hold_t *txh; + int holds = 0; + + /* + * By asserting that the tx is assigned, we're counting the + * number of dn_tx_holds, which is the same as the number of + * dn_holds. Otherwise, we'd be counting dn_holds, but + * dn_tx_holds could be 0. 
+ */ + ASSERT(tx->tx_txg != 0); + + /* if (tx->tx_anyobj == TRUE) */ + /* return (0); */ + + for (txh = list_head(&tx->tx_holds); txh; + txh = list_next(&tx->tx_holds, txh)) { + if (txh->txh_dnode && txh->txh_dnode->dn_object == object) + holds++; + } + + return (holds); +} + +#ifdef ZFS_DEBUG +void +dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db) +{ + dmu_tx_hold_t *txh; + int match_object = FALSE, match_offset = FALSE; + dnode_t *dn = db->db_dnode; + + ASSERT(tx->tx_txg != 0); + ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset->os); + ASSERT3U(dn->dn_object, ==, db->db.db_object); + + if (tx->tx_anyobj) + return; + + /* XXX No checking on the meta dnode for now */ + if (db->db.db_object == DMU_META_DNODE_OBJECT) + return; + + for (txh = list_head(&tx->tx_holds); txh; + txh = list_next(&tx->tx_holds, txh)) { + ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg); + if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT) + match_object = TRUE; + if (txh->txh_dnode == NULL || txh->txh_dnode == dn) { + int datablkshift = dn->dn_datablkshift ? + dn->dn_datablkshift : SPA_MAXBLOCKSHIFT; + int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; + int shift = datablkshift + epbs * db->db_level; + uint64_t beginblk = shift >= 64 ? 0 : + (txh->txh_arg1 >> shift); + uint64_t endblk = shift >= 64 ? 0 : + ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift); + uint64_t blkid = db->db_blkid; + + /* XXX txh_arg2 better not be zero... */ + + dprintf("found txh type %x beginblk=%llx endblk=%llx\n", + txh->txh_type, beginblk, endblk); + + switch (txh->txh_type) { + case THT_WRITE: + if (blkid >= beginblk && blkid <= endblk) + match_offset = TRUE; + /* + * We will let this hold work for the bonus + * buffer so that we don't need to hold it + * when creating a new object. + */ + if (blkid == DB_BONUS_BLKID) + match_offset = TRUE; + /* + * They might have to increase nlevels, + * thus dirtying the new TLIBs. Or the + * might have to change the block size, + * thus dirying the new lvl=0 blk=0. + */ + if (blkid == 0) + match_offset = TRUE; + break; + case THT_FREE: + /* + * We will dirty all the level 1 blocks in + * the free range and perhaps the first and + * last level 0 block. + */ + if (blkid >= beginblk && (blkid <= endblk || + txh->txh_arg2 == DMU_OBJECT_END)) + match_offset = TRUE; + break; + case THT_BONUS: + if (blkid == DB_BONUS_BLKID) + match_offset = TRUE; + break; + case THT_ZAP: + match_offset = TRUE; + break; + case THT_NEWOBJECT: + match_object = TRUE; + break; + default: + ASSERT(!"bad txh_type"); + } + } + if (match_object && match_offset) + return; + } + panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n", + (u_longlong_t)db->db.db_object, db->db_level, + (u_longlong_t)db->db_blkid); +} +#endif + +static int +dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how) +{ + dmu_tx_hold_t *txh; + spa_t *spa = tx->tx_pool->dp_spa; + uint64_t memory, asize, fsize, usize; + uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge; + + ASSERT3U(tx->tx_txg, ==, 0); + + if (tx->tx_err) + return (tx->tx_err); + + if (spa_suspended(spa)) { + /* + * If the user has indicated a blocking failure mode + * then return ERESTART which will block in dmu_tx_wait(). + * Otherwise, return EIO so that an error can get + * propagated back to the VOP calls. + * + * Note that we always honor the txg_how flag regardless + * of the failuremode setting. 
+ */ + if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE && + txg_how != TXG_WAIT) + return (EIO); + + return (ERESTART); + } + + tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh); + tx->tx_needassign_txh = NULL; + + /* + * NB: No error returns are allowed after txg_hold_open, but + * before processing the dnode holds, due to the + * dmu_tx_unassign() logic. + */ + + towrite = tofree = tooverwrite = tounref = tohold = fudge = 0; + for (txh = list_head(&tx->tx_holds); txh; + txh = list_next(&tx->tx_holds, txh)) { + dnode_t *dn = txh->txh_dnode; + if (dn != NULL) { + mutex_enter(&dn->dn_mtx); + if (dn->dn_assigned_txg == tx->tx_txg - 1) { + mutex_exit(&dn->dn_mtx); + tx->tx_needassign_txh = txh; + return (ERESTART); + } + if (dn->dn_assigned_txg == 0) + dn->dn_assigned_txg = tx->tx_txg; + ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); + (void) refcount_add(&dn->dn_tx_holds, tx); + mutex_exit(&dn->dn_mtx); + } + towrite += txh->txh_space_towrite; + tofree += txh->txh_space_tofree; + tooverwrite += txh->txh_space_tooverwrite; + tounref += txh->txh_space_tounref; + tohold += txh->txh_memory_tohold; + fudge += txh->txh_fudge; + } + + /* + * NB: This check must be after we've held the dnodes, so that + * the dmu_tx_unassign() logic will work properly + */ + if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg) + return (ERESTART); + + /* + * If a snapshot has been taken since we made our estimates, + * assume that we won't be able to free or overwrite anything. + */ + if (tx->tx_objset && + dsl_dataset_prev_snap_txg(tx->tx_objset->os->os_dsl_dataset) > + tx->tx_lastsnap_txg) { + towrite += tooverwrite; + tooverwrite = tofree = 0; + } + + /* needed allocation: worst-case estimate of write space */ + asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite); + /* freed space estimate: worst-case overwrite + free estimate */ + fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree; + /* convert unrefd space to worst-case estimate */ + usize = spa_get_asize(tx->tx_pool->dp_spa, tounref); + /* calculate memory footprint estimate */ + memory = towrite + tooverwrite + tohold; + +#ifdef ZFS_DEBUG + /* + * Add in 'tohold' to account for our dirty holds on this memory + * XXX - the "fudge" factor is to account for skipped blocks that + * we missed because dnode_next_offset() misses in-core-only blocks. + */ + tx->tx_space_towrite = asize + + spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge); + tx->tx_space_tofree = tofree; + tx->tx_space_tooverwrite = tooverwrite; + tx->tx_space_tounref = tounref; +#endif + + if (tx->tx_dir && asize != 0) { + int err = dsl_dir_tempreserve_space(tx->tx_dir, memory, + asize, fsize, usize, &tx->tx_tempreserve_cookie, tx); + if (err) + return (err); + } + + return (0); +} + +static void +dmu_tx_unassign(dmu_tx_t *tx) +{ + dmu_tx_hold_t *txh; + + if (tx->tx_txg == 0) + return; + + txg_rele_to_quiesce(&tx->tx_txgh); + + for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh; + txh = list_next(&tx->tx_holds, txh)) { + dnode_t *dn = txh->txh_dnode; + + if (dn == NULL) + continue; + mutex_enter(&dn->dn_mtx); + ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); + + if (refcount_remove(&dn->dn_tx_holds, tx) == 0) { + dn->dn_assigned_txg = 0; + cv_broadcast(&dn->dn_notxholds); + } + mutex_exit(&dn->dn_mtx); + } + + txg_rele_to_sync(&tx->tx_txgh); + + tx->tx_lasttried_txg = tx->tx_txg; + tx->tx_txg = 0; +} + +/* + * Assign tx to a transaction group. txg_how can be one of: + * + * (1) TXG_WAIT. 
If the current open txg is full, waits until there's + * a new one. This should be used when you're not holding locks. + * If will only fail if we're truly out of space (or over quota). + * + * (2) TXG_NOWAIT. If we can't assign into the current open txg without + * blocking, returns immediately with ERESTART. This should be used + * whenever you're holding locks. On an ERESTART error, the caller + * should drop locks, do a dmu_tx_wait(tx), and try again. + * + * (3) A specific txg. Use this if you need to ensure that multiple + * transactions all sync in the same txg. Like TXG_NOWAIT, it + * returns ERESTART if it can't assign you into the requested txg. + */ +int +dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how) +{ + int err; + + ASSERT(tx->tx_txg == 0); + ASSERT(txg_how != 0); + ASSERT(!dsl_pool_sync_context(tx->tx_pool)); + + while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) { + dmu_tx_unassign(tx); + + if (err != ERESTART || txg_how != TXG_WAIT) + return (err); + + dmu_tx_wait(tx); + } + + txg_rele_to_quiesce(&tx->tx_txgh); + + return (0); +} + +void +dmu_tx_wait(dmu_tx_t *tx) +{ + spa_t *spa = tx->tx_pool->dp_spa; + + ASSERT(tx->tx_txg == 0); + + /* + * It's possible that the pool has become active after this thread + * has tried to obtain a tx. If that's the case then his + * tx_lasttried_txg would not have been assigned. + */ + if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) { + txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1); + } else if (tx->tx_needassign_txh) { + dnode_t *dn = tx->tx_needassign_txh->txh_dnode; + + mutex_enter(&dn->dn_mtx); + while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1) + cv_wait(&dn->dn_notxholds, &dn->dn_mtx); + mutex_exit(&dn->dn_mtx); + tx->tx_needassign_txh = NULL; + } else { + txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1); + } +} + +void +dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta) +{ +#ifdef ZFS_DEBUG + if (tx->tx_dir == NULL || delta == 0) + return; + + if (delta > 0) { + ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=, + tx->tx_space_towrite); + (void) refcount_add_many(&tx->tx_space_written, delta, NULL); + } else { + (void) refcount_add_many(&tx->tx_space_freed, -delta, NULL); + } +#endif +} + +void +dmu_tx_commit(dmu_tx_t *tx) +{ + dmu_tx_hold_t *txh; + + ASSERT(tx->tx_txg != 0); + + while (txh = list_head(&tx->tx_holds)) { + dnode_t *dn = txh->txh_dnode; + + list_remove(&tx->tx_holds, txh); + kmem_free(txh, sizeof (dmu_tx_hold_t)); + if (dn == NULL) + continue; + mutex_enter(&dn->dn_mtx); + ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg); + + if (refcount_remove(&dn->dn_tx_holds, tx) == 0) { + dn->dn_assigned_txg = 0; + cv_broadcast(&dn->dn_notxholds); + } + mutex_exit(&dn->dn_mtx); + dnode_rele(dn, tx); + } + + if (tx->tx_tempreserve_cookie) + dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx); + + if (tx->tx_anyobj == FALSE) + txg_rele_to_sync(&tx->tx_txgh); + list_destroy(&tx->tx_holds); +#ifdef ZFS_DEBUG + dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n", + tx->tx_space_towrite, refcount_count(&tx->tx_space_written), + tx->tx_space_tofree, refcount_count(&tx->tx_space_freed)); + refcount_destroy_many(&tx->tx_space_written, + refcount_count(&tx->tx_space_written)); + refcount_destroy_many(&tx->tx_space_freed, + refcount_count(&tx->tx_space_freed)); +#endif + kmem_free(tx, sizeof (dmu_tx_t)); +} + +void +dmu_tx_abort(dmu_tx_t *tx) +{ + dmu_tx_hold_t *txh; + + ASSERT(tx->tx_txg == 0); + + while (txh = list_head(&tx->tx_holds)) { + dnode_t *dn = txh->txh_dnode; + + 
list_remove(&tx->tx_holds, txh); + kmem_free(txh, sizeof (dmu_tx_hold_t)); + if (dn != NULL) + dnode_rele(dn, tx); + } + list_destroy(&tx->tx_holds); +#ifdef ZFS_DEBUG + refcount_destroy_many(&tx->tx_space_written, + refcount_count(&tx->tx_space_written)); + refcount_destroy_many(&tx->tx_space_freed, + refcount_count(&tx->tx_space_freed)); +#endif + kmem_free(tx, sizeof (dmu_tx_t)); +} + +uint64_t +dmu_tx_get_txg(dmu_tx_t *tx) +{ + ASSERT(tx->tx_txg != 0); + return (tx->tx_txg); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_zfetch.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_zfetch.c new file mode 100644 index 000000000000..4d79fe98e17e --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dmu_zfetch.c @@ -0,0 +1,651 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include + +/* + * I'm against tune-ables, but these should probably exist as tweakable globals + * until we can get this working the way we want it to. + */ + +int zfs_prefetch_disable = 0; + +/* max # of streams per zfetch */ +uint32_t zfetch_max_streams = 8; +/* min time before stream reclaim */ +uint32_t zfetch_min_sec_reap = 2; +/* max number of blocks to fetch at a time */ +uint32_t zfetch_block_cap = 256; +/* number of bytes in a array_read at which we stop prefetching (1Mb) */ +uint64_t zfetch_array_rd_sz = 1024 * 1024; + +/* forward decls for static routines */ +static int dmu_zfetch_colinear(zfetch_t *, zstream_t *); +static void dmu_zfetch_dofetch(zfetch_t *, zstream_t *); +static uint64_t dmu_zfetch_fetch(dnode_t *, uint64_t, uint64_t); +static uint64_t dmu_zfetch_fetchsz(dnode_t *, uint64_t, uint64_t); +static int dmu_zfetch_find(zfetch_t *, zstream_t *, int); +static int dmu_zfetch_stream_insert(zfetch_t *, zstream_t *); +static zstream_t *dmu_zfetch_stream_reclaim(zfetch_t *); +static void dmu_zfetch_stream_remove(zfetch_t *, zstream_t *); +static int dmu_zfetch_streams_equal(zstream_t *, zstream_t *); + +/* + * Given a zfetch structure and a zstream structure, determine whether the + * blocks to be read are part of a co-linear pair of existing prefetch + * streams. If a set is found, coalesce the streams, removing one, and + * configure the prefetch so it looks for a strided access pattern. + * + * In other words: if we find two sequential access streams that are + * the same length and distance N appart, and this read is N from the + * last stream, then we are probably in a strided access pattern. So + * combine the two sequential streams into a single strided stream. 
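+ *
+ * A worked example (illustrative only): two sequential streams cover
+ * blocks [100, 104) and [200, 204), each with zst_len == zst_stride == 4,
+ * so they sit 100 blocks apart. A new read of block 300 satisfies
+ * z_comp->zst_offset + diff == zh->zst_offset (200 + 100 == 300), so the
+ * pair collapses into one stream with zst_stride = 100 that prefetches
+ * runs of 4 blocks spaced 100 blocks apart.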
+ * + * If no co-linear streams are found, return NULL. + */ +static int +dmu_zfetch_colinear(zfetch_t *zf, zstream_t *zh) +{ + zstream_t *z_walk; + zstream_t *z_comp; + + if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER)) + return (0); + + if (zh == NULL) { + rw_exit(&zf->zf_rwlock); + return (0); + } + + for (z_walk = list_head(&zf->zf_stream); z_walk; + z_walk = list_next(&zf->zf_stream, z_walk)) { + for (z_comp = list_next(&zf->zf_stream, z_walk); z_comp; + z_comp = list_next(&zf->zf_stream, z_comp)) { + int64_t diff; + + if (z_walk->zst_len != z_walk->zst_stride || + z_comp->zst_len != z_comp->zst_stride) { + continue; + } + + diff = z_comp->zst_offset - z_walk->zst_offset; + if (z_comp->zst_offset + diff == zh->zst_offset) { + z_walk->zst_offset = zh->zst_offset; + z_walk->zst_direction = diff < 0 ? -1 : 1; + z_walk->zst_stride = + diff * z_walk->zst_direction; + z_walk->zst_ph_offset = + zh->zst_offset + z_walk->zst_stride; + dmu_zfetch_stream_remove(zf, z_comp); + mutex_destroy(&z_comp->zst_lock); + kmem_free(z_comp, sizeof (zstream_t)); + + dmu_zfetch_dofetch(zf, z_walk); + + rw_exit(&zf->zf_rwlock); + return (1); + } + + diff = z_walk->zst_offset - z_comp->zst_offset; + if (z_walk->zst_offset + diff == zh->zst_offset) { + z_walk->zst_offset = zh->zst_offset; + z_walk->zst_direction = diff < 0 ? -1 : 1; + z_walk->zst_stride = + diff * z_walk->zst_direction; + z_walk->zst_ph_offset = + zh->zst_offset + z_walk->zst_stride; + dmu_zfetch_stream_remove(zf, z_comp); + mutex_destroy(&z_comp->zst_lock); + kmem_free(z_comp, sizeof (zstream_t)); + + dmu_zfetch_dofetch(zf, z_walk); + + rw_exit(&zf->zf_rwlock); + return (1); + } + } + } + + rw_exit(&zf->zf_rwlock); + return (0); +} + +/* + * Given a zstream_t, determine the bounds of the prefetch. Then call the + * routine that actually prefetches the individual blocks. + */ +static void +dmu_zfetch_dofetch(zfetch_t *zf, zstream_t *zs) +{ + uint64_t prefetch_tail; + uint64_t prefetch_limit; + uint64_t prefetch_ofst; + uint64_t prefetch_len; + uint64_t blocks_fetched; + + zs->zst_stride = MAX((int64_t)zs->zst_stride, zs->zst_len); + zs->zst_cap = MIN(zfetch_block_cap, 2 * zs->zst_cap); + + prefetch_tail = MAX((int64_t)zs->zst_ph_offset, + (int64_t)(zs->zst_offset + zs->zst_stride)); + /* + * XXX: use a faster division method? + */ + prefetch_limit = zs->zst_offset + zs->zst_len + + (zs->zst_cap * zs->zst_stride) / zs->zst_len; + + while (prefetch_tail < prefetch_limit) { + prefetch_ofst = zs->zst_offset + zs->zst_direction * + (prefetch_tail - zs->zst_offset); + + prefetch_len = zs->zst_len; + + /* + * Don't prefetch beyond the end of the file, if working + * backwards. + */ + if ((zs->zst_direction == ZFETCH_BACKWARD) && + (prefetch_ofst > prefetch_tail)) { + prefetch_len += prefetch_ofst; + prefetch_ofst = 0; + } + + /* don't prefetch more than we're supposed to */ + if (prefetch_len > zs->zst_len) + break; + + blocks_fetched = dmu_zfetch_fetch(zf->zf_dnode, + prefetch_ofst, zs->zst_len); + + prefetch_tail += zs->zst_stride; + /* stop if we've run out of stuff to prefetch */ + if (blocks_fetched < zs->zst_len) + break; + } + zs->zst_ph_offset = prefetch_tail; + zs->zst_last = lbolt; +} + +/* + * This takes a pointer to a zfetch structure and a dnode. It performs the + * necessary setup for the zfetch structure, grokking data from the + * associated dnode. 
+ */ +void +dmu_zfetch_init(zfetch_t *zf, dnode_t *dno) +{ + if (zf == NULL) { + return; + } + + zf->zf_dnode = dno; + zf->zf_stream_cnt = 0; + zf->zf_alloc_fail = 0; + + list_create(&zf->zf_stream, sizeof (zstream_t), + offsetof(zstream_t, zst_node)); + + rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL); +} + +/* + * This function computes the actual size, in blocks, that can be prefetched, + * and fetches it. + */ +static uint64_t +dmu_zfetch_fetch(dnode_t *dn, uint64_t blkid, uint64_t nblks) +{ + uint64_t fetchsz; + uint64_t i; + + fetchsz = dmu_zfetch_fetchsz(dn, blkid, nblks); + + for (i = 0; i < fetchsz; i++) { + dbuf_prefetch(dn, blkid + i); + } + + return (fetchsz); +} + +/* + * this function returns the number of blocks that would be prefetched, based + * upon the supplied dnode, blockid, and nblks. This is used so that we can + * update streams in place, and then prefetch with their old value after the + * fact. This way, we can delay the prefetch, but subsequent accesses to the + * stream won't result in the same data being prefetched multiple times. + */ +static uint64_t +dmu_zfetch_fetchsz(dnode_t *dn, uint64_t blkid, uint64_t nblks) +{ + uint64_t fetchsz; + + if (blkid > dn->dn_maxblkid) { + return (0); + } + + /* compute fetch size */ + if (blkid + nblks + 1 > dn->dn_maxblkid) { + fetchsz = (dn->dn_maxblkid - blkid) + 1; + ASSERT(blkid + fetchsz - 1 <= dn->dn_maxblkid); + } else { + fetchsz = nblks; + } + + + return (fetchsz); +} + +/* + * given a zfetch and a zsearch structure, see if there is an associated zstream + * for this block read. If so, it starts a prefetch for the stream it + * located and returns true, otherwise it returns false + */ +static int +dmu_zfetch_find(zfetch_t *zf, zstream_t *zh, int prefetched) +{ + zstream_t *zs; + int64_t diff; + int reset = !prefetched; + int rc = 0; + + if (zh == NULL) + return (0); + + /* + * XXX: This locking strategy is a bit coarse; however, it's impact has + * yet to be tested. If this turns out to be an issue, it can be + * modified in a number of different ways. + */ + + rw_enter(&zf->zf_rwlock, RW_READER); +top: + + for (zs = list_head(&zf->zf_stream); zs; + zs = list_next(&zf->zf_stream, zs)) { + + /* + * XXX - should this be an assert? + */ + if (zs->zst_len == 0) { + /* bogus stream */ + continue; + } + + /* + * We hit this case when we are in a strided prefetch stream: + * we will read "len" blocks before "striding". + */ + if (zh->zst_offset >= zs->zst_offset && + zh->zst_offset < zs->zst_offset + zs->zst_len) { + /* already fetched */ + rc = 1; + goto out; + } + + /* + * This is the forward sequential read case: we increment + * len by one each time we hit here, so we will enter this + * case on every read. + */ + if (zh->zst_offset == zs->zst_offset + zs->zst_len) { + + reset = !prefetched && zs->zst_len > 1; + + mutex_enter(&zs->zst_lock); + + if (zh->zst_offset != zs->zst_offset + zs->zst_len) { + mutex_exit(&zs->zst_lock); + goto top; + } + zs->zst_len += zh->zst_len; + diff = zs->zst_len - zfetch_block_cap; + if (diff > 0) { + zs->zst_offset += diff; + zs->zst_len = zs->zst_len > diff ? + zs->zst_len - diff : 0; + } + zs->zst_direction = ZFETCH_FORWARD; + + break; + + /* + * Same as above, but reading backwards through the file. 
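+ * For example (illustrative only): a stream currently covers blocks
+ * [96, 100) and the application, walking the file backwards, now reads
+ * blocks [92, 96), i.e. zh->zst_offset == 92 and zh->zst_len == 4.
+ * Since 92 == 96 - 4 the branch below matches: zst_offset drops to 92,
+ * zst_len grows to 8, and prefetching continues downward.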
+ */ + } else if (zh->zst_offset == zs->zst_offset - zh->zst_len) { + /* backwards sequential access */ + + reset = !prefetched && zs->zst_len > 1; + + mutex_enter(&zs->zst_lock); + + if (zh->zst_offset != zs->zst_offset - zh->zst_len) { + mutex_exit(&zs->zst_lock); + goto top; + } + + zs->zst_offset = zs->zst_offset > zh->zst_len ? + zs->zst_offset - zh->zst_len : 0; + zs->zst_ph_offset = zs->zst_ph_offset > zh->zst_len ? + zs->zst_ph_offset - zh->zst_len : 0; + zs->zst_len += zh->zst_len; + + diff = zs->zst_len - zfetch_block_cap; + if (diff > 0) { + zs->zst_ph_offset = zs->zst_ph_offset > diff ? + zs->zst_ph_offset - diff : 0; + zs->zst_len = zs->zst_len > diff ? + zs->zst_len - diff : zs->zst_len; + } + zs->zst_direction = ZFETCH_BACKWARD; + + break; + + } else if ((zh->zst_offset - zs->zst_offset - zs->zst_stride < + zs->zst_len) && (zs->zst_len != zs->zst_stride)) { + /* strided forward access */ + + mutex_enter(&zs->zst_lock); + + if ((zh->zst_offset - zs->zst_offset - zs->zst_stride >= + zs->zst_len) || (zs->zst_len == zs->zst_stride)) { + mutex_exit(&zs->zst_lock); + goto top; + } + + zs->zst_offset += zs->zst_stride; + zs->zst_direction = ZFETCH_FORWARD; + + break; + + } else if ((zh->zst_offset - zs->zst_offset + zs->zst_stride < + zs->zst_len) && (zs->zst_len != zs->zst_stride)) { + /* strided reverse access */ + + mutex_enter(&zs->zst_lock); + + if ((zh->zst_offset - zs->zst_offset + zs->zst_stride >= + zs->zst_len) || (zs->zst_len == zs->zst_stride)) { + mutex_exit(&zs->zst_lock); + goto top; + } + + zs->zst_offset = zs->zst_offset > zs->zst_stride ? + zs->zst_offset - zs->zst_stride : 0; + zs->zst_ph_offset = (zs->zst_ph_offset > + (2 * zs->zst_stride)) ? + (zs->zst_ph_offset - (2 * zs->zst_stride)) : 0; + zs->zst_direction = ZFETCH_BACKWARD; + + break; + } + } + + if (zs) { + if (reset) { + zstream_t *remove = zs; + + rc = 0; + mutex_exit(&zs->zst_lock); + rw_exit(&zf->zf_rwlock); + rw_enter(&zf->zf_rwlock, RW_WRITER); + /* + * Relocate the stream, in case someone removes + * it while we were acquiring the WRITER lock. + */ + for (zs = list_head(&zf->zf_stream); zs; + zs = list_next(&zf->zf_stream, zs)) { + if (zs == remove) { + dmu_zfetch_stream_remove(zf, zs); + mutex_destroy(&zs->zst_lock); + kmem_free(zs, sizeof (zstream_t)); + break; + } + } + } else { + rc = 1; + dmu_zfetch_dofetch(zf, zs); + mutex_exit(&zs->zst_lock); + } + } +out: + rw_exit(&zf->zf_rwlock); + return (rc); +} + +/* + * Clean-up state associated with a zfetch structure. This frees allocated + * structure members, empties the zf_stream tree, and generally makes things + * nice. This doesn't free the zfetch_t itself, that's left to the caller. + */ +void +dmu_zfetch_rele(zfetch_t *zf) +{ + zstream_t *zs; + zstream_t *zs_next; + + ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock)); + + for (zs = list_head(&zf->zf_stream); zs; zs = zs_next) { + zs_next = list_next(&zf->zf_stream, zs); + + list_remove(&zf->zf_stream, zs); + mutex_destroy(&zs->zst_lock); + kmem_free(zs, sizeof (zstream_t)); + } + list_destroy(&zf->zf_stream); + rw_destroy(&zf->zf_rwlock); + + zf->zf_dnode = NULL; +} + +/* + * Given a zfetch and zstream structure, insert the zstream structure into the + * AVL tree contained within the zfetch structure. Peform the appropriate + * book-keeping. It is possible that another thread has inserted a stream which + * matches one that we are about to insert, so we must be sure to check for this + * case. If one is found, return failure, and let the caller cleanup the + * duplicates. 
+ */ +static int +dmu_zfetch_stream_insert(zfetch_t *zf, zstream_t *zs) +{ + zstream_t *zs_walk; + zstream_t *zs_next; + + ASSERT(RW_WRITE_HELD(&zf->zf_rwlock)); + + for (zs_walk = list_head(&zf->zf_stream); zs_walk; zs_walk = zs_next) { + zs_next = list_next(&zf->zf_stream, zs_walk); + + if (dmu_zfetch_streams_equal(zs_walk, zs)) { + return (0); + } + } + + list_insert_head(&zf->zf_stream, zs); + zf->zf_stream_cnt++; + + return (1); +} + + +/* + * Walk the list of zstreams in the given zfetch, find an old one (by time), and + * reclaim it for use by the caller. + */ +static zstream_t * +dmu_zfetch_stream_reclaim(zfetch_t *zf) +{ + zstream_t *zs; + + if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER)) + return (0); + + for (zs = list_head(&zf->zf_stream); zs; + zs = list_next(&zf->zf_stream, zs)) { + + if (((lbolt - zs->zst_last) / hz) > zfetch_min_sec_reap) + break; + } + + if (zs) { + dmu_zfetch_stream_remove(zf, zs); + mutex_destroy(&zs->zst_lock); + bzero(zs, sizeof (zstream_t)); + } else { + zf->zf_alloc_fail++; + } + rw_exit(&zf->zf_rwlock); + + return (zs); +} + +/* + * Given a zfetch and zstream structure, remove the zstream structure from its + * container in the zfetch structure. Perform the appropriate book-keeping. + */ +static void +dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs) +{ + ASSERT(RW_WRITE_HELD(&zf->zf_rwlock)); + + list_remove(&zf->zf_stream, zs); + zf->zf_stream_cnt--; +} + +static int +dmu_zfetch_streams_equal(zstream_t *zs1, zstream_t *zs2) +{ + if (zs1->zst_offset != zs2->zst_offset) + return (0); + + if (zs1->zst_len != zs2->zst_len) + return (0); + + if (zs1->zst_stride != zs2->zst_stride) + return (0); + + if (zs1->zst_ph_offset != zs2->zst_ph_offset) + return (0); + + if (zs1->zst_cap != zs2->zst_cap) + return (0); + + if (zs1->zst_direction != zs2->zst_direction) + return (0); + + return (1); +} + +/* + * This is the prefetch entry point. It calls all of the other dmu_zfetch + * routines to create, delete, find, or operate upon prefetch streams. + */ +void +dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched) +{ + zstream_t zst; + zstream_t *newstream; + int fetched; + int inserted; + unsigned int blkshft; + uint64_t blksz; + + if (zfs_prefetch_disable) + return; + + /* files that aren't ln2 blocksz are only one block -- nothing to do */ + if (!zf->zf_dnode->dn_datablkshift) + return; + + /* convert offset and size, into blockid and nblocks */ + blkshft = zf->zf_dnode->dn_datablkshift; + blksz = (1 << blkshft); + + bzero(&zst, sizeof (zstream_t)); + zst.zst_offset = offset >> blkshft; + zst.zst_len = (P2ROUNDUP(offset + size, blksz) - + P2ALIGN(offset, blksz)) >> blkshft; + + fetched = dmu_zfetch_find(zf, &zst, prefetched); + if (!fetched) { + fetched = dmu_zfetch_colinear(zf, &zst); + } + + if (!fetched) { + newstream = dmu_zfetch_stream_reclaim(zf); + + /* + * we still couldn't find a stream, drop the lock, and allocate + * one if possible. Otherwise, give up and go home. 
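+ * As a worked example (illustrative only): with the default
+ * zfetch_block_cap of 256, a file whose dn_maxblkid is 1000 allows
+ * MIN(zfetch_max_streams, 1000 / 256) == 3 streams. Once three exist,
+ * a read that can be neither matched to nor reclaimed from an existing
+ * stream simply goes unprefetched instead of allocating a fourth.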
+ */ + if (newstream == NULL) { + uint64_t maxblocks; + uint32_t max_streams; + uint32_t cur_streams; + + cur_streams = zf->zf_stream_cnt; + maxblocks = zf->zf_dnode->dn_maxblkid; + + max_streams = MIN(zfetch_max_streams, + (maxblocks / zfetch_block_cap)); + if (max_streams == 0) { + max_streams++; + } + + if (cur_streams >= max_streams) { + return; + } + + newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP); + } + + newstream->zst_offset = zst.zst_offset; + newstream->zst_len = zst.zst_len; + newstream->zst_stride = zst.zst_len; + newstream->zst_ph_offset = zst.zst_len + zst.zst_offset; + newstream->zst_cap = zst.zst_len; + newstream->zst_direction = ZFETCH_FORWARD; + newstream->zst_last = lbolt; + + mutex_init(&newstream->zst_lock, NULL, MUTEX_DEFAULT, NULL); + + rw_enter(&zf->zf_rwlock, RW_WRITER); + inserted = dmu_zfetch_stream_insert(zf, newstream); + rw_exit(&zf->zf_rwlock); + + if (!inserted) { + mutex_destroy(&newstream->zst_lock); + kmem_free(newstream, sizeof (zstream_t)); + } + } +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dnode.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dnode.c new file mode 100644 index 000000000000..e77834d60dcc --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dnode.c @@ -0,0 +1,1443 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int free_range_compar(const void *node1, const void *node2); + +static kmem_cache_t *dnode_cache; + +static dnode_phys_t dnode_phys_zero; + +int zfs_default_bs = SPA_MINBLOCKSHIFT; +int zfs_default_ibs = DN_MAX_INDBLKSHIFT; + +/* ARGSUSED */ +static int +dnode_cons(void *arg, void *unused, int kmflag) +{ + int i; + dnode_t *dn = arg; + bzero(dn, sizeof (dnode_t)); + + rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL); + mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL); + refcount_create(&dn->dn_holds); + refcount_create(&dn->dn_tx_holds); + + for (i = 0; i < TXG_SIZE; i++) { + avl_create(&dn->dn_ranges[i], free_range_compar, + sizeof (free_range_t), + offsetof(struct free_range, fr_node)); + list_create(&dn->dn_dirty_records[i], + sizeof (dbuf_dirty_record_t), + offsetof(dbuf_dirty_record_t, dr_dirty_node)); + } + + list_create(&dn->dn_dbufs, sizeof (dmu_buf_impl_t), + offsetof(dmu_buf_impl_t, db_link)); + + return (0); +} + +/* ARGSUSED */ +static void +dnode_dest(void *arg, void *unused) +{ + int i; + dnode_t *dn = arg; + + rw_destroy(&dn->dn_struct_rwlock); + mutex_destroy(&dn->dn_mtx); + mutex_destroy(&dn->dn_dbufs_mtx); + refcount_destroy(&dn->dn_holds); + refcount_destroy(&dn->dn_tx_holds); + + for (i = 0; i < TXG_SIZE; i++) { + avl_destroy(&dn->dn_ranges[i]); + list_destroy(&dn->dn_dirty_records[i]); + } + + list_destroy(&dn->dn_dbufs); +} + +void +dnode_init(void) +{ + dnode_cache = kmem_cache_create("dnode_t", + sizeof (dnode_t), + 0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0); +} + +void +dnode_fini(void) +{ + kmem_cache_destroy(dnode_cache); +} + + +#ifdef ZFS_DEBUG +void +dnode_verify(dnode_t *dn) +{ + int drop_struct_lock = FALSE; + + ASSERT(dn->dn_phys); + ASSERT(dn->dn_objset); + + ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES); + + if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY)) + return; + + if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { + rw_enter(&dn->dn_struct_rwlock, RW_READER); + drop_struct_lock = TRUE; + } + if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) { + int i; + ASSERT3U(dn->dn_indblkshift, >=, 0); + ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT); + if (dn->dn_datablkshift) { + ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT); + ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT); + ASSERT3U(1<dn_datablkshift, ==, dn->dn_datablksz); + } + ASSERT3U(dn->dn_nlevels, <=, 30); + ASSERT3U(dn->dn_type, <=, DMU_OT_NUMTYPES); + ASSERT3U(dn->dn_nblkptr, >=, 1); + ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR); + ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN); + ASSERT3U(dn->dn_datablksz, ==, + dn->dn_datablkszsec << SPA_MINBLOCKSHIFT); + ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0); + ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) + + dn->dn_bonuslen, <=, DN_MAX_BONUSLEN); + for (i = 0; i < TXG_SIZE; i++) { + ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels); + } + } + if (dn->dn_phys->dn_type != DMU_OT_NONE) + ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels); + ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || dn->dn_dbuf != NULL); + if (dn->dn_dbuf != NULL) { + ASSERT3P(dn->dn_phys, ==, + (dnode_phys_t *)dn->dn_dbuf->db.db_data + + (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT))); + } + if (drop_struct_lock) + rw_exit(&dn->dn_struct_rwlock); +} +#endif + +void +dnode_byteswap(dnode_phys_t *dnp) +{ + 
	uint64_t *buf64 = (void*)&dnp->dn_blkptr;
+	int i;
+
+	if (dnp->dn_type == DMU_OT_NONE) {
+		bzero(dnp, sizeof (dnode_phys_t));
+		return;
+	}
+
+	dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
+	dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
+	dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
+	dnp->dn_used = BSWAP_64(dnp->dn_used);
+
+	/*
+	 * dn_nblkptr is only one byte, so it's OK to read it in either
+	 * byte order.  We can't read dn_bonuslen.
+	 */
+	ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
+	ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
+	for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
+		buf64[i] = BSWAP_64(buf64[i]);
+
+	/*
+	 * OK to check dn_bonuslen for zero, because it won't matter if
+	 * we have the wrong byte order.  This is necessary because the
+	 * dnode dnode is smaller than a regular dnode.
+	 */
+	if (dnp->dn_bonuslen != 0) {
+		/*
+		 * Note that the bonus length calculated here may be
+		 * longer than the actual bonus buffer.  This is because
+		 * we always put the bonus buffer after the last block
+		 * pointer (instead of packing it against the end of the
+		 * dnode buffer).
+		 */
+		int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
+		size_t len = DN_MAX_BONUSLEN - off;
+		ASSERT3U(dnp->dn_bonustype, <, DMU_OT_NUMTYPES);
+		dmu_ot[dnp->dn_bonustype].ot_byteswap(dnp->dn_bonus + off, len);
+	}
+}
+
+void
+dnode_buf_byteswap(void *vbuf, size_t size)
+{
+	dnode_phys_t *buf = vbuf;
+	int i;
+
+	ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
+	ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
+
+	size >>= DNODE_SHIFT;
+	for (i = 0; i < size; i++) {
+		dnode_byteswap(buf);
+		buf++;
+	}
+}
+
+static int
+free_range_compar(const void *node1, const void *node2)
+{
+	const free_range_t *rp1 = node1;
+	const free_range_t *rp2 = node2;
+
+	if (rp1->fr_blkid < rp2->fr_blkid)
+		return (-1);
+	else if (rp1->fr_blkid > rp2->fr_blkid)
+		return (1);
+	else return (0);
+}
+
+void
+dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
+{
+	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
+
+	dnode_setdirty(dn, tx);
+	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
+	ASSERT3U(newsize, <=, DN_MAX_BONUSLEN -
+	    (dn->dn_nblkptr-1) * sizeof (blkptr_t));
+	dn->dn_bonuslen = newsize;
+	if (newsize == 0)
+		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
+	else
+		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
+	rw_exit(&dn->dn_struct_rwlock);
+}
+
+static void
+dnode_setdblksz(dnode_t *dn, int size)
+{
+	ASSERT3U(P2PHASE(size, SPA_MINBLOCKSIZE), ==, 0);
+	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
+	ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
+	ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
+	    1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
+	dn->dn_datablksz = size;
+	dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
+	dn->dn_datablkshift = ISP2(size) ?
highbit(size - 1) : 0; +} + +static dnode_t * +dnode_create(objset_impl_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db, + uint64_t object) +{ + dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP); + (void) dnode_cons(dn, NULL, 0); /* XXX */ + + dn->dn_objset = os; + dn->dn_object = object; + dn->dn_dbuf = db; + dn->dn_phys = dnp; + + if (dnp->dn_datablkszsec) + dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); + dn->dn_indblkshift = dnp->dn_indblkshift; + dn->dn_nlevels = dnp->dn_nlevels; + dn->dn_type = dnp->dn_type; + dn->dn_nblkptr = dnp->dn_nblkptr; + dn->dn_checksum = dnp->dn_checksum; + dn->dn_compress = dnp->dn_compress; + dn->dn_bonustype = dnp->dn_bonustype; + dn->dn_bonuslen = dnp->dn_bonuslen; + dn->dn_maxblkid = dnp->dn_maxblkid; + + dmu_zfetch_init(&dn->dn_zfetch, dn); + + ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES); + mutex_enter(&os->os_lock); + list_insert_head(&os->os_dnodes, dn); + mutex_exit(&os->os_lock); + + arc_space_consume(sizeof (dnode_t)); + return (dn); +} + +static void +dnode_destroy(dnode_t *dn) +{ + objset_impl_t *os = dn->dn_objset; + +#ifdef ZFS_DEBUG + int i; + + for (i = 0; i < TXG_SIZE; i++) { + ASSERT(!list_link_active(&dn->dn_dirty_link[i])); + ASSERT(NULL == list_head(&dn->dn_dirty_records[i])); + ASSERT(0 == avl_numnodes(&dn->dn_ranges[i])); + } + ASSERT(NULL == list_head(&dn->dn_dbufs)); +#endif + + mutex_enter(&os->os_lock); + list_remove(&os->os_dnodes, dn); + mutex_exit(&os->os_lock); + + if (dn->dn_dirtyctx_firstset) { + kmem_free(dn->dn_dirtyctx_firstset, 1); + dn->dn_dirtyctx_firstset = NULL; + } + dmu_zfetch_rele(&dn->dn_zfetch); + if (dn->dn_bonus) { + mutex_enter(&dn->dn_bonus->db_mtx); + dbuf_evict(dn->dn_bonus); + dn->dn_bonus = NULL; + } + kmem_cache_free(dnode_cache, dn); + arc_space_return(sizeof (dnode_t)); +} + +void +dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx) +{ + int i; + + if (blocksize == 0) + blocksize = 1 << zfs_default_bs; + else if (blocksize > SPA_MAXBLOCKSIZE) + blocksize = SPA_MAXBLOCKSIZE; + else + blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE); + + if (ibs == 0) + ibs = zfs_default_ibs; + + ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT); + + dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset, + dn->dn_object, tx->tx_txg, blocksize, ibs); + + ASSERT(dn->dn_type == DMU_OT_NONE); + ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0); + ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE); + ASSERT(ot != DMU_OT_NONE); + ASSERT3U(ot, <, DMU_OT_NUMTYPES); + ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) || + (bonustype != DMU_OT_NONE && bonuslen != 0)); + ASSERT3U(bonustype, <, DMU_OT_NUMTYPES); + ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN); + ASSERT(dn->dn_type == DMU_OT_NONE); + ASSERT3U(dn->dn_maxblkid, ==, 0); + ASSERT3U(dn->dn_allocated_txg, ==, 0); + ASSERT3U(dn->dn_assigned_txg, ==, 0); + ASSERT(refcount_is_zero(&dn->dn_tx_holds)); + ASSERT3U(refcount_count(&dn->dn_holds), <=, 1); + ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL); + + for (i = 0; i < TXG_SIZE; i++) { + ASSERT3U(dn->dn_next_nlevels[i], ==, 0); + ASSERT3U(dn->dn_next_indblkshift[i], ==, 0); + ASSERT3U(dn->dn_next_bonuslen[i], ==, 0); + ASSERT3U(dn->dn_next_blksz[i], ==, 0); + ASSERT(!list_link_active(&dn->dn_dirty_link[i])); + ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL); + ASSERT3U(avl_numnodes(&dn->dn_ranges[i]), ==, 0); + } + + dn->dn_type = ot; + dnode_setdblksz(dn, blocksize); + 
dn->dn_indblkshift = ibs; + dn->dn_nlevels = 1; + dn->dn_nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT); + dn->dn_bonustype = bonustype; + dn->dn_bonuslen = bonuslen; + dn->dn_checksum = ZIO_CHECKSUM_INHERIT; + dn->dn_compress = ZIO_COMPRESS_INHERIT; + dn->dn_dirtyctx = 0; + + dn->dn_free_txg = 0; + if (dn->dn_dirtyctx_firstset) { + kmem_free(dn->dn_dirtyctx_firstset, 1); + dn->dn_dirtyctx_firstset = NULL; + } + + dn->dn_allocated_txg = tx->tx_txg; + + dnode_setdirty(dn, tx); + dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs; + dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen; + dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz; +} + +void +dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx) +{ + int i, old_nblkptr; + dmu_buf_impl_t *db = NULL; + + ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE); + ASSERT3U(blocksize, <=, SPA_MAXBLOCKSIZE); + ASSERT3U(blocksize % SPA_MINBLOCKSIZE, ==, 0); + ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx)); + ASSERT(tx->tx_txg != 0); + ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) || + (bonustype != DMU_OT_NONE && bonuslen != 0)); + ASSERT3U(bonustype, <, DMU_OT_NUMTYPES); + ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN); + + for (i = 0; i < TXG_SIZE; i++) + ASSERT(!list_link_active(&dn->dn_dirty_link[i])); + + /* clean up any unreferenced dbufs */ + dnode_evict_dbufs(dn); + ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL); + + /* + * XXX I should really have a generation number to tell if we + * need to do this... + */ + if (blocksize != dn->dn_datablksz || + dn->dn_bonustype != bonustype || dn->dn_bonuslen != bonuslen) { + /* free all old data */ + dnode_free_range(dn, 0, -1ULL, tx); + } + + /* change blocksize */ + rw_enter(&dn->dn_struct_rwlock, RW_WRITER); + if (blocksize != dn->dn_datablksz && + (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) || + list_head(&dn->dn_dbufs) != NULL)) { + db = dbuf_hold(dn, 0, FTAG); + dbuf_new_size(db, blocksize, tx); + } + dnode_setdblksz(dn, blocksize); + dnode_setdirty(dn, tx); + dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen; + dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize; + rw_exit(&dn->dn_struct_rwlock); + if (db) + dbuf_rele(db, FTAG); + + /* change type */ + dn->dn_type = ot; + + /* change bonus size and type */ + mutex_enter(&dn->dn_mtx); + old_nblkptr = dn->dn_nblkptr; + dn->dn_bonustype = bonustype; + dn->dn_bonuslen = bonuslen; + dn->dn_nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT); + dn->dn_checksum = ZIO_CHECKSUM_INHERIT; + dn->dn_compress = ZIO_COMPRESS_INHERIT; + ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR); + + /* XXX - for now, we can't make nblkptr smaller */ + ASSERT3U(dn->dn_nblkptr, >=, old_nblkptr); + + /* fix up the bonus db_size if dn_nblkptr has changed */ + if (dn->dn_bonus && dn->dn_bonuslen != old_nblkptr) { + dn->dn_bonus->db.db_size = + DN_MAX_BONUSLEN - (dn->dn_nblkptr-1) * sizeof (blkptr_t); + ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size); + } + + dn->dn_allocated_txg = tx->tx_txg; + mutex_exit(&dn->dn_mtx); +} + +void +dnode_special_close(dnode_t *dn) +{ + /* + * Wait for final references to the dnode to clear. This can + * only happen if the arc is asyncronously evicting state that + * has a hold on this dnode while we are trying to evict this + * dnode. 
+ */ + while (refcount_count(&dn->dn_holds) > 0) + delay(1); + dnode_destroy(dn); +} + +dnode_t * +dnode_special_open(objset_impl_t *os, dnode_phys_t *dnp, uint64_t object) +{ + dnode_t *dn = dnode_create(os, dnp, NULL, object); + DNODE_VERIFY(dn); + return (dn); +} + +static void +dnode_buf_pageout(dmu_buf_t *db, void *arg) +{ + dnode_t **children_dnodes = arg; + int i; + int epb = db->db_size >> DNODE_SHIFT; + + for (i = 0; i < epb; i++) { + dnode_t *dn = children_dnodes[i]; + int n; + + if (dn == NULL) + continue; +#ifdef ZFS_DEBUG + /* + * If there are holds on this dnode, then there should + * be holds on the dnode's containing dbuf as well; thus + * it wouldn't be eligable for eviction and this function + * would not have been called. + */ + ASSERT(refcount_is_zero(&dn->dn_holds)); + ASSERT(list_head(&dn->dn_dbufs) == NULL); + ASSERT(refcount_is_zero(&dn->dn_tx_holds)); + + for (n = 0; n < TXG_SIZE; n++) + ASSERT(!list_link_active(&dn->dn_dirty_link[n])); +#endif + children_dnodes[i] = NULL; + dnode_destroy(dn); + } + kmem_free(children_dnodes, epb * sizeof (dnode_t *)); +} + +/* + * errors: + * EINVAL - invalid object number. + * EIO - i/o error. + * succeeds even for free dnodes. + */ +int +dnode_hold_impl(objset_impl_t *os, uint64_t object, int flag, + void *tag, dnode_t **dnp) +{ + int epb, idx, err; + int drop_struct_lock = FALSE; + int type; + uint64_t blk; + dnode_t *mdn, *dn; + dmu_buf_impl_t *db; + dnode_t **children_dnodes; + + /* + * If you are holding the spa config lock as writer, you shouldn't + * be asking the DMU to do *anything*. + */ + ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0); + + if (object == 0 || object >= DN_MAX_OBJECT) + return (EINVAL); + + mdn = os->os_meta_dnode; + + DNODE_VERIFY(mdn); + + if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) { + rw_enter(&mdn->dn_struct_rwlock, RW_READER); + drop_struct_lock = TRUE; + } + + blk = dbuf_whichblock(mdn, object * sizeof (dnode_phys_t)); + + db = dbuf_hold(mdn, blk, FTAG); + if (drop_struct_lock) + rw_exit(&mdn->dn_struct_rwlock); + if (db == NULL) + return (EIO); + err = dbuf_read(db, NULL, DB_RF_CANFAIL); + if (err) { + dbuf_rele(db, FTAG); + return (err); + } + + ASSERT3U(db->db.db_size, >=, 1<db.db_size >> DNODE_SHIFT; + + idx = object & (epb-1); + + children_dnodes = dmu_buf_get_user(&db->db); + if (children_dnodes == NULL) { + dnode_t **winner; + children_dnodes = kmem_zalloc(epb * sizeof (dnode_t *), + KM_SLEEP); + if (winner = dmu_buf_set_user(&db->db, children_dnodes, NULL, + dnode_buf_pageout)) { + kmem_free(children_dnodes, epb * sizeof (dnode_t *)); + children_dnodes = winner; + } + } + + if ((dn = children_dnodes[idx]) == NULL) { + dnode_phys_t *dnp = (dnode_phys_t *)db->db.db_data+idx; + dnode_t *winner; + + dn = dnode_create(os, dnp, db, object); + winner = atomic_cas_ptr(&children_dnodes[idx], NULL, dn); + if (winner != NULL) { + dnode_destroy(dn); + dn = winner; + } + } + + mutex_enter(&dn->dn_mtx); + type = dn->dn_type; + if (dn->dn_free_txg || + ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) || + ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)) { + mutex_exit(&dn->dn_mtx); + dbuf_rele(db, FTAG); + return (type == DMU_OT_NONE ? ENOENT : EEXIST); + } + mutex_exit(&dn->dn_mtx); + + if (refcount_add(&dn->dn_holds, tag) == 1) + dbuf_add_ref(db, dn); + + DNODE_VERIFY(dn); + ASSERT3P(dn->dn_dbuf, ==, db); + ASSERT3U(dn->dn_object, ==, object); + dbuf_rele(db, FTAG); + + *dnp = dn; + return (0); +} + +/* + * Return held dnode if the object is allocated, NULL if not. 
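+ *
+ * Typical use (a sketch, not taken from any one caller):
+ *
+ *	dnode_t *dn;
+ *	err = dnode_hold(os, object, FTAG, &dn);
+ *	if (err != 0)
+ *		return (err);	/* EINVAL, ENOENT or EIO */
+ *	... use dn, or take further holds on it ...
+ *	dnode_rele(dn, FTAG);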
+ */ +int +dnode_hold(objset_impl_t *os, uint64_t object, void *tag, dnode_t **dnp) +{ + return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp)); +} + +/* + * Can only add a reference if there is already at least one + * reference on the dnode. Returns FALSE if unable to add a + * new reference. + */ +boolean_t +dnode_add_ref(dnode_t *dn, void *tag) +{ + mutex_enter(&dn->dn_mtx); + if (refcount_is_zero(&dn->dn_holds)) { + mutex_exit(&dn->dn_mtx); + return (FALSE); + } + VERIFY(1 < refcount_add(&dn->dn_holds, tag)); + mutex_exit(&dn->dn_mtx); + return (TRUE); +} + +void +dnode_rele(dnode_t *dn, void *tag) +{ + uint64_t refs; + + mutex_enter(&dn->dn_mtx); + refs = refcount_remove(&dn->dn_holds, tag); + mutex_exit(&dn->dn_mtx); + /* NOTE: the DNODE_DNODE does not have a dn_dbuf */ + if (refs == 0 && dn->dn_dbuf) + dbuf_rele(dn->dn_dbuf, dn); +} + +void +dnode_setdirty(dnode_t *dn, dmu_tx_t *tx) +{ + objset_impl_t *os = dn->dn_objset; + uint64_t txg = tx->tx_txg; + + if (dn->dn_object == DMU_META_DNODE_OBJECT) + return; + + DNODE_VERIFY(dn); + +#ifdef ZFS_DEBUG + mutex_enter(&dn->dn_mtx); + ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg); + /* ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg); */ + mutex_exit(&dn->dn_mtx); +#endif + + mutex_enter(&os->os_lock); + + /* + * If we are already marked dirty, we're done. + */ + if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) { + mutex_exit(&os->os_lock); + return; + } + + ASSERT(!refcount_is_zero(&dn->dn_holds) || list_head(&dn->dn_dbufs)); + ASSERT(dn->dn_datablksz != 0); + ASSERT3U(dn->dn_next_bonuslen[txg&TXG_MASK], ==, 0); + ASSERT3U(dn->dn_next_blksz[txg&TXG_MASK], ==, 0); + + dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n", + dn->dn_object, txg); + + if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) { + list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn); + } else { + list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn); + } + + mutex_exit(&os->os_lock); + + /* + * The dnode maintains a hold on its containing dbuf as + * long as there are holds on it. Each instantiated child + * dbuf maintaines a hold on the dnode. When the last child + * drops its hold, the dnode will drop its hold on the + * containing dbuf. We add a "dirty hold" here so that the + * dnode will hang around after we finish processing its + * children. + */ + VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg)); + + (void) dbuf_dirty(dn->dn_dbuf, tx); + + dsl_dataset_dirty(os->os_dsl_dataset, tx); +} + +void +dnode_free(dnode_t *dn, dmu_tx_t *tx) +{ + int txgoff = tx->tx_txg & TXG_MASK; + + dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg); + + /* we should be the only holder... hopefully */ + /* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */ + + mutex_enter(&dn->dn_mtx); + if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) { + mutex_exit(&dn->dn_mtx); + return; + } + dn->dn_free_txg = tx->tx_txg; + mutex_exit(&dn->dn_mtx); + + /* + * If the dnode is already dirty, it needs to be moved from + * the dirty list to the free list. + */ + mutex_enter(&dn->dn_objset->os_lock); + if (list_link_active(&dn->dn_dirty_link[txgoff])) { + list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn); + list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn); + mutex_exit(&dn->dn_objset->os_lock); + } else { + mutex_exit(&dn->dn_objset->os_lock); + dnode_setdirty(dn, tx); + } +} + +/* + * Try to change the block size for the indicated dnode. 
This can only + * succeed if there are no blocks allocated or dirty beyond first block + */ +int +dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx) +{ + dmu_buf_impl_t *db, *db_next; + int err; + + if (size == 0) + size = SPA_MINBLOCKSIZE; + if (size > SPA_MAXBLOCKSIZE) + size = SPA_MAXBLOCKSIZE; + else + size = P2ROUNDUP(size, SPA_MINBLOCKSIZE); + + if (ibs == dn->dn_indblkshift) + ibs = 0; + + if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0) + return (0); + + rw_enter(&dn->dn_struct_rwlock, RW_WRITER); + + /* Check for any allocated blocks beyond the first */ + if (dn->dn_phys->dn_maxblkid != 0) + goto fail; + + mutex_enter(&dn->dn_dbufs_mtx); + for (db = list_head(&dn->dn_dbufs); db; db = db_next) { + db_next = list_next(&dn->dn_dbufs, db); + + if (db->db_blkid != 0 && db->db_blkid != DB_BONUS_BLKID) { + mutex_exit(&dn->dn_dbufs_mtx); + goto fail; + } + } + mutex_exit(&dn->dn_dbufs_mtx); + + if (ibs && dn->dn_nlevels != 1) + goto fail; + + /* resize the old block */ + err = dbuf_hold_impl(dn, 0, 0, TRUE, FTAG, &db); + if (err == 0) + dbuf_new_size(db, size, tx); + else if (err != ENOENT) + goto fail; + + dnode_setdblksz(dn, size); + dnode_setdirty(dn, tx); + dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size; + if (ibs) { + dn->dn_indblkshift = ibs; + dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs; + } + /* rele after we have fixed the blocksize in the dnode */ + if (db) + dbuf_rele(db, FTAG); + + rw_exit(&dn->dn_struct_rwlock); + return (0); + +fail: + rw_exit(&dn->dn_struct_rwlock); + return (ENOTSUP); +} + +/* read-holding callers must not rely on the lock being continuously held */ +void +dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read) +{ + uint64_t txgoff = tx->tx_txg & TXG_MASK; + int epbs, new_nlevels; + uint64_t sz; + + ASSERT(blkid != DB_BONUS_BLKID); + + ASSERT(have_read ? + RW_READ_HELD(&dn->dn_struct_rwlock) : + RW_WRITE_HELD(&dn->dn_struct_rwlock)); + + /* + * if we have a read-lock, check to see if we need to do any work + * before upgrading to a write-lock. + */ + if (have_read) { + if (blkid <= dn->dn_maxblkid) + return; + + if (!rw_tryupgrade(&dn->dn_struct_rwlock)) { + rw_exit(&dn->dn_struct_rwlock); + rw_enter(&dn->dn_struct_rwlock, RW_WRITER); + } + } + + if (blkid <= dn->dn_maxblkid) + goto out; + + dn->dn_maxblkid = blkid; + + /* + * Compute the number of levels necessary to support the new maxblkid. 
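+ * A worked example (illustrative only): with dn_indblkshift == 14 each
+ * indirect block holds 1 << (14 - SPA_BLKPTRSHIFT) == 128 block pointers
+ * (epbs == 7). For a dnode with dn_nblkptr == 3, blkids 0..2 need one
+ * level, blkids up to 3 * 128 - 1 == 383 need two, and blkid 384 pushes
+ * the loop below to three levels.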
+ */ + new_nlevels = 1; + epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; + for (sz = dn->dn_nblkptr; + sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs) + new_nlevels++; + + if (new_nlevels > dn->dn_nlevels) { + int old_nlevels = dn->dn_nlevels; + dmu_buf_impl_t *db; + list_t *list; + dbuf_dirty_record_t *new, *dr, *dr_next; + + dn->dn_nlevels = new_nlevels; + + ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]); + dn->dn_next_nlevels[txgoff] = new_nlevels; + + /* dirty the left indirects */ + db = dbuf_hold_level(dn, old_nlevels, 0, FTAG); + new = dbuf_dirty(db, tx); + dbuf_rele(db, FTAG); + + /* transfer the dirty records to the new indirect */ + mutex_enter(&dn->dn_mtx); + mutex_enter(&new->dt.di.dr_mtx); + list = &dn->dn_dirty_records[txgoff]; + for (dr = list_head(list); dr; dr = dr_next) { + dr_next = list_next(&dn->dn_dirty_records[txgoff], dr); + if (dr->dr_dbuf->db_level != new_nlevels-1 && + dr->dr_dbuf->db_blkid != DB_BONUS_BLKID) { + ASSERT(dr->dr_dbuf->db_level == old_nlevels-1); + list_remove(&dn->dn_dirty_records[txgoff], dr); + list_insert_tail(&new->dt.di.dr_children, dr); + dr->dr_parent = new; + } + } + mutex_exit(&new->dt.di.dr_mtx); + mutex_exit(&dn->dn_mtx); + } + +out: + if (have_read) + rw_downgrade(&dn->dn_struct_rwlock); +} + +void +dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx) +{ + avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK]; + avl_index_t where; + free_range_t *rp; + free_range_t rp_tofind; + uint64_t endblk = blkid + nblks; + + ASSERT(MUTEX_HELD(&dn->dn_mtx)); + ASSERT(nblks <= UINT64_MAX - blkid); /* no overflow */ + + dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n", + blkid, nblks, tx->tx_txg); + rp_tofind.fr_blkid = blkid; + rp = avl_find(tree, &rp_tofind, &where); + if (rp == NULL) + rp = avl_nearest(tree, where, AVL_BEFORE); + if (rp == NULL) + rp = avl_nearest(tree, where, AVL_AFTER); + + while (rp && (rp->fr_blkid <= blkid + nblks)) { + uint64_t fr_endblk = rp->fr_blkid + rp->fr_nblks; + free_range_t *nrp = AVL_NEXT(tree, rp); + + if (blkid <= rp->fr_blkid && endblk >= fr_endblk) { + /* clear this entire range */ + avl_remove(tree, rp); + kmem_free(rp, sizeof (free_range_t)); + } else if (blkid <= rp->fr_blkid && + endblk > rp->fr_blkid && endblk < fr_endblk) { + /* clear the beginning of this range */ + rp->fr_blkid = endblk; + rp->fr_nblks = fr_endblk - endblk; + } else if (blkid > rp->fr_blkid && blkid < fr_endblk && + endblk >= fr_endblk) { + /* clear the end of this range */ + rp->fr_nblks = blkid - rp->fr_blkid; + } else if (blkid > rp->fr_blkid && endblk < fr_endblk) { + /* clear a chunk out of this range */ + free_range_t *new_rp = + kmem_alloc(sizeof (free_range_t), KM_SLEEP); + + new_rp->fr_blkid = endblk; + new_rp->fr_nblks = fr_endblk - endblk; + avl_insert_here(tree, new_rp, rp, AVL_AFTER); + rp->fr_nblks = blkid - rp->fr_blkid; + } + /* there may be no overlap */ + rp = nrp; + } +} + +void +dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx) +{ + dmu_buf_impl_t *db; + uint64_t blkoff, blkid, nblks; + int blksz, blkshift, head, tail; + int trunc = FALSE; + int epbs; + + rw_enter(&dn->dn_struct_rwlock, RW_WRITER); + blksz = dn->dn_datablksz; + blkshift = dn->dn_datablkshift; + epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; + + if (len == -1ULL) { + len = UINT64_MAX - off; + trunc = TRUE; + } + + /* + * First, block align the region to free: + */ + if (ISP2(blksz)) { + head = P2NPHASE(off, blksz); + blkoff = P2PHASE(off, blksz); + if ((off >> blkshift) > dn->dn_maxblkid) + goto 
out; + } else { + ASSERT(dn->dn_maxblkid == 0); + if (off == 0 && len >= blksz) { + /* Freeing the whole block; fast-track this request */ + blkid = 0; + nblks = 1; + goto done; + } else if (off >= blksz) { + /* Freeing past end-of-data */ + goto out; + } else { + /* Freeing part of the block. */ + head = blksz - off; + ASSERT3U(head, >, 0); + } + blkoff = off; + } + /* zero out any partial block data at the start of the range */ + if (head) { + ASSERT3U(blkoff + head, ==, blksz); + if (len < head) + head = len; + if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off), TRUE, + FTAG, &db) == 0) { + caddr_t data; + + /* don't dirty if it isn't on disk and isn't dirty */ + if (db->db_last_dirty || + (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) { + rw_exit(&dn->dn_struct_rwlock); + dbuf_will_dirty(db, tx); + rw_enter(&dn->dn_struct_rwlock, RW_WRITER); + data = db->db.db_data; + bzero(data + blkoff, head); + } + dbuf_rele(db, FTAG); + } + off += head; + len -= head; + } + + /* If the range was less than one block, we're done */ + if (len == 0) + goto out; + + /* If the remaining range is past end of file, we're done */ + if ((off >> blkshift) > dn->dn_maxblkid) + goto out; + + ASSERT(ISP2(blksz)); + if (trunc) + tail = 0; + else + tail = P2PHASE(len, blksz); + + ASSERT3U(P2PHASE(off, blksz), ==, 0); + /* zero out any partial block data at the end of the range */ + if (tail) { + if (len < tail) + tail = len; + if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off+len), + TRUE, FTAG, &db) == 0) { + /* don't dirty if not on disk and not dirty */ + if (db->db_last_dirty || + (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) { + rw_exit(&dn->dn_struct_rwlock); + dbuf_will_dirty(db, tx); + rw_enter(&dn->dn_struct_rwlock, RW_WRITER); + bzero(db->db.db_data, tail); + } + dbuf_rele(db, FTAG); + } + len -= tail; + } + + /* If the range did not include a full block, we are done */ + if (len == 0) + goto out; + + ASSERT(IS_P2ALIGNED(off, blksz)); + ASSERT(trunc || IS_P2ALIGNED(len, blksz)); + blkid = off >> blkshift; + nblks = len >> blkshift; + if (trunc) + nblks += 1; + + /* + * Read in and mark all the level-1 indirects dirty, + * so that they will stay in memory until syncing phase. + * Always dirty the first and last indirect to make sure + * we dirty all the partial indirects. + */ + if (dn->dn_nlevels > 1) { + uint64_t i, first, last; + int shift = epbs + dn->dn_datablkshift; + + first = blkid >> epbs; + if (db = dbuf_hold_level(dn, 1, first, FTAG)) { + dbuf_will_dirty(db, tx); + dbuf_rele(db, FTAG); + } + if (trunc) + last = dn->dn_maxblkid >> epbs; + else + last = (blkid + nblks - 1) >> epbs; + if (last > first && (db = dbuf_hold_level(dn, 1, last, FTAG))) { + dbuf_will_dirty(db, tx); + dbuf_rele(db, FTAG); + } + for (i = first + 1; i < last; i++) { + uint64_t ibyte = i << shift; + int err; + + err = dnode_next_offset(dn, + DNODE_FIND_HAVELOCK, &ibyte, 1, 1, 0); + i = ibyte >> shift; + if (err == ESRCH || i >= last) + break; + ASSERT(err == 0); + db = dbuf_hold_level(dn, 1, i, FTAG); + if (db) { + dbuf_will_dirty(db, tx); + dbuf_rele(db, FTAG); + } + } + } +done: + /* + * Add this range to the dnode range list. + * We will finish up this free operation in the syncing phase. 
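+ * For example (illustrative only): freeing blocks 10-19 in txg 42 records
+ * a free_range_t with fr_blkid == 10 and fr_nblks == 10 in
+ * dn_ranges[42 & TXG_MASK]; the actual block pointers are freed later,
+ * when dnode_sync() processes that range list in syncing context.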
+ */ + mutex_enter(&dn->dn_mtx); + dnode_clear_range(dn, blkid, nblks, tx); + { + free_range_t *rp, *found; + avl_index_t where; + avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK]; + + /* Add new range to dn_ranges */ + rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP); + rp->fr_blkid = blkid; + rp->fr_nblks = nblks; + found = avl_find(tree, rp, &where); + ASSERT(found == NULL); + avl_insert(tree, rp, where); + dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n", + blkid, nblks, tx->tx_txg); + } + mutex_exit(&dn->dn_mtx); + + dbuf_free_range(dn, blkid, blkid + nblks - 1, tx); + dnode_setdirty(dn, tx); +out: + if (trunc && dn->dn_maxblkid >= (off >> blkshift)) + dn->dn_maxblkid = (off >> blkshift ? (off >> blkshift) - 1 : 0); + + rw_exit(&dn->dn_struct_rwlock); +} + +/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */ +uint64_t +dnode_block_freed(dnode_t *dn, uint64_t blkid) +{ + free_range_t range_tofind; + void *dp = spa_get_dsl(dn->dn_objset->os_spa); + int i; + + if (blkid == DB_BONUS_BLKID) + return (FALSE); + + /* + * If we're in the process of opening the pool, dp will not be + * set yet, but there shouldn't be anything dirty. + */ + if (dp == NULL) + return (FALSE); + + if (dn->dn_free_txg) + return (TRUE); + + /* + * If dn_datablkshift is not set, then there's only a single + * block, in which case there will never be a free range so it + * won't matter. + */ + range_tofind.fr_blkid = blkid; + mutex_enter(&dn->dn_mtx); + for (i = 0; i < TXG_SIZE; i++) { + free_range_t *range_found; + avl_index_t idx; + + range_found = avl_find(&dn->dn_ranges[i], &range_tofind, &idx); + if (range_found) { + ASSERT(range_found->fr_nblks > 0); + break; + } + range_found = avl_nearest(&dn->dn_ranges[i], idx, AVL_BEFORE); + if (range_found && + range_found->fr_blkid + range_found->fr_nblks > blkid) + break; + } + mutex_exit(&dn->dn_mtx); + return (i < TXG_SIZE); +} + +/* call from syncing context when we actually write/free space for this dnode */ +void +dnode_diduse_space(dnode_t *dn, int64_t delta) +{ + uint64_t space; + dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n", + dn, dn->dn_phys, + (u_longlong_t)dn->dn_phys->dn_used, + (longlong_t)delta); + + mutex_enter(&dn->dn_mtx); + space = DN_USED_BYTES(dn->dn_phys); + if (delta > 0) { + ASSERT3U(space + delta, >=, space); /* no overflow */ + } else { + ASSERT3U(space, >=, -delta); /* no underflow */ + } + space += delta; + if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) { + ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0); + ASSERT3U(P2PHASE(space, 1<dn_phys->dn_used = space >> DEV_BSHIFT; + } else { + dn->dn_phys->dn_used = space; + dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES; + } + mutex_exit(&dn->dn_mtx); +} + +/* + * Call when we think we're going to write/free space in open context. + * Be conservative (ie. OK to write less than this or free more than + * this, but don't write more or free less). 
+ */ +void +dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx) +{ + objset_impl_t *os = dn->dn_objset; + dsl_dataset_t *ds = os->os_dsl_dataset; + + if (space > 0) + space = spa_get_asize(os->os_spa, space); + + if (ds) + dsl_dir_willuse_space(ds->ds_dir, space, tx); + + dmu_tx_willuse_space(tx, space); +} + +static int +dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset, + int lvl, uint64_t blkfill, uint64_t txg) +{ + dmu_buf_impl_t *db = NULL; + void *data = NULL; + uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; + uint64_t epb = 1ULL << epbs; + uint64_t minfill, maxfill; + boolean_t hole; + int i, inc, error, span; + + dprintf("probing object %llu offset %llx level %d of %u\n", + dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels); + + hole = flags & DNODE_FIND_HOLE; + inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1; + ASSERT(txg == 0 || !hole); + + if (lvl == dn->dn_phys->dn_nlevels) { + error = 0; + epb = dn->dn_phys->dn_nblkptr; + data = dn->dn_phys->dn_blkptr; + } else { + uint64_t blkid = dbuf_whichblock(dn, *offset) >> (epbs * lvl); + error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FTAG, &db); + if (error) { + if (error != ENOENT) + return (error); + if (hole) + return (0); + /* + * This can only happen when we are searching up + * the block tree for data. We don't really need to + * adjust the offset, as we will just end up looking + * at the pointer to this block in its parent, and its + * going to be unallocated, so we will skip over it. + */ + return (ESRCH); + } + error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT); + if (error) { + dbuf_rele(db, FTAG); + return (error); + } + data = db->db.db_data; + } + + if (db && txg && + (db->db_blkptr == NULL || db->db_blkptr->blk_birth <= txg)) { + /* + * This can only happen when we are searching up the tree + * and these conditions mean that we need to keep climbing. + */ + error = ESRCH; + } else if (lvl == 0) { + dnode_phys_t *dnp = data; + span = DNODE_SHIFT; + ASSERT(dn->dn_type == DMU_OT_DNODE); + + for (i = (*offset >> span) & (blkfill - 1); + i >= 0 && i < blkfill; i += inc) { + boolean_t newcontents = B_TRUE; + if (txg) { + int j; + newcontents = B_FALSE; + for (j = 0; j < dnp[i].dn_nblkptr; j++) { + if (dnp[i].dn_blkptr[j].blk_birth > txg) + newcontents = B_TRUE; + } + } + if (!dnp[i].dn_type == hole && newcontents) + break; + *offset += (1ULL << span) * inc; + } + if (i < 0 || i == blkfill) + error = ESRCH; + } else { + blkptr_t *bp = data; + span = (lvl - 1) * epbs + dn->dn_datablkshift; + minfill = 0; + maxfill = blkfill << ((lvl - 1) * epbs); + + if (hole) + maxfill--; + else + minfill++; + + for (i = (*offset >> span) & ((1ULL << epbs) - 1); + i >= 0 && i < epb; i += inc) { + if (bp[i].blk_fill >= minfill && + bp[i].blk_fill <= maxfill && + (hole || bp[i].blk_birth > txg)) + break; + if (inc < 0 && *offset < (1ULL << span)) + *offset = 0; + else + *offset += (1ULL << span) * inc; + } + if (i < 0 || i == epb) + error = ESRCH; + } + + if (db) + dbuf_rele(db, FTAG); + + return (error); +} + +/* + * Find the next hole, data, or sparse region at or after *offset. + * The value 'blkfill' tells us how many items we expect to find + * in an L0 data block; this value is 1 for normal objects, + * DNODES_PER_BLOCK for the meta dnode, and some fraction of + * DNODES_PER_BLOCK when searching for sparse regions thereof. + * + * Examples: + * + * dnode_next_offset(dn, flags, offset, 1, 1, 0); + * Finds the next/previous hole/data in a file. + * Used in dmu_offset_next(). 
+ * + * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg); + * Finds the next free/allocated dnode an objset's meta-dnode. + * Only finds objects that have new contents since txg (ie. + * bonus buffer changes and content removal are ignored). + * Used in dmu_object_next(). + * + * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0); + * Finds the next L2 meta-dnode bp that's at most 1/4 full. + * Used in dmu_object_alloc(). + */ +int +dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset, + int minlvl, uint64_t blkfill, uint64_t txg) +{ + uint64_t initial_offset = *offset; + int lvl, maxlvl; + int error = 0; + + if (!(flags & DNODE_FIND_HAVELOCK)) + rw_enter(&dn->dn_struct_rwlock, RW_READER); + + if (dn->dn_phys->dn_nlevels == 0) { + error = ESRCH; + goto out; + } + + if (dn->dn_datablkshift == 0) { + if (*offset < dn->dn_datablksz) { + if (flags & DNODE_FIND_HOLE) + *offset = dn->dn_datablksz; + } else { + error = ESRCH; + } + goto out; + } + + maxlvl = dn->dn_phys->dn_nlevels; + + for (lvl = minlvl; lvl <= maxlvl; lvl++) { + error = dnode_next_offset_level(dn, + flags, offset, lvl, blkfill, txg); + if (error != ESRCH) + break; + } + + while (error == 0 && --lvl >= minlvl) { + error = dnode_next_offset_level(dn, + flags, offset, lvl, blkfill, txg); + } + + if (error == 0 && (flags & DNODE_FIND_BACKWARDS ? + initial_offset < *offset : initial_offset > *offset)) + error = ESRCH; +out: + if (!(flags & DNODE_FIND_HAVELOCK)) + rw_exit(&dn->dn_struct_rwlock); + + return (error); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dnode_sync.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dnode_sync.c new file mode 100644 index 000000000000..779cfc96f9e3 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dnode_sync.c @@ -0,0 +1,623 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include +#include +#include + +static void +dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx) +{ + dmu_buf_impl_t *db; + int txgoff = tx->tx_txg & TXG_MASK; + int nblkptr = dn->dn_phys->dn_nblkptr; + int old_toplvl = dn->dn_phys->dn_nlevels - 1; + int new_level = dn->dn_next_nlevels[txgoff]; + int i; + + rw_enter(&dn->dn_struct_rwlock, RW_WRITER); + + /* this dnode can't be paged out because it's dirty */ + ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE); + ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); + ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0); + + db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG); + ASSERT(db != NULL); + + dn->dn_phys->dn_nlevels = new_level; + dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset, + dn->dn_object, dn->dn_phys->dn_nlevels); + + /* check for existing blkptrs in the dnode */ + for (i = 0; i < nblkptr; i++) + if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i])) + break; + if (i != nblkptr) { + /* transfer dnode's block pointers to new indirect block */ + (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT); + ASSERT(db->db.db_data); + ASSERT(arc_released(db->db_buf)); + ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size); + bcopy(dn->dn_phys->dn_blkptr, db->db.db_data, + sizeof (blkptr_t) * nblkptr); + arc_buf_freeze(db->db_buf); + } + + /* set dbuf's parent pointers to new indirect buf */ + for (i = 0; i < nblkptr; i++) { + dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i); + + if (child == NULL) + continue; + ASSERT3P(child->db_dnode, ==, dn); + if (child->db_parent && child->db_parent != dn->dn_dbuf) { + ASSERT(child->db_parent->db_level == db->db_level); + ASSERT(child->db_blkptr != + &dn->dn_phys->dn_blkptr[child->db_blkid]); + mutex_exit(&child->db_mtx); + continue; + } + ASSERT(child->db_parent == NULL || + child->db_parent == dn->dn_dbuf); + + child->db_parent = db; + dbuf_add_ref(db, child); + if (db->db.db_data) + child->db_blkptr = (blkptr_t *)db->db.db_data + i; + else + child->db_blkptr = NULL; + dprintf_dbuf_bp(child, child->db_blkptr, + "changed db_blkptr to new indirect %s", ""); + + mutex_exit(&child->db_mtx); + } + + bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr); + + dbuf_rele(db, FTAG); + + rw_exit(&dn->dn_struct_rwlock); +} + +static int +free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; + uint64_t bytesfreed = 0; + int i, blocks_freed = 0; + + dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num); + + for (i = 0; i < num; i++, bp++) { + if (BP_IS_HOLE(bp)) + continue; + + bytesfreed += dsl_dataset_block_kill(ds, bp, dn->dn_zio, tx); + ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys)); + bzero(bp, sizeof (blkptr_t)); + blocks_freed += 1; + } + dnode_diduse_space(dn, -bytesfreed); + return (blocks_freed); +} + +#ifdef ZFS_DEBUG +static void +free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx) +{ + int off, num; + int i, err, epbs; + uint64_t txg = tx->tx_txg; + + epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; + off = start - (db->db_blkid * 1<=, 0); + ASSERT3U(num, >=, 0); + ASSERT3U(db->db_level, >, 0); + ASSERT3U(db->db.db_size, ==, 1<db_dnode->dn_phys->dn_indblkshift); + ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT); + ASSERT(db->db_blkptr != NULL); + + for (i = off; i < off+num; i++) { + uint64_t *buf; + dmu_buf_impl_t *child; + dbuf_dirty_record_t *dr; + int j; + 
+ ASSERT(db->db_level == 1); + + rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER); + err = dbuf_hold_impl(db->db_dnode, db->db_level-1, + (db->db_blkid << epbs) + i, TRUE, FTAG, &child); + rw_exit(&db->db_dnode->dn_struct_rwlock); + if (err == ENOENT) + continue; + ASSERT(err == 0); + ASSERT(child->db_level == 0); + dr = child->db_last_dirty; + while (dr && dr->dr_txg > txg) + dr = dr->dr_next; + ASSERT(dr == NULL || dr->dr_txg == txg); + + /* data_old better be zeroed */ + if (dr) { + buf = dr->dt.dl.dr_data->b_data; + for (j = 0; j < child->db.db_size >> 3; j++) { + if (buf[j] != 0) { + panic("freed data not zero: " + "child=%p i=%d off=%d num=%d\n", + (void *)child, i, off, num); + } + } + } + + /* + * db_data better be zeroed unless it's dirty in a + * future txg. + */ + mutex_enter(&child->db_mtx); + buf = child->db.db_data; + if (buf != NULL && child->db_state != DB_FILL && + child->db_last_dirty == NULL) { + for (j = 0; j < child->db.db_size >> 3; j++) { + if (buf[j] != 0) { + panic("freed data not zero: " + "child=%p i=%d off=%d num=%d\n", + (void *)child, i, off, num); + } + } + } + mutex_exit(&child->db_mtx); + + dbuf_rele(child, FTAG); + } +} +#endif + +#define ALL -1 + +static int +free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc, + dmu_tx_t *tx) +{ + dnode_t *dn = db->db_dnode; + blkptr_t *bp; + dmu_buf_impl_t *subdb; + uint64_t start, end, dbstart, dbend, i; + int epbs, shift, err; + int all = TRUE; + int blocks_freed = 0; + + /* + * There is a small possibility that this block will not be cached: + * 1 - if level > 1 and there are no children with level <= 1 + * 2 - if we didn't get a dirty hold (because this block had just + * finished being written -- and so had no holds), and then this + * block got evicted before we got here. + */ + if (db->db_state != DB_CACHED) + (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); + + arc_release(db->db_buf, db); + bp = (blkptr_t *)db->db.db_data; + + epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; + shift = (db->db_level - 1) * epbs; + dbstart = db->db_blkid << epbs; + start = blkid >> shift; + if (dbstart < start) { + bp += start - dbstart; + all = FALSE; + } else { + start = dbstart; + } + dbend = ((db->db_blkid + 1) << epbs) - 1; + end = (blkid + nblks - 1) >> shift; + if (dbend <= end) + end = dbend; + else if (all) + all = trunc; + ASSERT3U(start, <=, end); + + if (db->db_level == 1) { + FREE_VERIFY(db, start, end, tx); + blocks_freed = free_blocks(dn, bp, end-start+1, tx); + arc_buf_freeze(db->db_buf); + ASSERT(all || blocks_freed == 0 || db->db_last_dirty); + return (all ? ALL : blocks_freed); + } + + for (i = start; i <= end; i++, bp++) { + if (BP_IS_HOLE(bp)) + continue; + rw_enter(&dn->dn_struct_rwlock, RW_READER); + err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb); + ASSERT3U(err, ==, 0); + rw_exit(&dn->dn_struct_rwlock); + + if (free_children(subdb, blkid, nblks, trunc, tx) == ALL) { + ASSERT3P(subdb->db_blkptr, ==, bp); + blocks_freed += free_blocks(dn, bp, 1, tx); + } else { + all = FALSE; + } + dbuf_rele(subdb, FTAG); + } + arc_buf_freeze(db->db_buf); +#ifdef ZFS_DEBUG + bp -= (end-start)+1; + for (i = start; i <= end; i++, bp++) { + if (i == start && blkid != 0) + continue; + else if (i == end && !trunc) + continue; + ASSERT3U(bp->blk_birth, ==, 0); + } +#endif + ASSERT(all || blocks_freed == 0 || db->db_last_dirty); + return (all ? 
ALL : blocks_freed); +} + +/* + * free_range: Traverse the indicated range of the provided file + * and "free" all the blocks contained there. + */ +static void +dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx) +{ + blkptr_t *bp = dn->dn_phys->dn_blkptr; + dmu_buf_impl_t *db; + int trunc, start, end, shift, i, err; + int dnlevel = dn->dn_phys->dn_nlevels; + + if (blkid > dn->dn_phys->dn_maxblkid) + return; + + ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX); + trunc = blkid + nblks > dn->dn_phys->dn_maxblkid; + if (trunc) + nblks = dn->dn_phys->dn_maxblkid - blkid + 1; + + /* There are no indirect blocks in the object */ + if (dnlevel == 1) { + if (blkid >= dn->dn_phys->dn_nblkptr) { + /* this range was never made persistent */ + return; + } + ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr); + (void) free_blocks(dn, bp + blkid, nblks, tx); + if (trunc) { + uint64_t off = (dn->dn_phys->dn_maxblkid + 1) * + (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT); + dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0); + ASSERT(off < dn->dn_phys->dn_maxblkid || + dn->dn_phys->dn_maxblkid == 0 || + dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0); + } + return; + } + + shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT); + start = blkid >> shift; + ASSERT(start < dn->dn_phys->dn_nblkptr); + end = (blkid + nblks - 1) >> shift; + bp += start; + for (i = start; i <= end; i++, bp++) { + if (BP_IS_HOLE(bp)) + continue; + rw_enter(&dn->dn_struct_rwlock, RW_READER); + err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db); + ASSERT3U(err, ==, 0); + rw_exit(&dn->dn_struct_rwlock); + + if (free_children(db, blkid, nblks, trunc, tx) == ALL) { + ASSERT3P(db->db_blkptr, ==, bp); + (void) free_blocks(dn, bp, 1, tx); + } + dbuf_rele(db, FTAG); + } + if (trunc) { + uint64_t off = (dn->dn_phys->dn_maxblkid + 1) * + (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT); + dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0); + ASSERT(off < dn->dn_phys->dn_maxblkid || + dn->dn_phys->dn_maxblkid == 0 || + dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0); + } +} + +/* + * Try to kick all the dnodes dbufs out of the cache... + */ +void +dnode_evict_dbufs(dnode_t *dn) +{ + int progress; + int pass = 0; + + do { + dmu_buf_impl_t *db, marker; + int evicting = FALSE; + + progress = FALSE; + mutex_enter(&dn->dn_dbufs_mtx); + list_insert_tail(&dn->dn_dbufs, &marker); + db = list_head(&dn->dn_dbufs); + for (; db != &marker; db = list_head(&dn->dn_dbufs)) { + list_remove(&dn->dn_dbufs, db); + list_insert_tail(&dn->dn_dbufs, db); + ASSERT3P(db->db_dnode, ==, dn); + + mutex_enter(&db->db_mtx); + if (db->db_state == DB_EVICTING) { + progress = TRUE; + evicting = TRUE; + mutex_exit(&db->db_mtx); + } else if (refcount_is_zero(&db->db_holds)) { + progress = TRUE; + dbuf_clear(db); /* exits db_mtx for us */ + } else { + mutex_exit(&db->db_mtx); + } + + } + list_remove(&dn->dn_dbufs, &marker); + /* + * NB: we need to drop dn_dbufs_mtx between passes so + * that any DB_EVICTING dbufs can make progress. + * Ideally, we would have some cv we could wait on, but + * since we don't, just wait a bit to give the other + * thread a chance to run. 
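
A marker node is what lets the eviction loop above visit every dbuf exactly once while entries are being removed and re-queued underneath it. A minimal standalone illustration of that traversal pattern (toy circular list, no locking, all names invented) follows.

/* Sketch of marker-based list traversal; list_t here is a toy, not list(9F). */
#include <stdio.h>

typedef struct node {
	struct node *next, *prev;
	int val;			/* -1 marks the sentinel/marker */
} node_t;

static node_t head = { &head, &head, 0 };	/* circular list head */

static void
insert_tail(node_t *n)
{
	n->prev = head.prev;
	n->next = &head;
	head.prev->next = n;
	head.prev = n;
}

static void
remove_node(node_t *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int
main(void)
{
	node_t a = { 0, 0, 1 }, b = { 0, 0, 2 }, c = { 0, 0, 3 };
	node_t marker = { 0, 0, -1 };

	insert_tail(&a);
	insert_tail(&b);
	insert_tail(&c);

	/*
	 * Park a marker at the tail, then keep taking the head and
	 * rotating it to the tail until the head *is* the marker.
	 * Every original element is visited exactly once even though
	 * the list is reshuffled as we go (cf. the dn_dbufs walk).
	 */
	insert_tail(&marker);
	for (node_t *n = head.next; n != &marker; n = head.next) {
		remove_node(n);
		insert_tail(n);		/* re-queue behind the marker */
		printf("visiting %d\n", n->val);
	}
	remove_node(&marker);
	return (0);
}
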
+ */ + mutex_exit(&dn->dn_dbufs_mtx); + if (evicting) + delay(1); + pass++; + ASSERT(pass < 100); /* sanity check */ + } while (progress); + + rw_enter(&dn->dn_struct_rwlock, RW_WRITER); + if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) { + mutex_enter(&dn->dn_bonus->db_mtx); + dbuf_evict(dn->dn_bonus); + dn->dn_bonus = NULL; + } + rw_exit(&dn->dn_struct_rwlock); +} + +static void +dnode_undirty_dbufs(list_t *list) +{ + dbuf_dirty_record_t *dr; + + while (dr = list_head(list)) { + dmu_buf_impl_t *db = dr->dr_dbuf; + uint64_t txg = dr->dr_txg; + + mutex_enter(&db->db_mtx); + /* XXX - use dbuf_undirty()? */ + list_remove(list, dr); + ASSERT(db->db_last_dirty == dr); + db->db_last_dirty = NULL; + db->db_dirtycnt -= 1; + if (db->db_level == 0) { + ASSERT(db->db_blkid == DB_BONUS_BLKID || + dr->dt.dl.dr_data == db->db_buf); + dbuf_unoverride(dr); + mutex_exit(&db->db_mtx); + } else { + mutex_exit(&db->db_mtx); + dnode_undirty_dbufs(&dr->dt.di.dr_children); + } + kmem_free(dr, sizeof (dbuf_dirty_record_t)); + dbuf_rele(db, (void *)(uintptr_t)txg); + } +} + +static void +dnode_sync_free(dnode_t *dn, dmu_tx_t *tx) +{ + int txgoff = tx->tx_txg & TXG_MASK; + + ASSERT(dmu_tx_is_syncing(tx)); + + /* + * Our contents should have been freed in dnode_sync() by the + * free range record inserted by the caller of dnode_free(). + */ + ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0); + ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr)); + + dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]); + dnode_evict_dbufs(dn); + ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL); + + /* + * XXX - It would be nice to assert this, but we may still + * have residual holds from async evictions from the arc... + * + * zfs_obj_to_path() also depends on this being + * commented out. + * + * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); + */ + + /* Undirty next bits */ + dn->dn_next_nlevels[txgoff] = 0; + dn->dn_next_indblkshift[txgoff] = 0; + dn->dn_next_blksz[txgoff] = 0; + + /* ASSERT(blkptrs are zero); */ + ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE); + ASSERT(dn->dn_type != DMU_OT_NONE); + + ASSERT(dn->dn_free_txg > 0); + if (dn->dn_allocated_txg != dn->dn_free_txg) + dbuf_will_dirty(dn->dn_dbuf, tx); + bzero(dn->dn_phys, sizeof (dnode_phys_t)); + + mutex_enter(&dn->dn_mtx); + dn->dn_type = DMU_OT_NONE; + dn->dn_maxblkid = 0; + dn->dn_allocated_txg = 0; + dn->dn_free_txg = 0; + mutex_exit(&dn->dn_mtx); + + ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); + + dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg); + /* + * Now that we've released our hold, the dnode may + * be evicted, so we musn't access it. + */ +} + +/* + * Write out the dnode's dirty buffers. + * + * NOTE: The dnode is kept in memory by being dirty. Once the + * dirty bit is cleared, it may be evicted. Beware of this! + */ +void +dnode_sync(dnode_t *dn, dmu_tx_t *tx) +{ + free_range_t *rp; + dnode_phys_t *dnp = dn->dn_phys; + int txgoff = tx->tx_txg & TXG_MASK; + list_t *list = &dn->dn_dirty_records[txgoff]; + + ASSERT(dmu_tx_is_syncing(tx)); + ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg); + DNODE_VERIFY(dn); + + ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf)); + + mutex_enter(&dn->dn_mtx); + if (dn->dn_allocated_txg == tx->tx_txg) { + /* The dnode is newly allocated or reallocated */ + if (dnp->dn_type == DMU_OT_NONE) { + /* this is a first alloc, not a realloc */ + /* XXX shouldn't the phys already be zeroed? 
*/ + bzero(dnp, DNODE_CORE_SIZE); + dnp->dn_nlevels = 1; + } + + if (dn->dn_nblkptr > dnp->dn_nblkptr) { + /* zero the new blkptrs we are gaining */ + bzero(dnp->dn_blkptr + dnp->dn_nblkptr, + sizeof (blkptr_t) * + (dn->dn_nblkptr - dnp->dn_nblkptr)); + } + dnp->dn_type = dn->dn_type; + dnp->dn_bonustype = dn->dn_bonustype; + dnp->dn_bonuslen = dn->dn_bonuslen; + dnp->dn_nblkptr = dn->dn_nblkptr; + } + + ASSERT(dnp->dn_nlevels > 1 || + BP_IS_HOLE(&dnp->dn_blkptr[0]) || + BP_GET_LSIZE(&dnp->dn_blkptr[0]) == + dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); + + if (dn->dn_next_blksz[txgoff]) { + ASSERT(P2PHASE(dn->dn_next_blksz[txgoff], + SPA_MINBLOCKSIZE) == 0); + ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) || + dn->dn_maxblkid == 0 || list_head(list) != NULL || + dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT == + dnp->dn_datablkszsec); + dnp->dn_datablkszsec = + dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT; + dn->dn_next_blksz[txgoff] = 0; + } + + if (dn->dn_next_bonuslen[txgoff]) { + if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN) + dnp->dn_bonuslen = 0; + else + dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff]; + ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN); + dn->dn_next_bonuslen[txgoff] = 0; + } + + if (dn->dn_next_indblkshift[txgoff]) { + ASSERT(dnp->dn_nlevels == 1); + dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff]; + dn->dn_next_indblkshift[txgoff] = 0; + } + + /* + * Just take the live (open-context) values for checksum and compress. + * Strictly speaking it's a future leak, but nothing bad happens if we + * start using the new checksum or compress algorithm a little early. + */ + dnp->dn_checksum = dn->dn_checksum; + dnp->dn_compress = dn->dn_compress; + + mutex_exit(&dn->dn_mtx); + + /* process all the "freed" ranges in the file */ + while (rp = avl_last(&dn->dn_ranges[txgoff])) { + dnode_sync_free_range(dn, rp->fr_blkid, rp->fr_nblks, tx); + /* grab the mutex so we don't race with dnode_block_freed() */ + mutex_enter(&dn->dn_mtx); + avl_remove(&dn->dn_ranges[txgoff], rp); + mutex_exit(&dn->dn_mtx); + kmem_free(rp, sizeof (free_range_t)); + } + + if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) { + dnode_sync_free(dn, tx); + return; + } + + if (dn->dn_next_nlevels[txgoff]) { + dnode_increase_indirection(dn, tx); + dn->dn_next_nlevels[txgoff] = 0; + } + + dbuf_sync_list(list, tx); + + if (dn->dn_object != DMU_META_DNODE_OBJECT) { + ASSERT3P(list_head(list), ==, NULL); + dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg); + } + + /* + * Although we have dropped our reference to the dnode, it + * can't be evicted until its written, and we haven't yet + * initiated the IO for the dnode's dbuf. + */ +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_dataset.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_dataset.c new file mode 100644 index 000000000000..93ea8aa11173 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_dataset.c @@ -0,0 +1,3103 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char *dsl_reaper = "the grim reaper"; + +static dsl_checkfunc_t dsl_dataset_destroy_begin_check; +static dsl_syncfunc_t dsl_dataset_destroy_begin_sync; +static dsl_checkfunc_t dsl_dataset_rollback_check; +static dsl_syncfunc_t dsl_dataset_rollback_sync; +static dsl_syncfunc_t dsl_dataset_set_reservation_sync; + +#define DS_REF_MAX (1ULL << 62) + +#define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE + +#define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper) + + +/* + * Figure out how much of this delta should be propogated to the dsl_dir + * layer. If there's a refreservation, that space has already been + * partially accounted for in our ancestors. + */ +static int64_t +parent_delta(dsl_dataset_t *ds, int64_t delta) +{ + uint64_t old_bytes, new_bytes; + + if (ds->ds_reserved == 0) + return (delta); + + old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved); + new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved); + + ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta)); + return (new_bytes - old_bytes); +} + +void +dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx) +{ + int used = bp_get_dasize(tx->tx_pool->dp_spa, bp); + int compressed = BP_GET_PSIZE(bp); + int uncompressed = BP_GET_UCSIZE(bp); + int64_t delta; + + dprintf_bp(bp, "born, ds=%p\n", ds); + + ASSERT(dmu_tx_is_syncing(tx)); + /* It could have been compressed away to nothing */ + if (BP_IS_HOLE(bp)) + return; + ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE); + ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES); + if (ds == NULL) { + /* + * Account for the meta-objset space in its placeholder + * dsl_dir. + */ + ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */ + dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD, + used, compressed, uncompressed, tx); + dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); + return; + } + dmu_buf_will_dirty(ds->ds_dbuf, tx); + mutex_enter(&ds->ds_dir->dd_lock); + mutex_enter(&ds->ds_lock); + delta = parent_delta(ds, used); + ds->ds_phys->ds_used_bytes += used; + ds->ds_phys->ds_compressed_bytes += compressed; + ds->ds_phys->ds_uncompressed_bytes += uncompressed; + ds->ds_phys->ds_unique_bytes += used; + mutex_exit(&ds->ds_lock); + dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta, + compressed, uncompressed, tx); + dsl_dir_transfer_space(ds->ds_dir, used - delta, + DD_USED_REFRSRV, DD_USED_HEAD, tx); + mutex_exit(&ds->ds_dir->dd_lock); +} + +int +dsl_dataset_block_kill(dsl_dataset_t *ds, blkptr_t *bp, zio_t *pio, + dmu_tx_t *tx) +{ + int used = bp_get_dasize(tx->tx_pool->dp_spa, bp); + int compressed = BP_GET_PSIZE(bp); + int uncompressed = BP_GET_UCSIZE(bp); + + ASSERT(pio != NULL); + ASSERT(dmu_tx_is_syncing(tx)); + /* No block pointer => nothing to free */ + if (BP_IS_HOLE(bp)) + return (0); + + ASSERT(used > 0); + if (ds == NULL) { + int err; + /* + * Account for the meta-objset space in its placeholder + * dataset. 
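
parent_delta(), defined above, is pure arithmetic: when a refreservation is in place, only growth beyond the reserved amount is charged on to the parent dsl_dir, because the reservation itself was charged when it was established. A standalone sketch with made-up numbers; unlike the real function, which takes the dsl_dataset_t, the dataset fields are passed here as plain integers.

#include <stdint.h>
#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

/*
 * Mirror of the parent_delta() arithmetic: only the portion of the
 * change that pushes unique_bytes past the refreservation is passed
 * on to the parent accounting.
 */
static int64_t
parent_delta(uint64_t unique_bytes, uint64_t reserved, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (reserved == 0)
		return (delta);

	old_bytes = MAX(unique_bytes, reserved);
	new_bytes = MAX(unique_bytes + delta, reserved);
	return ((int64_t)(new_bytes - old_bytes));
}

int
main(void)
{
	/* 10M reservation, 4M unique: a 2M write stays inside the reservation */
	printf("%lld\n", (long long)parent_delta(4 << 20, 10 << 20, 2 << 20)); /* 0 */
	/* same dataset: an 8M write pushes 2M past the reservation */
	printf("%lld\n", (long long)parent_delta(4 << 20, 10 << 20, 8 << 20)); /* 2097152 */
	return (0);
}
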
+ */ + err = dsl_free(pio, tx->tx_pool, + tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT); + ASSERT(err == 0); + + dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD, + -used, -compressed, -uncompressed, tx); + dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); + return (used); + } + ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool); + + ASSERT(!dsl_dataset_is_snapshot(ds)); + dmu_buf_will_dirty(ds->ds_dbuf, tx); + + if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) { + int err; + int64_t delta; + + dprintf_bp(bp, "freeing: %s", ""); + err = dsl_free(pio, tx->tx_pool, + tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT); + ASSERT(err == 0); + + mutex_enter(&ds->ds_dir->dd_lock); + mutex_enter(&ds->ds_lock); + ASSERT(ds->ds_phys->ds_unique_bytes >= used || + !DS_UNIQUE_IS_ACCURATE(ds)); + delta = parent_delta(ds, -used); + ds->ds_phys->ds_unique_bytes -= used; + mutex_exit(&ds->ds_lock); + dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, + delta, -compressed, -uncompressed, tx); + dsl_dir_transfer_space(ds->ds_dir, -used - delta, + DD_USED_REFRSRV, DD_USED_HEAD, tx); + mutex_exit(&ds->ds_dir->dd_lock); + } else { + dprintf_bp(bp, "putting on dead list: %s", ""); + VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx)); + ASSERT3U(ds->ds_prev->ds_object, ==, + ds->ds_phys->ds_prev_snap_obj); + ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0); + /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */ + if (ds->ds_prev->ds_phys->ds_next_snap_obj == + ds->ds_object && bp->blk_birth > + ds->ds_prev->ds_phys->ds_prev_snap_txg) { + dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); + mutex_enter(&ds->ds_prev->ds_lock); + ds->ds_prev->ds_phys->ds_unique_bytes += used; + mutex_exit(&ds->ds_prev->ds_lock); + } + if (bp->blk_birth > ds->ds_origin_txg) { + dsl_dir_transfer_space(ds->ds_dir, used, + DD_USED_HEAD, DD_USED_SNAP, tx); + } + } + mutex_enter(&ds->ds_lock); + ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used); + ds->ds_phys->ds_used_bytes -= used; + ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed); + ds->ds_phys->ds_compressed_bytes -= compressed; + ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed); + ds->ds_phys->ds_uncompressed_bytes -= uncompressed; + mutex_exit(&ds->ds_lock); + + return (used); +} + +uint64_t +dsl_dataset_prev_snap_txg(dsl_dataset_t *ds) +{ + uint64_t trysnap = 0; + + if (ds == NULL) + return (0); + /* + * The snapshot creation could fail, but that would cause an + * incorrect FALSE return, which would only result in an + * overestimation of the amount of space that an operation would + * consume, which is OK. + * + * There's also a small window where we could miss a pending + * snapshot, because we could set the sync task in the quiescing + * phase. So this should only be used as a guess. 
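
The branch in dsl_dataset_block_kill() above applies a single rule: a block born after the most recent snapshot is unique to the live head and can be freed on the spot, while an older block is still referenced by that snapshot and is queued on the head's deadlist instead. A deliberately tiny standalone illustration (the helper name is invented):

#include <stdint.h>
#include <stdio.h>

/* blocks born after the latest snapshot are freed now; older ones are deferred */
static const char *
kill_disposition(uint64_t blk_birth, uint64_t prev_snap_txg)
{
	return (blk_birth > prev_snap_txg ? "free now" : "deadlist");
}

int
main(void)
{
	uint64_t prev_snap_txg = 100;	/* txg of the most recent snapshot */

	printf("born in txg 150: %s\n", kill_disposition(150, prev_snap_txg));
	printf("born in txg  80: %s\n", kill_disposition(80, prev_snap_txg));
	return (0);
}
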
+ */ + if (ds->ds_trysnap_txg > + spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa)) + trysnap = ds->ds_trysnap_txg; + return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap)); +} + +int +dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth) +{ + return (blk_birth > dsl_dataset_prev_snap_txg(ds)); +} + +/* ARGSUSED */ +static void +dsl_dataset_evict(dmu_buf_t *db, void *dsv) +{ + dsl_dataset_t *ds = dsv; + + ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds)); + + dprintf_ds(ds, "evicting %s\n", ""); + + unique_remove(ds->ds_fsid_guid); + + if (ds->ds_user_ptr != NULL) + ds->ds_user_evict_func(ds, ds->ds_user_ptr); + + if (ds->ds_prev) { + dsl_dataset_drop_ref(ds->ds_prev, ds); + ds->ds_prev = NULL; + } + + bplist_close(&ds->ds_deadlist); + if (ds->ds_dir) + dsl_dir_close(ds->ds_dir, ds); + + ASSERT(!list_link_active(&ds->ds_synced_link)); + + mutex_destroy(&ds->ds_lock); + mutex_destroy(&ds->ds_opening_lock); + mutex_destroy(&ds->ds_deadlist.bpl_lock); + rw_destroy(&ds->ds_rwlock); + cv_destroy(&ds->ds_exclusive_cv); + + kmem_free(ds, sizeof (dsl_dataset_t)); +} + +static int +dsl_dataset_get_snapname(dsl_dataset_t *ds) +{ + dsl_dataset_phys_t *headphys; + int err; + dmu_buf_t *headdbuf; + dsl_pool_t *dp = ds->ds_dir->dd_pool; + objset_t *mos = dp->dp_meta_objset; + + if (ds->ds_snapname[0]) + return (0); + if (ds->ds_phys->ds_next_snap_obj == 0) + return (0); + + err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj, + FTAG, &headdbuf); + if (err) + return (err); + headphys = headdbuf->db_data; + err = zap_value_search(dp->dp_meta_objset, + headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname); + dmu_buf_rele(headdbuf, FTAG); + return (err); +} + +static int +dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value) +{ + objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; + uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; + matchtype_t mt; + int err; + + if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) + mt = MT_FIRST; + else + mt = MT_EXACT; + + err = zap_lookup_norm(mos, snapobj, name, 8, 1, + value, mt, NULL, 0, NULL); + if (err == ENOTSUP && mt == MT_FIRST) + err = zap_lookup(mos, snapobj, name, 8, 1, value); + return (err); +} + +static int +dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx) +{ + objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; + uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; + matchtype_t mt; + int err; + + if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) + mt = MT_FIRST; + else + mt = MT_EXACT; + + err = zap_remove_norm(mos, snapobj, name, mt, tx); + if (err == ENOTSUP && mt == MT_FIRST) + err = zap_remove(mos, snapobj, name, tx); + return (err); +} + +static int +dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag, + dsl_dataset_t **dsp) +{ + objset_t *mos = dp->dp_meta_objset; + dmu_buf_t *dbuf; + dsl_dataset_t *ds; + int err; + + ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || + dsl_pool_sync_context(dp)); + + err = dmu_bonus_hold(mos, dsobj, tag, &dbuf); + if (err) + return (err); + ds = dmu_buf_get_user(dbuf); + if (ds == NULL) { + dsl_dataset_t *winner; + + ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP); + ds->ds_dbuf = dbuf; + ds->ds_object = dsobj; + ds->ds_phys = dbuf->db_data; + + mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&ds->ds_deadlist.bpl_lock, NULL, MUTEX_DEFAULT, + NULL); + rw_init(&ds->ds_rwlock, 0, 0, 0); + cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, 
NULL); + + err = bplist_open(&ds->ds_deadlist, + mos, ds->ds_phys->ds_deadlist_obj); + if (err == 0) { + err = dsl_dir_open_obj(dp, + ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir); + } + if (err) { + /* + * we don't really need to close the blist if we + * just opened it. + */ + mutex_destroy(&ds->ds_lock); + mutex_destroy(&ds->ds_opening_lock); + mutex_destroy(&ds->ds_deadlist.bpl_lock); + rw_destroy(&ds->ds_rwlock); + cv_destroy(&ds->ds_exclusive_cv); + kmem_free(ds, sizeof (dsl_dataset_t)); + dmu_buf_rele(dbuf, tag); + return (err); + } + + if (!dsl_dataset_is_snapshot(ds)) { + ds->ds_snapname[0] = '\0'; + if (ds->ds_phys->ds_prev_snap_obj) { + err = dsl_dataset_get_ref(dp, + ds->ds_phys->ds_prev_snap_obj, + ds, &ds->ds_prev); + } + + if (err == 0 && dsl_dir_is_clone(ds->ds_dir)) { + dsl_dataset_t *origin; + + err = dsl_dataset_hold_obj(dp, + ds->ds_dir->dd_phys->dd_origin_obj, + FTAG, &origin); + if (err == 0) { + ds->ds_origin_txg = + origin->ds_phys->ds_creation_txg; + dsl_dataset_rele(origin, FTAG); + } + } + } else if (zfs_flags & ZFS_DEBUG_SNAPNAMES) { + err = dsl_dataset_get_snapname(ds); + } + + if (err == 0 && !dsl_dataset_is_snapshot(ds)) { + /* + * In sync context, we're called with either no lock + * or with the write lock. If we're not syncing, + * we're always called with the read lock held. + */ + boolean_t need_lock = + !RW_WRITE_HELD(&dp->dp_config_rwlock) && + dsl_pool_sync_context(dp); + + if (need_lock) + rw_enter(&dp->dp_config_rwlock, RW_READER); + + err = dsl_prop_get_ds(ds, + "refreservation", sizeof (uint64_t), 1, + &ds->ds_reserved, NULL); + if (err == 0) { + err = dsl_prop_get_ds(ds, + "refquota", sizeof (uint64_t), 1, + &ds->ds_quota, NULL); + } + + if (need_lock) + rw_exit(&dp->dp_config_rwlock); + } else { + ds->ds_reserved = ds->ds_quota = 0; + } + + if (err == 0) { + winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys, + dsl_dataset_evict); + } + if (err || winner) { + bplist_close(&ds->ds_deadlist); + if (ds->ds_prev) + dsl_dataset_drop_ref(ds->ds_prev, ds); + dsl_dir_close(ds->ds_dir, ds); + mutex_destroy(&ds->ds_lock); + mutex_destroy(&ds->ds_opening_lock); + mutex_destroy(&ds->ds_deadlist.bpl_lock); + rw_destroy(&ds->ds_rwlock); + cv_destroy(&ds->ds_exclusive_cv); + kmem_free(ds, sizeof (dsl_dataset_t)); + if (err) { + dmu_buf_rele(dbuf, tag); + return (err); + } + ds = winner; + } else { + ds->ds_fsid_guid = + unique_insert(ds->ds_phys->ds_fsid_guid); + } + } + ASSERT3P(ds->ds_dbuf, ==, dbuf); + ASSERT3P(ds->ds_phys, ==, dbuf->db_data); + ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 || + spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN || + dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap); + mutex_enter(&ds->ds_lock); + if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) { + mutex_exit(&ds->ds_lock); + dmu_buf_rele(ds->ds_dbuf, tag); + return (ENOENT); + } + mutex_exit(&ds->ds_lock); + *dsp = ds; + return (0); +} + +static int +dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag) +{ + dsl_pool_t *dp = ds->ds_dir->dd_pool; + + /* + * In syncing context we don't want the rwlock lock: there + * may be an existing writer waiting for sync phase to + * finish. We don't need to worry about such writers, since + * sync phase is single-threaded, so the writer can't be + * doing anything while we are active. + */ + if (dsl_pool_sync_context(dp)) { + ASSERT(!DSL_DATASET_IS_DESTROYED(ds)); + return (0); + } + + /* + * Normal users will hold the ds_rwlock as a READER until they + * are finished (i.e., call dsl_dataset_rele()). 
"Owners" will + * drop their READER lock after they set the ds_owner field. + * + * If the dataset is being destroyed, the destroy thread will + * obtain a WRITER lock for exclusive access after it's done its + * open-context work and then change the ds_owner to + * dsl_reaper once destruction is assured. So threads + * may block here temporarily, until the "destructability" of + * the dataset is determined. + */ + ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock)); + mutex_enter(&ds->ds_lock); + while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) { + rw_exit(&dp->dp_config_rwlock); + cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock); + if (DSL_DATASET_IS_DESTROYED(ds)) { + mutex_exit(&ds->ds_lock); + dsl_dataset_drop_ref(ds, tag); + rw_enter(&dp->dp_config_rwlock, RW_READER); + return (ENOENT); + } + rw_enter(&dp->dp_config_rwlock, RW_READER); + } + mutex_exit(&ds->ds_lock); + return (0); +} + +int +dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag, + dsl_dataset_t **dsp) +{ + int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp); + + if (err) + return (err); + return (dsl_dataset_hold_ref(*dsp, tag)); +} + +int +dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, int flags, void *owner, + dsl_dataset_t **dsp) +{ + int err = dsl_dataset_hold_obj(dp, dsobj, owner, dsp); + + ASSERT(DS_MODE_TYPE(flags) != DS_MODE_USER); + + if (err) + return (err); + if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) { + dsl_dataset_rele(*dsp, owner); + return (EBUSY); + } + return (0); +} + +int +dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp) +{ + dsl_dir_t *dd; + dsl_pool_t *dp; + const char *snapname; + uint64_t obj; + int err = 0; + + err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname); + if (err) + return (err); + + dp = dd->dd_pool; + obj = dd->dd_phys->dd_head_dataset_obj; + rw_enter(&dp->dp_config_rwlock, RW_READER); + if (obj) + err = dsl_dataset_get_ref(dp, obj, tag, dsp); + else + err = ENOENT; + if (err) + goto out; + + err = dsl_dataset_hold_ref(*dsp, tag); + + /* we may be looking for a snapshot */ + if (err == 0 && snapname != NULL) { + dsl_dataset_t *ds = NULL; + + if (*snapname++ != '@') { + dsl_dataset_rele(*dsp, tag); + err = ENOENT; + goto out; + } + + dprintf("looking for snapshot '%s'\n", snapname); + err = dsl_dataset_snap_lookup(*dsp, snapname, &obj); + if (err == 0) + err = dsl_dataset_get_ref(dp, obj, tag, &ds); + dsl_dataset_rele(*dsp, tag); + + ASSERT3U((err == 0), ==, (ds != NULL)); + + if (ds) { + mutex_enter(&ds->ds_lock); + if (ds->ds_snapname[0] == 0) + (void) strlcpy(ds->ds_snapname, snapname, + sizeof (ds->ds_snapname)); + mutex_exit(&ds->ds_lock); + err = dsl_dataset_hold_ref(ds, tag); + *dsp = err ? 
NULL : ds; + } + } +out: + rw_exit(&dp->dp_config_rwlock); + dsl_dir_close(dd, FTAG); + return (err); +} + +int +dsl_dataset_own(const char *name, int flags, void *owner, dsl_dataset_t **dsp) +{ + int err = dsl_dataset_hold(name, owner, dsp); + if (err) + return (err); + if ((*dsp)->ds_phys->ds_num_children > 0 && + !DS_MODE_IS_READONLY(flags)) { + dsl_dataset_rele(*dsp, owner); + return (EROFS); + } + if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) { + dsl_dataset_rele(*dsp, owner); + return (EBUSY); + } + return (0); +} + +void +dsl_dataset_name(dsl_dataset_t *ds, char *name) +{ + if (ds == NULL) { + (void) strcpy(name, "mos"); + } else { + dsl_dir_name(ds->ds_dir, name); + VERIFY(0 == dsl_dataset_get_snapname(ds)); + if (ds->ds_snapname[0]) { + (void) strcat(name, "@"); + /* + * We use a "recursive" mutex so that we + * can call dprintf_ds() with ds_lock held. + */ + if (!MUTEX_HELD(&ds->ds_lock)) { + mutex_enter(&ds->ds_lock); + (void) strcat(name, ds->ds_snapname); + mutex_exit(&ds->ds_lock); + } else { + (void) strcat(name, ds->ds_snapname); + } + } + } +} + +static int +dsl_dataset_namelen(dsl_dataset_t *ds) +{ + int result; + + if (ds == NULL) { + result = 3; /* "mos" */ + } else { + result = dsl_dir_namelen(ds->ds_dir); + VERIFY(0 == dsl_dataset_get_snapname(ds)); + if (ds->ds_snapname[0]) { + ++result; /* adding one for the @-sign */ + if (!MUTEX_HELD(&ds->ds_lock)) { + mutex_enter(&ds->ds_lock); + result += strlen(ds->ds_snapname); + mutex_exit(&ds->ds_lock); + } else { + result += strlen(ds->ds_snapname); + } + } + } + + return (result); +} + +void +dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag) +{ + dmu_buf_rele(ds->ds_dbuf, tag); +} + +void +dsl_dataset_rele(dsl_dataset_t *ds, void *tag) +{ + if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) { + rw_exit(&ds->ds_rwlock); + } + dsl_dataset_drop_ref(ds, tag); +} + +void +dsl_dataset_disown(dsl_dataset_t *ds, void *owner) +{ + ASSERT((ds->ds_owner == owner && ds->ds_dbuf) || + (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL)); + + mutex_enter(&ds->ds_lock); + ds->ds_owner = NULL; + if (RW_WRITE_HELD(&ds->ds_rwlock)) { + rw_exit(&ds->ds_rwlock); + cv_broadcast(&ds->ds_exclusive_cv); + } + mutex_exit(&ds->ds_lock); + if (ds->ds_dbuf) + dsl_dataset_drop_ref(ds, owner); + else + dsl_dataset_evict(ds->ds_dbuf, ds); +} + +boolean_t +dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *owner) +{ + boolean_t gotit = FALSE; + + mutex_enter(&ds->ds_lock); + if (ds->ds_owner == NULL && + (!DS_IS_INCONSISTENT(ds) || inconsistentok)) { + ds->ds_owner = owner; + if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) + rw_exit(&ds->ds_rwlock); + gotit = TRUE; + } + mutex_exit(&ds->ds_lock); + return (gotit); +} + +void +dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner) +{ + ASSERT3P(owner, ==, ds->ds_owner); + if (!RW_WRITE_HELD(&ds->ds_rwlock)) + rw_enter(&ds->ds_rwlock, RW_WRITER); +} + +uint64_t +dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin, + uint64_t flags, dmu_tx_t *tx) +{ + dsl_pool_t *dp = dd->dd_pool; + dmu_buf_t *dbuf; + dsl_dataset_phys_t *dsphys; + uint64_t dsobj; + objset_t *mos = dp->dp_meta_objset; + + if (origin == NULL) + origin = dp->dp_origin_snap; + + ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp); + ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0); + ASSERT(dmu_tx_is_syncing(tx)); + ASSERT(dd->dd_phys->dd_head_dataset_obj == 0); + + dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, + DMU_OT_DSL_DATASET, sizeof 
(dsl_dataset_phys_t), tx); + VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); + dmu_buf_will_dirty(dbuf, tx); + dsphys = dbuf->db_data; + bzero(dsphys, sizeof (dsl_dataset_phys_t)); + dsphys->ds_dir_obj = dd->dd_object; + dsphys->ds_flags = flags; + dsphys->ds_fsid_guid = unique_create(); + (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, + sizeof (dsphys->ds_guid)); + dsphys->ds_snapnames_zapobj = + zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP, + DMU_OT_NONE, 0, tx); + dsphys->ds_creation_time = gethrestime_sec(); + dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg; + dsphys->ds_deadlist_obj = + bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx); + + if (origin) { + dsphys->ds_prev_snap_obj = origin->ds_object; + dsphys->ds_prev_snap_txg = + origin->ds_phys->ds_creation_txg; + dsphys->ds_used_bytes = + origin->ds_phys->ds_used_bytes; + dsphys->ds_compressed_bytes = + origin->ds_phys->ds_compressed_bytes; + dsphys->ds_uncompressed_bytes = + origin->ds_phys->ds_uncompressed_bytes; + dsphys->ds_bp = origin->ds_phys->ds_bp; + dsphys->ds_flags |= origin->ds_phys->ds_flags; + + dmu_buf_will_dirty(origin->ds_dbuf, tx); + origin->ds_phys->ds_num_children++; + + if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) { + if (origin->ds_phys->ds_next_clones_obj == 0) { + origin->ds_phys->ds_next_clones_obj = + zap_create(mos, + DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx); + } + VERIFY(0 == zap_add_int(mos, + origin->ds_phys->ds_next_clones_obj, + dsobj, tx)); + } + + dmu_buf_will_dirty(dd->dd_dbuf, tx); + dd->dd_phys->dd_origin_obj = origin->ds_object; + } + + if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) + dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; + + dmu_buf_rele(dbuf, FTAG); + + dmu_buf_will_dirty(dd->dd_dbuf, tx); + dd->dd_phys->dd_head_dataset_obj = dsobj; + + return (dsobj); +} + +uint64_t +dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname, + dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx) +{ + dsl_pool_t *dp = pdd->dd_pool; + uint64_t dsobj, ddobj; + dsl_dir_t *dd; + + ASSERT(lastname[0] != '@'); + + ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx); + VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd)); + + dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx); + + dsl_deleg_set_create_perms(dd, tx, cr); + + dsl_dir_close(dd, FTAG); + + return (dsobj); +} + +struct destroyarg { + dsl_sync_task_group_t *dstg; + char *snapname; + char *failed; +}; + +static int +dsl_snapshot_destroy_one(char *name, void *arg) +{ + struct destroyarg *da = arg; + dsl_dataset_t *ds; + char *cp; + int err; + + (void) strcat(name, "@"); + (void) strcat(name, da->snapname); + err = dsl_dataset_own(name, DS_MODE_READONLY | DS_MODE_INCONSISTENT, + da->dstg, &ds); + cp = strchr(name, '@'); + *cp = '\0'; + if (err == 0) { + dsl_dataset_make_exclusive(ds, da->dstg); + if (ds->ds_user_ptr) { + ds->ds_user_evict_func(ds, ds->ds_user_ptr); + ds->ds_user_ptr = NULL; + } + dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check, + dsl_dataset_destroy_sync, ds, da->dstg, 0); + } else if (err == ENOENT) { + err = 0; + } else { + (void) strcpy(da->failed, name); + } + return (err); +} + +/* + * Destroy 'snapname' in all descendants of 'fsname'. 
+ */ +#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy +int +dsl_snapshots_destroy(char *fsname, char *snapname) +{ + int err; + struct destroyarg da; + dsl_sync_task_t *dst; + spa_t *spa; + + err = spa_open(fsname, &spa, FTAG); + if (err) + return (err); + da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); + da.snapname = snapname; + da.failed = fsname; + + err = dmu_objset_find(fsname, + dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN); + + if (err == 0) + err = dsl_sync_task_group_wait(da.dstg); + + for (dst = list_head(&da.dstg->dstg_tasks); dst; + dst = list_next(&da.dstg->dstg_tasks, dst)) { + dsl_dataset_t *ds = dst->dst_arg1; + /* + * Return the file system name that triggered the error + */ + if (dst->dst_err) { + dsl_dataset_name(ds, fsname); + *strchr(fsname, '@') = '\0'; + } + dsl_dataset_disown(ds, da.dstg); + } + + dsl_sync_task_group_destroy(da.dstg); + spa_close(spa, FTAG); + return (err); +} + +/* + * ds must be opened as OWNER. On return (whether successful or not), + * ds will be closed and caller can no longer dereference it. + */ +int +dsl_dataset_destroy(dsl_dataset_t *ds, void *tag) +{ + int err; + dsl_sync_task_group_t *dstg; + objset_t *os; + dsl_dir_t *dd; + uint64_t obj; + + if (dsl_dataset_is_snapshot(ds)) { + /* Destroying a snapshot is simpler */ + dsl_dataset_make_exclusive(ds, tag); + + if (ds->ds_user_ptr) { + ds->ds_user_evict_func(ds, ds->ds_user_ptr); + ds->ds_user_ptr = NULL; + } + err = dsl_sync_task_do(ds->ds_dir->dd_pool, + dsl_dataset_destroy_check, dsl_dataset_destroy_sync, + ds, tag, 0); + goto out; + } + + dd = ds->ds_dir; + + /* + * Check for errors and mark this ds as inconsistent, in + * case we crash while freeing the objects. + */ + err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check, + dsl_dataset_destroy_begin_sync, ds, NULL, 0); + if (err) + goto out; + + err = dmu_objset_open_ds(ds, DMU_OST_ANY, &os); + if (err) + goto out; + + /* + * remove the objects in open context, so that we won't + * have too much to do in syncing context. + */ + for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, + ds->ds_phys->ds_prev_snap_txg)) { + /* + * Ignore errors, if there is not enough disk space + * we will deal with it in dsl_dataset_destroy_sync(). + */ + (void) dmu_free_object(os, obj); + } + + dmu_objset_close(os); + if (err != ESRCH) + goto out; + + rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); + err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd); + rw_exit(&dd->dd_pool->dp_config_rwlock); + + if (err) + goto out; + + if (ds->ds_user_ptr) { + /* + * We need to sync out all in-flight IO before we try + * to evict (the dataset evict func is trying to clear + * the cached entries for this dataset in the ARC). + */ + txg_wait_synced(dd->dd_pool, 0); + } + + /* + * Blow away the dsl_dir + head dataset. 
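
The open-context cleanup above is an iterate-until-ESRCH loop: keep asking for the next allocated object and free it, ignoring per-object errors, until the iterator reports nothing is left. A self-contained toy version (array-backed object set, invented names, ESRCH defined locally) shows the shape of that loop.

/* Toy model of the "free every object until ESRCH" loop; not the DMU API. */
#include <stdint.h>
#include <stdio.h>

#define ESRCH	3	/* stand-in errno: no more objects */
#define NOBJ	8

static int allocated[NOBJ] = { 0, 1, 1, 0, 1, 0, 0, 1 };	/* toy objset */

/* advance *obj to the next allocated object; ESRCH when none remain */
static int
object_next(uint64_t *obj)
{
	for (uint64_t o = *obj + 1; o < NOBJ; o++)
		if (allocated[o]) {
			*obj = o;
			return (0);
		}
	return (ESRCH);
}

static void
free_object(uint64_t obj)
{
	allocated[obj] = 0;
	printf("freed object %llu\n", (unsigned long long)obj);
}

int
main(void)
{
	uint64_t obj;
	int err;

	/* same shape as the destroy loop: walk and free until ESRCH */
	for (obj = 0, err = 0; err == 0; err = object_next(&obj))
		if (obj != 0)	/* obj is still 0 before the first object_next() */
			free_object(obj);

	printf("done, err=%d (ESRCH expected)\n", err);
	return (0);
}
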
+ */ + dsl_dataset_make_exclusive(ds, tag); + if (ds->ds_user_ptr) { + ds->ds_user_evict_func(ds, ds->ds_user_ptr); + ds->ds_user_ptr = NULL; + } + dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool); + dsl_sync_task_create(dstg, dsl_dataset_destroy_check, + dsl_dataset_destroy_sync, ds, tag, 0); + dsl_sync_task_create(dstg, dsl_dir_destroy_check, + dsl_dir_destroy_sync, dd, FTAG, 0); + err = dsl_sync_task_group_wait(dstg); + dsl_sync_task_group_destroy(dstg); + /* if it is successful, dsl_dir_destroy_sync will close the dd */ + if (err) + dsl_dir_close(dd, FTAG); +out: + dsl_dataset_disown(ds, tag); + return (err); +} + +int +dsl_dataset_rollback(dsl_dataset_t *ds, dmu_objset_type_t ost) +{ + int err; + + ASSERT(ds->ds_owner); + + dsl_dataset_make_exclusive(ds, ds->ds_owner); + err = dsl_sync_task_do(ds->ds_dir->dd_pool, + dsl_dataset_rollback_check, dsl_dataset_rollback_sync, + ds, &ost, 0); + /* drop exclusive access */ + mutex_enter(&ds->ds_lock); + rw_exit(&ds->ds_rwlock); + cv_broadcast(&ds->ds_exclusive_cv); + mutex_exit(&ds->ds_lock); + return (err); +} + +void * +dsl_dataset_set_user_ptr(dsl_dataset_t *ds, + void *p, dsl_dataset_evict_func_t func) +{ + void *old; + + mutex_enter(&ds->ds_lock); + old = ds->ds_user_ptr; + if (old == NULL) { + ds->ds_user_ptr = p; + ds->ds_user_evict_func = func; + } + mutex_exit(&ds->ds_lock); + return (old); +} + +void * +dsl_dataset_get_user_ptr(dsl_dataset_t *ds) +{ + return (ds->ds_user_ptr); +} + + +blkptr_t * +dsl_dataset_get_blkptr(dsl_dataset_t *ds) +{ + return (&ds->ds_phys->ds_bp); +} + +void +dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx) +{ + ASSERT(dmu_tx_is_syncing(tx)); + /* If it's the meta-objset, set dp_meta_rootbp */ + if (ds == NULL) { + tx->tx_pool->dp_meta_rootbp = *bp; + } else { + dmu_buf_will_dirty(ds->ds_dbuf, tx); + ds->ds_phys->ds_bp = *bp; + } +} + +spa_t * +dsl_dataset_get_spa(dsl_dataset_t *ds) +{ + return (ds->ds_dir->dd_pool->dp_spa); +} + +void +dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx) +{ + dsl_pool_t *dp; + + if (ds == NULL) /* this is the meta-objset */ + return; + + ASSERT(ds->ds_user_ptr != NULL); + + if (ds->ds_phys->ds_next_snap_obj != 0) + panic("dirtying snapshot!"); + + dp = ds->ds_dir->dd_pool; + + if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) { + /* up the hold count until we can be written out */ + dmu_buf_add_ref(ds->ds_dbuf, ds); + } +} + +/* + * The unique space in the head dataset can be calculated by subtracting + * the space used in the most recent snapshot, that is still being used + * in this file system, from the space currently in use. To figure out + * the space in the most recent snapshot still in use, we need to take + * the total space used in the snapshot and subtract out the space that + * has been freed up since the snapshot was taken. 
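
The rule described above (and implemented in dsl_dataset_recalc_head_uniq() just below) reduces to one subtraction: unique space is what the head uses, minus the part of the latest snapshot's usage that has not yet been freed from the head. A standalone worked example with made-up megabyte figures:

#include <stdint.h>
#include <stdio.h>

/*
 * unique = used_bytes - (mrs_used - dlused)
 *   used_bytes: space currently charged to the head
 *   mrs_used:   space used by the most recent snapshot
 *   dlused:     space freed from the head since that snapshot (deadlist)
 */
static uint64_t
recalc_head_uniq(uint64_t used_bytes, uint64_t mrs_used, uint64_t dlused)
{
	return (used_bytes - (mrs_used - dlused));
}

int
main(void)
{
	uint64_t used_bytes = 120;	/* MB charged to the head today */
	uint64_t mrs_used = 100;	/* MB the most recent snapshot used */
	uint64_t dlused = 30;		/* MB of that already freed from the head */

	/* 100 - 30 = 70 MB still shared, so 120 - 70 = 50 MB are unique */
	printf("unique = %llu MB\n",
	    (unsigned long long)recalc_head_uniq(used_bytes, mrs_used, dlused));
	return (0);
}
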
+ */ +static void +dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds) +{ + uint64_t mrs_used; + uint64_t dlused, dlcomp, dluncomp; + + ASSERT(ds->ds_object == ds->ds_dir->dd_phys->dd_head_dataset_obj); + + if (ds->ds_phys->ds_prev_snap_obj != 0) + mrs_used = ds->ds_prev->ds_phys->ds_used_bytes; + else + mrs_used = 0; + + VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp, + &dluncomp)); + + ASSERT3U(dlused, <=, mrs_used); + ds->ds_phys->ds_unique_bytes = + ds->ds_phys->ds_used_bytes - (mrs_used - dlused); + + if (!DS_UNIQUE_IS_ACCURATE(ds) && + spa_version(ds->ds_dir->dd_pool->dp_spa) >= + SPA_VERSION_UNIQUE_ACCURATE) + ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; +} + +static uint64_t +dsl_dataset_unique(dsl_dataset_t *ds) +{ + if (!DS_UNIQUE_IS_ACCURATE(ds) && !dsl_dataset_is_snapshot(ds)) + dsl_dataset_recalc_head_uniq(ds); + + return (ds->ds_phys->ds_unique_bytes); +} + +struct killarg { + dsl_dataset_t *ds; + zio_t *zio; + dmu_tx_t *tx; +}; + +/* ARGSUSED */ +static int +kill_blkptr(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb, + const dnode_phys_t *dnp, void *arg) +{ + struct killarg *ka = arg; + + if (bp == NULL) + return (0); + + ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg); + (void) dsl_dataset_block_kill(ka->ds, bp, ka->zio, ka->tx); + + return (0); +} + +/* ARGSUSED */ +static int +dsl_dataset_rollback_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + dmu_objset_type_t *ost = arg2; + + /* + * We can only roll back to emptyness if it is a ZPL objset. + */ + if (*ost != DMU_OST_ZFS && ds->ds_phys->ds_prev_snap_txg == 0) + return (EINVAL); + + /* + * This must not be a snapshot. + */ + if (ds->ds_phys->ds_next_snap_obj != 0) + return (EINVAL); + + /* + * If we made changes this txg, traverse_dataset won't find + * them. Try again. + */ + if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg) + return (EAGAIN); + + return (0); +} + +/* ARGSUSED */ +static void +dsl_dataset_rollback_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + dmu_objset_type_t *ost = arg2; + objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; + + dmu_buf_will_dirty(ds->ds_dbuf, tx); + + /* + * Before the roll back destroy the zil. + */ + if (ds->ds_user_ptr != NULL) { + zil_rollback_destroy( + ((objset_impl_t *)ds->ds_user_ptr)->os_zil, tx); + + /* + * We need to make sure that the objset_impl_t is reopened after + * we do the rollback, otherwise it will have the wrong + * objset_phys_t. Normally this would happen when this + * dataset-open is closed, thus causing the + * dataset to be immediately evicted. But when doing "zfs recv + * -F", we reopen the objset before that, so that there is no + * window where the dataset is closed and inconsistent. + */ + ds->ds_user_evict_func(ds, ds->ds_user_ptr); + ds->ds_user_ptr = NULL; + } + + /* Transfer space that was freed since last snap back to the head. */ + { + uint64_t used; + + VERIFY(0 == bplist_space_birthrange(&ds->ds_deadlist, + ds->ds_origin_txg, UINT64_MAX, &used)); + dsl_dir_transfer_space(ds->ds_dir, used, + DD_USED_SNAP, DD_USED_HEAD, tx); + } + + /* Zero out the deadlist. 
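
Just above, rollback totals the deadlist entries born after the clone origin and moves that much accounting from the SNAP bucket back to HEAD before the deadlist is emptied. The sketch below models the birth-range summation with a plain array standing in for the bplist; names and figures are invented.

#include <stdint.h>
#include <stdio.h>

typedef struct dead_entry {
	uint64_t birth_txg;	/* txg the freed block was born in */
	uint64_t size;		/* bytes it occupied */
} dead_entry_t;

/*
 * Like bplist_space_birthrange(): total the space of deadlist entries
 * whose birth txg falls inside [mintxg, maxtxg].
 */
static uint64_t
space_birthrange(const dead_entry_t *dl, int n, uint64_t mintxg, uint64_t maxtxg)
{
	uint64_t total = 0;

	for (int i = 0; i < n; i++)
		if (dl[i].birth_txg >= mintxg && dl[i].birth_txg <= maxtxg)
			total += dl[i].size;
	return (total);
}

int
main(void)
{
	dead_entry_t deadlist[] = {
		{ 50, 4096 },	/* born before the clone origin: stays put */
		{ 120, 8192 },	/* born after it: counted toward the transfer */
		{ 130, 8192 },
	};
	uint64_t origin_txg = 100;

	printf("transfer %llu bytes SNAP -> HEAD\n", (unsigned long long)
	    space_birthrange(deadlist, 3, origin_txg, UINT64_MAX));
	return (0);
}
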
*/ + bplist_close(&ds->ds_deadlist); + bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx); + ds->ds_phys->ds_deadlist_obj = + bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx); + VERIFY(0 == bplist_open(&ds->ds_deadlist, mos, + ds->ds_phys->ds_deadlist_obj)); + + { + /* Free blkptrs that we gave birth to */ + zio_t *zio; + struct killarg ka; + + zio = zio_root(tx->tx_pool->dp_spa, NULL, NULL, + ZIO_FLAG_MUSTSUCCEED); + ka.ds = ds; + ka.zio = zio; + ka.tx = tx; + (void) traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg, + TRAVERSE_POST, kill_blkptr, &ka); + (void) zio_wait(zio); + } + + ASSERT(!(ds->ds_phys->ds_flags & DS_FLAG_UNIQUE_ACCURATE) || + ds->ds_phys->ds_unique_bytes == 0); + + if (ds->ds_prev && ds->ds_prev != ds->ds_dir->dd_pool->dp_origin_snap) { + /* Change our contents to that of the prev snapshot */ + + ASSERT3U(ds->ds_prev->ds_object, ==, + ds->ds_phys->ds_prev_snap_obj); + ASSERT3U(ds->ds_phys->ds_used_bytes, <=, + ds->ds_prev->ds_phys->ds_used_bytes); + + ds->ds_phys->ds_bp = ds->ds_prev->ds_phys->ds_bp; + ds->ds_phys->ds_used_bytes = + ds->ds_prev->ds_phys->ds_used_bytes; + ds->ds_phys->ds_compressed_bytes = + ds->ds_prev->ds_phys->ds_compressed_bytes; + ds->ds_phys->ds_uncompressed_bytes = + ds->ds_prev->ds_phys->ds_uncompressed_bytes; + ds->ds_phys->ds_flags = ds->ds_prev->ds_phys->ds_flags; + + if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) { + dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); + ds->ds_prev->ds_phys->ds_unique_bytes = 0; + } + } else { + objset_impl_t *osi; + + ASSERT3U(ds->ds_phys->ds_used_bytes, ==, 0); + ASSERT3U(ds->ds_phys->ds_compressed_bytes, ==, 0); + ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, ==, 0); + + bzero(&ds->ds_phys->ds_bp, sizeof (blkptr_t)); + ds->ds_phys->ds_flags = 0; + ds->ds_phys->ds_unique_bytes = 0; + if (spa_version(ds->ds_dir->dd_pool->dp_spa) >= + SPA_VERSION_UNIQUE_ACCURATE) + ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; + + osi = dmu_objset_create_impl(ds->ds_dir->dd_pool->dp_spa, ds, + &ds->ds_phys->ds_bp, *ost, tx); +#ifdef _KERNEL + zfs_create_fs(&osi->os, kcred, NULL, tx); +#endif + } + + spa_history_internal_log(LOG_DS_ROLLBACK, ds->ds_dir->dd_pool->dp_spa, + tx, cr, "dataset = %llu", ds->ds_object); +} + +/* ARGSUSED */ +static int +dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; + uint64_t count; + int err; + + /* + * Can't delete a head dataset if there are snapshots of it. + * (Except if the only snapshots are from the branch we cloned + * from.) + */ + if (ds->ds_prev != NULL && + ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) + return (EINVAL); + + /* + * This is really a dsl_dir thing, but check it here so that + * we'll be less likely to leave this dataset inconsistent & + * nearly destroyed. 
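
The destroy preconditions checked around this point amount to a few refusals: a branch point (more than one child), a head that still has snapshots of its own, or a dsl_dir that still contains child directories cannot be destroyed. The standalone summary below only paraphrases those checks (invented function, errno values written as literals); it is not the actual control flow.

#include <stdint.h>
#include <stdio.h>

#define EEXIST	17
#define EINVAL	22

/* rough shape of the refusals in the destroy check functions */
static int
destroy_check(int has_own_snapshots, uint64_t child_dir_count,
    uint64_t num_children)
{
	if (num_children > 1)		/* branch point: clones hang off us */
		return (EEXIST);
	if (has_own_snapshots)		/* snapshots of this head still exist */
		return (EINVAL);
	if (child_dir_count != 0)	/* nested filesystems still exist */
		return (EEXIST);
	return (0);
}

int
main(void)
{
	printf("%d\n", destroy_check(0, 0, 1));	/* 0: OK to destroy */
	printf("%d\n", destroy_check(1, 0, 1));	/* 22 (EINVAL): snapshots exist */
	printf("%d\n", destroy_check(0, 2, 1));	/* 17 (EEXIST): child dirs exist */
	return (0);
}
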
+ */ + err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count); + if (err) + return (err); + if (count != 0) + return (EEXIST); + + return (0); +} + +/* ARGSUSED */ +static void +dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + dsl_pool_t *dp = ds->ds_dir->dd_pool; + + /* Mark it as inconsistent on-disk, in case we crash */ + dmu_buf_will_dirty(ds->ds_dbuf, tx); + ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT; + + spa_history_internal_log(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx, + cr, "dataset = %llu", ds->ds_object); +} + +/* ARGSUSED */ +int +dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + + /* we have an owner hold, so noone else can destroy us */ + ASSERT(!DSL_DATASET_IS_DESTROYED(ds)); + + /* Can't delete a branch point. */ + if (ds->ds_phys->ds_num_children > 1) + return (EEXIST); + + /* + * Can't delete a head dataset if there are snapshots of it. + * (Except if the only snapshots are from the branch we cloned + * from.) + */ + if (ds->ds_prev != NULL && + ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) + return (EINVAL); + + /* + * If we made changes this txg, traverse_dsl_dataset won't find + * them. Try again. + */ + if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg) + return (EAGAIN); + + /* XXX we should do some i/o error checking... */ + return (0); +} + +struct refsarg { + kmutex_t lock; + boolean_t gone; + kcondvar_t cv; +}; + +/* ARGSUSED */ +static void +dsl_dataset_refs_gone(dmu_buf_t *db, void *argv) +{ + struct refsarg *arg = argv; + + mutex_enter(&arg->lock); + arg->gone = TRUE; + cv_signal(&arg->cv); + mutex_exit(&arg->lock); +} + +static void +dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag) +{ + struct refsarg arg; + + mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL); + cv_init(&arg.cv, NULL, CV_DEFAULT, NULL); + arg.gone = FALSE; + (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys, + dsl_dataset_refs_gone); + dmu_buf_rele(ds->ds_dbuf, tag); + mutex_enter(&arg.lock); + while (!arg.gone) + cv_wait(&arg.cv, &arg.lock); + ASSERT(arg.gone); + mutex_exit(&arg.lock); + ds->ds_dbuf = NULL; + ds->ds_phys = NULL; + mutex_destroy(&arg.lock); + cv_destroy(&arg.cv); +} + +void +dsl_dataset_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + zio_t *zio; + int err; + int after_branch_point = FALSE; + dsl_pool_t *dp = ds->ds_dir->dd_pool; + objset_t *mos = dp->dp_meta_objset; + dsl_dataset_t *ds_prev = NULL; + uint64_t obj; + + ASSERT(ds->ds_owner); + ASSERT3U(ds->ds_phys->ds_num_children, <=, 1); + ASSERT(ds->ds_prev == NULL || + ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object); + ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg); + + /* signal any waiters that this dataset is going away */ + mutex_enter(&ds->ds_lock); + ds->ds_owner = dsl_reaper; + cv_broadcast(&ds->ds_exclusive_cv); + mutex_exit(&ds->ds_lock); + + /* Remove our reservation */ + if (ds->ds_reserved != 0) { + uint64_t val = 0; + dsl_dataset_set_reservation_sync(ds, &val, cr, tx); + ASSERT3U(ds->ds_reserved, ==, 0); + } + + ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); + + dsl_pool_ds_destroyed(ds, tx); + + obj = ds->ds_object; + + if (ds->ds_phys->ds_prev_snap_obj != 0) { + if (ds->ds_prev) { + ds_prev = ds->ds_prev; + } else { + VERIFY(0 == dsl_dataset_hold_obj(dp, + ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev)); + } + after_branch_point = + (ds_prev->ds_phys->ds_next_snap_obj != obj); + + 
dmu_buf_will_dirty(ds_prev->ds_dbuf, tx); + if (after_branch_point && + ds_prev->ds_phys->ds_next_clones_obj != 0) { + VERIFY(0 == zap_remove_int(mos, + ds_prev->ds_phys->ds_next_clones_obj, obj, tx)); + if (ds->ds_phys->ds_next_snap_obj != 0) { + VERIFY(0 == zap_add_int(mos, + ds_prev->ds_phys->ds_next_clones_obj, + ds->ds_phys->ds_next_snap_obj, tx)); + } + } + if (after_branch_point && + ds->ds_phys->ds_next_snap_obj == 0) { + /* This clone is toast. */ + ASSERT(ds_prev->ds_phys->ds_num_children > 1); + ds_prev->ds_phys->ds_num_children--; + } else if (!after_branch_point) { + ds_prev->ds_phys->ds_next_snap_obj = + ds->ds_phys->ds_next_snap_obj; + } + } + + zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); + + if (ds->ds_phys->ds_next_snap_obj != 0) { + blkptr_t bp; + dsl_dataset_t *ds_next; + uint64_t itor = 0; + uint64_t old_unique; + int64_t used = 0, compressed = 0, uncompressed = 0; + + VERIFY(0 == dsl_dataset_hold_obj(dp, + ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next)); + ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj); + + old_unique = dsl_dataset_unique(ds_next); + + dmu_buf_will_dirty(ds_next->ds_dbuf, tx); + ds_next->ds_phys->ds_prev_snap_obj = + ds->ds_phys->ds_prev_snap_obj; + ds_next->ds_phys->ds_prev_snap_txg = + ds->ds_phys->ds_prev_snap_txg; + ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, + ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0); + + /* + * Transfer to our deadlist (which will become next's + * new deadlist) any entries from next's current + * deadlist which were born before prev, and free the + * other entries. + * + * XXX we're doing this long task with the config lock held + */ + while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) { + if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) { + VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, + &bp, tx)); + if (ds_prev && !after_branch_point && + bp.blk_birth > + ds_prev->ds_phys->ds_prev_snap_txg) { + ds_prev->ds_phys->ds_unique_bytes += + bp_get_dasize(dp->dp_spa, &bp); + } + } else { + used += bp_get_dasize(dp->dp_spa, &bp); + compressed += BP_GET_PSIZE(&bp); + uncompressed += BP_GET_UCSIZE(&bp); + /* XXX check return value? */ + (void) dsl_free(zio, dp, tx->tx_txg, + &bp, NULL, NULL, ARC_NOWAIT); + } + } + + ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes); + + /* change snapused */ + dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP, + -used, -compressed, -uncompressed, tx); + + /* free next's deadlist */ + bplist_close(&ds_next->ds_deadlist); + bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx); + + /* set next's deadlist to our deadlist */ + bplist_close(&ds->ds_deadlist); + ds_next->ds_phys->ds_deadlist_obj = + ds->ds_phys->ds_deadlist_obj; + VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos, + ds_next->ds_phys->ds_deadlist_obj)); + ds->ds_phys->ds_deadlist_obj = 0; + + if (ds_next->ds_phys->ds_next_snap_obj != 0) { + /* + * Update next's unique to include blocks which + * were previously shared by only this snapshot + * and it. Those blocks will be born after the + * prev snap and before this snap, and will have + * died after the next snap and before the one + * after that (ie. be on the snap after next's + * deadlist). 
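+ *
+ * For example, with snapshots P -> D -> N -> A, where D is the one
+ * being destroyed, a block born in (P, D] that first goes dead in
+ * (N, A] sits on A's deadlist and was referenced by both D and N;
+ * once D is gone only N references it, so the
+ * bplist_space_birthrange() call below sums exactly that birth
+ * range over A's deadlist and adds it to N's ds_unique_bytes.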
+ * + * XXX we're doing this long task with the + * config lock held + */ + dsl_dataset_t *ds_after_next; + uint64_t space; + + VERIFY(0 == dsl_dataset_hold_obj(dp, + ds_next->ds_phys->ds_next_snap_obj, + FTAG, &ds_after_next)); + + VERIFY(0 == + bplist_space_birthrange(&ds_after_next->ds_deadlist, + ds->ds_phys->ds_prev_snap_txg, + ds->ds_phys->ds_creation_txg, &space)); + ds_next->ds_phys->ds_unique_bytes += space; + + dsl_dataset_rele(ds_after_next, FTAG); + ASSERT3P(ds_next->ds_prev, ==, NULL); + } else { + ASSERT3P(ds_next->ds_prev, ==, ds); + dsl_dataset_drop_ref(ds_next->ds_prev, ds_next); + ds_next->ds_prev = NULL; + if (ds_prev) { + VERIFY(0 == dsl_dataset_get_ref(dp, + ds->ds_phys->ds_prev_snap_obj, + ds_next, &ds_next->ds_prev)); + } + + dsl_dataset_recalc_head_uniq(ds_next); + + /* + * Reduce the amount of our unconsmed refreservation + * being charged to our parent by the amount of + * new unique data we have gained. + */ + if (old_unique < ds_next->ds_reserved) { + int64_t mrsdelta; + uint64_t new_unique = + ds_next->ds_phys->ds_unique_bytes; + + ASSERT(old_unique <= new_unique); + mrsdelta = MIN(new_unique - old_unique, + ds_next->ds_reserved - old_unique); + dsl_dir_diduse_space(ds->ds_dir, + DD_USED_REFRSRV, -mrsdelta, 0, 0, tx); + } + } + dsl_dataset_rele(ds_next, FTAG); + } else { + /* + * There's no next snapshot, so this is a head dataset. + * Destroy the deadlist. Unless it's a clone, the + * deadlist should be empty. (If it's a clone, it's + * safe to ignore the deadlist contents.) + */ + struct killarg ka; + + ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist)); + bplist_close(&ds->ds_deadlist); + bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx); + ds->ds_phys->ds_deadlist_obj = 0; + + /* + * Free everything that we point to (that's born after + * the previous snapshot, if we are a clone) + * + * NB: this should be very quick, because we already + * freed all the objects in open context. 
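+ * (dsl_dataset_destroy() walks the objset and frees its objects
+ * before queueing this sync task), so the traverse_dataset() call
+ * below typically only has the objset's own metadata left to kill.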
+ */ + ka.ds = ds; + ka.zio = zio; + ka.tx = tx; + err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg, + TRAVERSE_POST, kill_blkptr, &ka); + ASSERT3U(err, ==, 0); + ASSERT(spa_version(dp->dp_spa) < SPA_VERSION_UNIQUE_ACCURATE || + ds->ds_phys->ds_unique_bytes == 0); + } + + err = zio_wait(zio); + ASSERT3U(err, ==, 0); + + if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) { + /* Erase the link in the dir */ + dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx); + ds->ds_dir->dd_phys->dd_head_dataset_obj = 0; + ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0); + err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx); + ASSERT(err == 0); + } else { + /* remove from snapshot namespace */ + dsl_dataset_t *ds_head; + ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0); + VERIFY(0 == dsl_dataset_hold_obj(dp, + ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head)); + VERIFY(0 == dsl_dataset_get_snapname(ds)); +#ifdef ZFS_DEBUG + { + uint64_t val; + + err = dsl_dataset_snap_lookup(ds_head, + ds->ds_snapname, &val); + ASSERT3U(err, ==, 0); + ASSERT3U(val, ==, obj); + } +#endif + err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx); + ASSERT(err == 0); + dsl_dataset_rele(ds_head, FTAG); + } + + if (ds_prev && ds->ds_prev != ds_prev) + dsl_dataset_rele(ds_prev, FTAG); + + spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx); + spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx, + cr, "dataset = %llu", ds->ds_object); + + if (ds->ds_phys->ds_next_clones_obj != 0) { + uint64_t count; + ASSERT(0 == zap_count(mos, + ds->ds_phys->ds_next_clones_obj, &count) && count == 0); + VERIFY(0 == dmu_object_free(mos, + ds->ds_phys->ds_next_clones_obj, tx)); + } + if (ds->ds_phys->ds_props_obj != 0) + VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx)); + dsl_dir_close(ds->ds_dir, ds); + ds->ds_dir = NULL; + dsl_dataset_drain_refs(ds, tag); + VERIFY(0 == dmu_object_free(mos, obj, tx)); +} + +static int +dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx) +{ + uint64_t asize; + + if (!dmu_tx_is_syncing(tx)) + return (0); + + /* + * If there's an fs-only reservation, any blocks that might become + * owned by the snapshot dataset must be accommodated by space + * outside of the reservation. + */ + asize = MIN(dsl_dataset_unique(ds), ds->ds_reserved); + if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE)) + return (ENOSPC); + + /* + * Propogate any reserved space for this snapshot to other + * snapshot checks in this sync group. + */ + if (asize > 0) + dsl_dir_willuse_space(ds->ds_dir, asize, tx); + + return (0); +} + +/* ARGSUSED */ +int +dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + const char *snapname = arg2; + int err; + uint64_t value; + + /* + * We don't allow multiple snapshots of the same txg. If there + * is already one, try again. + */ + if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg) + return (EAGAIN); + + /* + * Check for conflicting name snapshot name. + */ + err = dsl_dataset_snap_lookup(ds, snapname, &value); + if (err == 0) + return (EEXIST); + if (err != ENOENT) + return (err); + + /* + * Check that the dataset's name is not too long. 
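+ * (e.g. "tank/home/anne" + "@" + "backup" must fit in MAXNAMELEN).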
Name consists + * of the dataset's length + 1 for the @-sign + snapshot name's length + */ + if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN) + return (ENAMETOOLONG); + + err = dsl_dataset_snapshot_reserve_space(ds, tx); + if (err) + return (err); + + ds->ds_trysnap_txg = tx->tx_txg; + return (0); +} + +void +dsl_dataset_snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + const char *snapname = arg2; + dsl_pool_t *dp = ds->ds_dir->dd_pool; + dmu_buf_t *dbuf; + dsl_dataset_phys_t *dsphys; + uint64_t dsobj, crtxg; + objset_t *mos = dp->dp_meta_objset; + int err; + + ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); + + /* + * The origin's ds_creation_txg has to be < TXG_INITIAL + */ + if (strcmp(snapname, ORIGIN_DIR_NAME) == 0) + crtxg = 1; + else + crtxg = tx->tx_txg; + + dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, + DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx); + VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); + dmu_buf_will_dirty(dbuf, tx); + dsphys = dbuf->db_data; + bzero(dsphys, sizeof (dsl_dataset_phys_t)); + dsphys->ds_dir_obj = ds->ds_dir->dd_object; + dsphys->ds_fsid_guid = unique_create(); + (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, + sizeof (dsphys->ds_guid)); + dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj; + dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg; + dsphys->ds_next_snap_obj = ds->ds_object; + dsphys->ds_num_children = 1; + dsphys->ds_creation_time = gethrestime_sec(); + dsphys->ds_creation_txg = crtxg; + dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj; + dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes; + dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes; + dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes; + dsphys->ds_flags = ds->ds_phys->ds_flags; + dsphys->ds_bp = ds->ds_phys->ds_bp; + dmu_buf_rele(dbuf, FTAG); + + ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0); + if (ds->ds_prev) { + uint64_t next_clones_obj = + ds->ds_prev->ds_phys->ds_next_clones_obj; + ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj == + ds->ds_object || + ds->ds_prev->ds_phys->ds_num_children > 1); + if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) { + dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); + ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, + ds->ds_prev->ds_phys->ds_creation_txg); + ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj; + } else if (next_clones_obj != 0) { + VERIFY3U(0, ==, zap_remove_int(mos, + next_clones_obj, dsphys->ds_next_snap_obj, tx)); + VERIFY3U(0, ==, zap_add_int(mos, + next_clones_obj, dsobj, tx)); + } + } + + /* + * If we have a reference-reservation on this dataset, we will + * need to increase the amount of refreservation being charged + * since our unique space is going to zero. 
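+ * For example, with refreservation=10G and 3G of unique data, that
+ * 3G currently satisfies part of the reservation; after the snapshot
+ * it belongs to the snapshot instead, so an extra
+ * MIN(unique, reserved) = 3G of unconsumed reservation is charged
+ * to the dsl_dir as DD_USED_REFRSRV below.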
+ */ + if (ds->ds_reserved) { + int64_t add = MIN(dsl_dataset_unique(ds), ds->ds_reserved); + dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, + add, 0, 0, tx); + } + + bplist_close(&ds->ds_deadlist); + dmu_buf_will_dirty(ds->ds_dbuf, tx); + ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg); + ds->ds_phys->ds_prev_snap_obj = dsobj; + ds->ds_phys->ds_prev_snap_txg = crtxg; + ds->ds_phys->ds_unique_bytes = 0; + if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) + ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; + ds->ds_phys->ds_deadlist_obj = + bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx); + VERIFY(0 == bplist_open(&ds->ds_deadlist, mos, + ds->ds_phys->ds_deadlist_obj)); + + dprintf("snap '%s' -> obj %llu\n", snapname, dsobj); + err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj, + snapname, 8, 1, &dsobj, tx); + ASSERT(err == 0); + + if (ds->ds_prev) + dsl_dataset_drop_ref(ds->ds_prev, ds); + VERIFY(0 == dsl_dataset_get_ref(dp, + ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev)); + + dsl_pool_ds_snapshotted(ds, tx); + + spa_history_internal_log(LOG_DS_SNAPSHOT, dp->dp_spa, tx, cr, + "dataset = %llu", dsobj); +} + +void +dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx) +{ + ASSERT(dmu_tx_is_syncing(tx)); + ASSERT(ds->ds_user_ptr != NULL); + ASSERT(ds->ds_phys->ds_next_snap_obj == 0); + + /* + * in case we had to change ds_fsid_guid when we opened it, + * sync it out now. + */ + dmu_buf_will_dirty(ds->ds_dbuf, tx); + ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid; + + dsl_dir_dirty(ds->ds_dir, tx); + dmu_objset_sync(ds->ds_user_ptr, zio, tx); +} + +void +dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv) +{ + uint64_t refd, avail, uobjs, aobjs; + + dsl_dir_stats(ds->ds_dir, nv); + + dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd); + + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION, + ds->ds_phys->ds_creation_time); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG, + ds->ds_phys->ds_creation_txg); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA, + ds->ds_quota); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION, + ds->ds_reserved); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID, + ds->ds_phys->ds_guid); + + if (ds->ds_phys->ds_next_snap_obj) { + /* + * This is a snapshot; override the dd's space used with + * our unique space and compression ratio. + */ + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, + ds->ds_phys->ds_unique_bytes); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, + ds->ds_phys->ds_compressed_bytes == 0 ? 100 : + (ds->ds_phys->ds_uncompressed_bytes * 100 / + ds->ds_phys->ds_compressed_bytes)); + } +} + +void +dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat) +{ + stat->dds_creation_txg = ds->ds_phys->ds_creation_txg; + stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT; + stat->dds_guid = ds->ds_phys->ds_guid; + if (ds->ds_phys->ds_next_snap_obj) { + stat->dds_is_snapshot = B_TRUE; + stat->dds_num_clones = ds->ds_phys->ds_num_children - 1; + } + + /* clone origin is really a dsl_dir thing... 
*/ + rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER); + if (dsl_dir_is_clone(ds->ds_dir)) { + dsl_dataset_t *ods; + + VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool, + ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods)); + dsl_dataset_name(ods, stat->dds_origin); + dsl_dataset_drop_ref(ods, FTAG); + } + rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock); +} + +uint64_t +dsl_dataset_fsid_guid(dsl_dataset_t *ds) +{ + return (ds->ds_fsid_guid); +} + +void +dsl_dataset_space(dsl_dataset_t *ds, + uint64_t *refdbytesp, uint64_t *availbytesp, + uint64_t *usedobjsp, uint64_t *availobjsp) +{ + *refdbytesp = ds->ds_phys->ds_used_bytes; + *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE); + if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) + *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes; + if (ds->ds_quota != 0) { + /* + * Adjust available bytes according to refquota + */ + if (*refdbytesp < ds->ds_quota) + *availbytesp = MIN(*availbytesp, + ds->ds_quota - *refdbytesp); + else + *availbytesp = 0; + } + *usedobjsp = ds->ds_phys->ds_bp.blk_fill; + *availobjsp = DN_MAX_OBJECT - *usedobjsp; +} + +boolean_t +dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds) +{ + dsl_pool_t *dp = ds->ds_dir->dd_pool; + + ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || + dsl_pool_sync_context(dp)); + if (ds->ds_prev == NULL) + return (B_FALSE); + if (ds->ds_phys->ds_bp.blk_birth > + ds->ds_prev->ds_phys->ds_creation_txg) + return (B_TRUE); + return (B_FALSE); +} + +/* ARGSUSED */ +static int +dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + char *newsnapname = arg2; + dsl_dir_t *dd = ds->ds_dir; + dsl_dataset_t *hds; + uint64_t val; + int err; + + err = dsl_dataset_hold_obj(dd->dd_pool, + dd->dd_phys->dd_head_dataset_obj, FTAG, &hds); + if (err) + return (err); + + /* new name better not be in use */ + err = dsl_dataset_snap_lookup(hds, newsnapname, &val); + dsl_dataset_rele(hds, FTAG); + + if (err == 0) + err = EEXIST; + else if (err == ENOENT) + err = 0; + + /* dataset name + 1 for the "@" + the new snapshot name must fit */ + if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN) + err = ENAMETOOLONG; + + return (err); +} + +static void +dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, + cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + const char *newsnapname = arg2; + dsl_dir_t *dd = ds->ds_dir; + objset_t *mos = dd->dd_pool->dp_meta_objset; + dsl_dataset_t *hds; + int err; + + ASSERT(ds->ds_phys->ds_next_snap_obj != 0); + + VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, + dd->dd_phys->dd_head_dataset_obj, FTAG, &hds)); + + VERIFY(0 == dsl_dataset_get_snapname(ds)); + err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx); + ASSERT3U(err, ==, 0); + mutex_enter(&ds->ds_lock); + (void) strcpy(ds->ds_snapname, newsnapname); + mutex_exit(&ds->ds_lock); + err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj, + ds->ds_snapname, 8, 1, &ds->ds_object, tx); + ASSERT3U(err, ==, 0); + + spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx, + cr, "dataset = %llu", ds->ds_object); + dsl_dataset_rele(hds, FTAG); +} + +struct renamesnaparg { + dsl_sync_task_group_t *dstg; + char failed[MAXPATHLEN]; + char *oldsnap; + char *newsnap; +}; + +static int +dsl_snapshot_rename_one(char *name, void *arg) +{ + struct renamesnaparg *ra = arg; + dsl_dataset_t *ds = NULL; + char *cp; + int err; + + cp = name + strlen(name); + *cp = '@'; + (void) strcpy(cp + 1, ra->oldsnap); + + /* + * For 
recursive snapshot renames the parent won't be changing + * so we just pass name for both the to/from argument. + */ + err = zfs_secpolicy_rename_perms(name, name, CRED()); + if (err == ENOENT) { + return (0); + } else if (err) { + (void) strcpy(ra->failed, name); + return (err); + } + +#ifdef _KERNEL + /* + * For all filesystems undergoing rename, we'll need to unmount it. + */ + (void) zfs_unmount_snap(name, NULL); +#endif + err = dsl_dataset_hold(name, ra->dstg, &ds); + *cp = '\0'; + if (err == ENOENT) { + return (0); + } else if (err) { + (void) strcpy(ra->failed, name); + return (err); + } + + dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check, + dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0); + + return (0); +} + +static int +dsl_recursive_rename(char *oldname, const char *newname) +{ + int err; + struct renamesnaparg *ra; + dsl_sync_task_t *dst; + spa_t *spa; + char *cp, *fsname = spa_strdup(oldname); + int len = strlen(oldname); + + /* truncate the snapshot name to get the fsname */ + cp = strchr(fsname, '@'); + *cp = '\0'; + + err = spa_open(fsname, &spa, FTAG); + if (err) { + kmem_free(fsname, len + 1); + return (err); + } + ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP); + ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); + + ra->oldsnap = strchr(oldname, '@') + 1; + ra->newsnap = strchr(newname, '@') + 1; + *ra->failed = '\0'; + + err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra, + DS_FIND_CHILDREN); + kmem_free(fsname, len + 1); + + if (err == 0) { + err = dsl_sync_task_group_wait(ra->dstg); + } + + for (dst = list_head(&ra->dstg->dstg_tasks); dst; + dst = list_next(&ra->dstg->dstg_tasks, dst)) { + dsl_dataset_t *ds = dst->dst_arg1; + if (dst->dst_err) { + dsl_dir_name(ds->ds_dir, ra->failed); + (void) strcat(ra->failed, "@"); + (void) strcat(ra->failed, ra->newsnap); + } + dsl_dataset_rele(ds, ra->dstg); + } + + if (err) + (void) strcpy(oldname, ra->failed); + + dsl_sync_task_group_destroy(ra->dstg); + kmem_free(ra, sizeof (struct renamesnaparg)); + spa_close(spa, FTAG); + return (err); +} + +static int +dsl_valid_rename(char *oldname, void *arg) +{ + int delta = *(int *)arg; + + if (strlen(oldname) + delta >= MAXNAMELEN) + return (ENAMETOOLONG); + + return (0); +} + +#pragma weak dmu_objset_rename = dsl_dataset_rename +int +dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive) +{ + dsl_dir_t *dd; + dsl_dataset_t *ds; + const char *tail; + int err; + + err = dsl_dir_open(oldname, FTAG, &dd, &tail); + if (err) + return (err); + if (tail == NULL) { + int delta = strlen(newname) - strlen(oldname); + + /* if we're growing, validate child name lengths */ + if (delta > 0) + err = dmu_objset_find(oldname, dsl_valid_rename, + &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); + + if (!err) + err = dsl_dir_rename(dd, newname); + dsl_dir_close(dd, FTAG); + return (err); + } + if (tail[0] != '@') { + /* the name ended in a nonexistant component */ + dsl_dir_close(dd, FTAG); + return (ENOENT); + } + + dsl_dir_close(dd, FTAG); + + /* new name must be snapshot in same filesystem */ + tail = strchr(newname, '@'); + if (tail == NULL) + return (EINVAL); + tail++; + if (strncmp(oldname, newname, tail - newname) != 0) + return (EXDEV); + + if (recursive) { + err = dsl_recursive_rename(oldname, newname); + } else { + err = dsl_dataset_hold(oldname, FTAG, &ds); + if (err) + return (err); + + err = dsl_sync_task_do(ds->ds_dir->dd_pool, + dsl_dataset_snapshot_rename_check, + dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1); + + 
dsl_dataset_rele(ds, FTAG); + } + + return (err); +} + +struct promotenode { + list_node_t link; + dsl_dataset_t *ds; +}; + +struct promotearg { + list_t shared_snaps, origin_snaps, clone_snaps; + dsl_dataset_t *origin_origin, *origin_head; + uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap; +}; + +static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep); + +/* ARGSUSED */ +static int +dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *hds = arg1; + struct promotearg *pa = arg2; + struct promotenode *snap = list_head(&pa->shared_snaps); + dsl_dataset_t *origin_ds = snap->ds; + int err; + + /* Check that it is a real clone */ + if (!dsl_dir_is_clone(hds->ds_dir)) + return (EINVAL); + + /* Since this is so expensive, don't do the preliminary check */ + if (!dmu_tx_is_syncing(tx)) + return (0); + + if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE) + return (EXDEV); + + /* compute origin's new unique space */ + snap = list_tail(&pa->clone_snaps); + ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object); + err = bplist_space_birthrange(&snap->ds->ds_deadlist, + origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &pa->unique); + if (err) + return (err); + + /* + * Walk the snapshots that we are moving + * + * Compute space to transfer. Consider the incremental changes + * to used for each snapshot: + * (my used) = (prev's used) + (blocks born) - (blocks killed) + * So each snapshot gave birth to: + * (blocks born) = (my used) - (prev's used) + (blocks killed) + * So a sequence would look like: + * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0) + * Which simplifies to: + * uN + kN + kN-1 + ... + k1 + k0 + * Note however, if we stop before we reach the ORIGIN we get: + * uN + kN + kN-1 + ... + kM - uM-1 + */ + pa->used = origin_ds->ds_phys->ds_used_bytes; + pa->comp = origin_ds->ds_phys->ds_compressed_bytes; + pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes; + for (snap = list_head(&pa->shared_snaps); snap; + snap = list_next(&pa->shared_snaps, snap)) { + uint64_t val, dlused, dlcomp, dluncomp; + dsl_dataset_t *ds = snap->ds; + + /* Check that the snapshot name does not conflict */ + VERIFY(0 == dsl_dataset_get_snapname(ds)); + err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val); + if (err == 0) + return (EEXIST); + if (err != ENOENT) + return (err); + + /* The very first snapshot does not have a deadlist */ + if (ds->ds_phys->ds_prev_snap_obj == 0) + continue; + + if (err = bplist_space(&ds->ds_deadlist, + &dlused, &dlcomp, &dluncomp)) + return (err); + pa->used += dlused; + pa->comp += dlcomp; + pa->uncomp += dluncomp; + } + + /* + * If we are a clone of a clone then we never reached ORIGIN, + * so we need to subtract out the clone origin's used space. + */ + if (pa->origin_origin) { + pa->used -= pa->origin_origin->ds_phys->ds_used_bytes; + pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes; + pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes; + } + + /* Check that there is enough space here */ + err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir, + pa->used); + if (err) + return (err); + + /* + * Compute the amounts of space that will be used by snapshots + * after the promotion (for both origin and clone). For each, + * it is the amount of space that will be on all of their + * deadlists (that was not born before their new origin). 
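+ * Concretely, cloneusedsnap is the deadlist space of the snapshots
+ * being moved (shared_snaps) plus that of the clone's own snapshots
+ * (clone_snaps), and originusedsnap is the deadlist space of the
+ * snapshots staying with the origin head; snaplist_space() restricts
+ * each sum to blocks born after the respective new origin.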
+ */ + if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { + uint64_t space; + + /* + * Note, typically this will not be a clone of a clone, + * so snap->ds->ds_origin_txg will be < TXG_INITIAL, so + * these snaplist_space() -> bplist_space_birthrange() + * calls will be fast because they do not have to + * iterate over all bps. + */ + snap = list_head(&pa->origin_snaps); + err = snaplist_space(&pa->shared_snaps, + snap->ds->ds_origin_txg, &pa->cloneusedsnap); + if (err) + return (err); + + err = snaplist_space(&pa->clone_snaps, + snap->ds->ds_origin_txg, &space); + if (err) + return (err); + pa->cloneusedsnap += space; + } + if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { + err = snaplist_space(&pa->origin_snaps, + origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap); + if (err) + return (err); + } + + return (0); +} + +static void +dsl_dataset_promote_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *hds = arg1; + struct promotearg *pa = arg2; + struct promotenode *snap = list_head(&pa->shared_snaps); + dsl_dataset_t *origin_ds = snap->ds; + dsl_dataset_t *origin_head; + dsl_dir_t *dd = hds->ds_dir; + dsl_pool_t *dp = hds->ds_dir->dd_pool; + dsl_dir_t *odd = NULL; + uint64_t oldnext_obj; + int64_t delta; + + ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)); + + snap = list_head(&pa->origin_snaps); + origin_head = snap->ds; + + /* + * We need to explicitly open odd, since origin_ds's dd will be + * changing. + */ + VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object, + NULL, FTAG, &odd)); + + /* change origin's next snap */ + dmu_buf_will_dirty(origin_ds->ds_dbuf, tx); + oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj; + snap = list_tail(&pa->clone_snaps); + ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object); + origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object; + + /* change the origin's next clone */ + if (origin_ds->ds_phys->ds_next_clones_obj) { + VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, + origin_ds->ds_phys->ds_next_clones_obj, + origin_ds->ds_phys->ds_next_snap_obj, tx)); + VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, + origin_ds->ds_phys->ds_next_clones_obj, + oldnext_obj, tx)); + } + + /* change origin */ + dmu_buf_will_dirty(dd->dd_dbuf, tx); + ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object); + dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj; + hds->ds_origin_txg = origin_head->ds_origin_txg; + dmu_buf_will_dirty(odd->dd_dbuf, tx); + odd->dd_phys->dd_origin_obj = origin_ds->ds_object; + origin_head->ds_origin_txg = origin_ds->ds_phys->ds_creation_txg; + + /* move snapshots to this dir */ + for (snap = list_head(&pa->shared_snaps); snap; + snap = list_next(&pa->shared_snaps, snap)) { + dsl_dataset_t *ds = snap->ds; + + /* unregister props as dsl_dir is changing */ + if (ds->ds_user_ptr) { + ds->ds_user_evict_func(ds, ds->ds_user_ptr); + ds->ds_user_ptr = NULL; + } + /* move snap name entry */ + VERIFY(0 == dsl_dataset_get_snapname(ds)); + VERIFY(0 == dsl_dataset_snap_remove(origin_head, + ds->ds_snapname, tx)); + VERIFY(0 == zap_add(dp->dp_meta_objset, + hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname, + 8, 1, &ds->ds_object, tx)); + /* change containing dsl_dir */ + dmu_buf_will_dirty(ds->ds_dbuf, tx); + ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object); + ds->ds_phys->ds_dir_obj = dd->dd_object; + ASSERT3P(ds->ds_dir, ==, odd); + dsl_dir_close(ds->ds_dir, ds); + VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object, + NULL, ds, 
&ds->ds_dir)); + + ASSERT3U(dsl_prop_numcb(ds), ==, 0); + } + + /* + * Change space accounting. + * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either + * both be valid, or both be 0 (resulting in delta == 0). This + * is true for each of {clone,origin} independently. + */ + + delta = pa->cloneusedsnap - + dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]; + ASSERT3S(delta, >=, 0); + ASSERT3U(pa->used, >=, delta); + dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx); + dsl_dir_diduse_space(dd, DD_USED_HEAD, + pa->used - delta, pa->comp, pa->uncomp, tx); + + delta = pa->originusedsnap - + odd->dd_phys->dd_used_breakdown[DD_USED_SNAP]; + ASSERT3S(delta, <=, 0); + ASSERT3U(pa->used, >=, -delta); + dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx); + dsl_dir_diduse_space(odd, DD_USED_HEAD, + -pa->used - delta, -pa->comp, -pa->uncomp, tx); + + origin_ds->ds_phys->ds_unique_bytes = pa->unique; + + /* log history record */ + spa_history_internal_log(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx, + cr, "dataset = %llu", hds->ds_object); + + dsl_dir_close(odd, FTAG); +} + +static char *snaplist_tag = "snaplist"; +/* + * Make a list of dsl_dataset_t's for the snapshots between first_obj + * (exclusive) and last_obj (inclusive). The list will be in reverse + * order (last_obj will be the list_head()). If first_obj == 0, do all + * snapshots back to this dataset's origin. + */ +static int +snaplist_make(dsl_pool_t *dp, boolean_t own, + uint64_t first_obj, uint64_t last_obj, list_t *l) +{ + uint64_t obj = last_obj; + + ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock)); + + list_create(l, sizeof (struct promotenode), + offsetof(struct promotenode, link)); + + while (obj != first_obj) { + dsl_dataset_t *ds; + struct promotenode *snap; + int err; + + if (own) { + err = dsl_dataset_own_obj(dp, obj, + 0, snaplist_tag, &ds); + if (err == 0) + dsl_dataset_make_exclusive(ds, snaplist_tag); + } else { + err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds); + } + if (err == ENOENT) { + /* lost race with snapshot destroy */ + struct promotenode *last = list_tail(l); + ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj); + obj = last->ds->ds_phys->ds_prev_snap_obj; + continue; + } else if (err) { + return (err); + } + + if (first_obj == 0) + first_obj = ds->ds_dir->dd_phys->dd_origin_obj; + + snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP); + snap->ds = ds; + list_insert_tail(l, snap); + obj = ds->ds_phys->ds_prev_snap_obj; + } + + return (0); +} + +static int +snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep) +{ + struct promotenode *snap; + + *spacep = 0; + for (snap = list_head(l); snap; snap = list_next(l, snap)) { + uint64_t used; + int err = bplist_space_birthrange(&snap->ds->ds_deadlist, + mintxg, UINT64_MAX, &used); + if (err) + return (err); + *spacep += used; + } + return (0); +} + +static void +snaplist_destroy(list_t *l, boolean_t own) +{ + struct promotenode *snap; + + if (!list_link_active(&l->list_head)) + return; + + while ((snap = list_tail(l)) != NULL) { + list_remove(l, snap); + if (own) + dsl_dataset_disown(snap->ds, snaplist_tag); + else + dsl_dataset_rele(snap->ds, snaplist_tag); + kmem_free(snap, sizeof (struct promotenode)); + } + list_destroy(l); +} + +/* + * Promote a clone. 
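+ * For example, promoting "tank/clone" (a clone of "tank/fs@snap1")
+ * moves snap1 and every older snapshot of tank/fs over to
+ * tank/clone, and leaves tank/fs behind as a clone of
+ * tank/clone@snap1.
+ *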
Nomenclature note: + * "clone" or "cds": the original clone which is being promoted + * "origin" or "ods": the snapshot which is originally clone's origin + * "origin head" or "ohds": the dataset which is the head + * (filesystem/volume) for the origin + * "origin origin": the origin of the origin's filesystem (typically + * NULL, indicating that the clone is not a clone of a clone). + */ +int +dsl_dataset_promote(const char *name) +{ + dsl_dataset_t *ds; + dsl_dir_t *dd; + dsl_pool_t *dp; + dmu_object_info_t doi; + struct promotearg pa = { 0 }; + struct promotenode *snap; + int err; + + err = dsl_dataset_hold(name, FTAG, &ds); + if (err) + return (err); + dd = ds->ds_dir; + dp = dd->dd_pool; + + err = dmu_object_info(dp->dp_meta_objset, + ds->ds_phys->ds_snapnames_zapobj, &doi); + if (err) { + dsl_dataset_rele(ds, FTAG); + return (err); + } + + if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) { + dsl_dataset_rele(ds, FTAG); + return (EINVAL); + } + + /* + * We are going to inherit all the snapshots taken before our + * origin (i.e., our new origin will be our parent's origin). + * Take ownership of them so that we can rename them into our + * namespace. + */ + rw_enter(&dp->dp_config_rwlock, RW_READER); + + err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj, + &pa.shared_snaps); + if (err != 0) + goto out; + + err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps); + if (err != 0) + goto out; + + snap = list_head(&pa.shared_snaps); + ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj); + err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj, + snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps); + if (err != 0) + goto out; + + if (dsl_dir_is_clone(snap->ds->ds_dir)) { + err = dsl_dataset_own_obj(dp, + snap->ds->ds_dir->dd_phys->dd_origin_obj, + 0, FTAG, &pa.origin_origin); + if (err != 0) + goto out; + } + +out: + rw_exit(&dp->dp_config_rwlock); + + /* + * Add in 128x the snapnames zapobj size, since we will be moving + * a bunch of snapnames to the promoted ds, and dirtying their + * bonus buffers. 
+ */ + if (err == 0) { + err = dsl_sync_task_do(dp, dsl_dataset_promote_check, + dsl_dataset_promote_sync, ds, &pa, + 2 + 2 * doi.doi_physical_blks); + } + + snaplist_destroy(&pa.shared_snaps, B_TRUE); + snaplist_destroy(&pa.clone_snaps, B_FALSE); + snaplist_destroy(&pa.origin_snaps, B_FALSE); + if (pa.origin_origin) + dsl_dataset_disown(pa.origin_origin, FTAG); + dsl_dataset_rele(ds, FTAG); + return (err); +} + +struct cloneswaparg { + dsl_dataset_t *cds; /* clone dataset */ + dsl_dataset_t *ohds; /* origin's head dataset */ + boolean_t force; + int64_t unused_refres_delta; /* change in unconsumed refreservation */ +}; + +/* ARGSUSED */ +static int +dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + struct cloneswaparg *csa = arg1; + + /* they should both be heads */ + if (dsl_dataset_is_snapshot(csa->cds) || + dsl_dataset_is_snapshot(csa->ohds)) + return (EINVAL); + + /* the branch point should be just before them */ + if (csa->cds->ds_prev != csa->ohds->ds_prev) + return (EINVAL); + + /* cds should be the clone */ + if (csa->cds->ds_prev->ds_phys->ds_next_snap_obj != + csa->ohds->ds_object) + return (EINVAL); + + /* the clone should be a child of the origin */ + if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir) + return (EINVAL); + + /* ohds shouldn't be modified unless 'force' */ + if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds)) + return (ETXTBSY); + + /* adjust amount of any unconsumed refreservation */ + csa->unused_refres_delta = + (int64_t)MIN(csa->ohds->ds_reserved, + csa->ohds->ds_phys->ds_unique_bytes) - + (int64_t)MIN(csa->ohds->ds_reserved, + csa->cds->ds_phys->ds_unique_bytes); + + if (csa->unused_refres_delta > 0 && + csa->unused_refres_delta > + dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE)) + return (ENOSPC); + + return (0); +} + +/* ARGSUSED */ +static void +dsl_dataset_clone_swap_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + struct cloneswaparg *csa = arg1; + dsl_pool_t *dp = csa->cds->ds_dir->dd_pool; + + ASSERT(csa->cds->ds_reserved == 0); + ASSERT(csa->cds->ds_quota == csa->ohds->ds_quota); + + dmu_buf_will_dirty(csa->cds->ds_dbuf, tx); + dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx); + dmu_buf_will_dirty(csa->cds->ds_prev->ds_dbuf, tx); + + if (csa->cds->ds_user_ptr != NULL) { + csa->cds->ds_user_evict_func(csa->cds, csa->cds->ds_user_ptr); + csa->cds->ds_user_ptr = NULL; + } + + if (csa->ohds->ds_user_ptr != NULL) { + csa->ohds->ds_user_evict_func(csa->ohds, + csa->ohds->ds_user_ptr); + csa->ohds->ds_user_ptr = NULL; + } + + /* reset origin's unique bytes */ + VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist, + csa->cds->ds_prev->ds_phys->ds_prev_snap_txg, UINT64_MAX, + &csa->cds->ds_prev->ds_phys->ds_unique_bytes)); + + /* swap blkptrs */ + { + blkptr_t tmp; + tmp = csa->ohds->ds_phys->ds_bp; + csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp; + csa->cds->ds_phys->ds_bp = tmp; + } + + /* set dd_*_bytes */ + { + int64_t dused, dcomp, duncomp; + uint64_t cdl_used, cdl_comp, cdl_uncomp; + uint64_t odl_used, odl_comp, odl_uncomp; + + ASSERT3U(csa->cds->ds_dir->dd_phys-> + dd_used_breakdown[DD_USED_SNAP], ==, 0); + + VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used, + &cdl_comp, &cdl_uncomp)); + VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used, + &odl_comp, &odl_uncomp)); + + dused = csa->cds->ds_phys->ds_used_bytes + cdl_used - + (csa->ohds->ds_phys->ds_used_bytes + odl_used); + dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp - + 
(csa->ohds->ds_phys->ds_compressed_bytes + odl_comp); + duncomp = csa->cds->ds_phys->ds_uncompressed_bytes + + cdl_uncomp - + (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp); + + dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD, + dused, dcomp, duncomp, tx); + dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD, + -dused, -dcomp, -duncomp, tx); + + /* + * The difference in the space used by snapshots is the + * difference in snapshot space due to the head's + * deadlist (since that's the only thing that's + * changing that affects the snapused). + */ + VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist, + csa->ohds->ds_origin_txg, UINT64_MAX, &cdl_used)); + VERIFY(0 == bplist_space_birthrange(&csa->ohds->ds_deadlist, + csa->ohds->ds_origin_txg, UINT64_MAX, &odl_used)); + dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used, + DD_USED_HEAD, DD_USED_SNAP, tx); + } + +#define SWITCH64(x, y) \ + { \ + uint64_t __tmp = (x); \ + (x) = (y); \ + (y) = __tmp; \ + } + + /* swap ds_*_bytes */ + SWITCH64(csa->ohds->ds_phys->ds_used_bytes, + csa->cds->ds_phys->ds_used_bytes); + SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes, + csa->cds->ds_phys->ds_compressed_bytes); + SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes, + csa->cds->ds_phys->ds_uncompressed_bytes); + SWITCH64(csa->ohds->ds_phys->ds_unique_bytes, + csa->cds->ds_phys->ds_unique_bytes); + + /* apply any parent delta for change in unconsumed refreservation */ + dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV, + csa->unused_refres_delta, 0, 0, tx); + + /* swap deadlists */ + bplist_close(&csa->cds->ds_deadlist); + bplist_close(&csa->ohds->ds_deadlist); + SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj, + csa->cds->ds_phys->ds_deadlist_obj); + VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset, + csa->cds->ds_phys->ds_deadlist_obj)); + VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset, + csa->ohds->ds_phys->ds_deadlist_obj)); + + dsl_pool_ds_clone_swapped(csa->ohds, csa->cds, tx); +} + +/* + * Swap 'clone' with its origin head file system. Used at the end + * of "online recv" to swizzle the file system to the new version. + */ +int +dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head, + boolean_t force) +{ + struct cloneswaparg csa; + int error; + + ASSERT(clone->ds_owner); + ASSERT(origin_head->ds_owner); +retry: + /* Need exclusive access for the swap */ + rw_enter(&clone->ds_rwlock, RW_WRITER); + if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) { + rw_exit(&clone->ds_rwlock); + rw_enter(&origin_head->ds_rwlock, RW_WRITER); + if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) { + rw_exit(&origin_head->ds_rwlock); + goto retry; + } + } + csa.cds = clone; + csa.ohds = origin_head; + csa.force = force; + error = dsl_sync_task_do(clone->ds_dir->dd_pool, + dsl_dataset_clone_swap_check, + dsl_dataset_clone_swap_sync, &csa, NULL, 9); + return (error); +} + +/* + * Given a pool name and a dataset object number in that pool, + * return the name of that dataset. 
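+ * The caller supplies the destination buffer; dsl_dataset_name()
+ * expects it to hold a full dataset name (MAXNAMELEN bytes).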
+ */ +int +dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf) +{ + spa_t *spa; + dsl_pool_t *dp; + dsl_dataset_t *ds; + int error; + + if ((error = spa_open(pname, &spa, FTAG)) != 0) + return (error); + dp = spa_get_dsl(spa); + rw_enter(&dp->dp_config_rwlock, RW_READER); + if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) { + dsl_dataset_name(ds, buf); + dsl_dataset_rele(ds, FTAG); + } + rw_exit(&dp->dp_config_rwlock); + spa_close(spa, FTAG); + + return (error); +} + +int +dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota, + uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv) +{ + int error = 0; + + ASSERT3S(asize, >, 0); + + /* + * *ref_rsrv is the portion of asize that will come from any + * unconsumed refreservation space. + */ + *ref_rsrv = 0; + + mutex_enter(&ds->ds_lock); + /* + * Make a space adjustment for reserved bytes. + */ + if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) { + ASSERT3U(*used, >=, + ds->ds_reserved - ds->ds_phys->ds_unique_bytes); + *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes); + *ref_rsrv = + asize - MIN(asize, parent_delta(ds, asize + inflight)); + } + + if (!check_quota || ds->ds_quota == 0) { + mutex_exit(&ds->ds_lock); + return (0); + } + /* + * If they are requesting more space, and our current estimate + * is over quota, they get to try again unless the actual + * on-disk is over quota and there are no pending changes (which + * may free up space for us). + */ + if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) { + if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota) + error = ERESTART; + else + error = EDQUOT; + } + mutex_exit(&ds->ds_lock); + + return (error); +} + +/* ARGSUSED */ +static int +dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + uint64_t *quotap = arg2; + uint64_t new_quota = *quotap; + + if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA) + return (ENOTSUP); + + if (new_quota == 0) + return (0); + + if (new_quota < ds->ds_phys->ds_used_bytes || + new_quota < ds->ds_reserved) + return (ENOSPC); + + return (0); +} + +/* ARGSUSED */ +void +dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + uint64_t *quotap = arg2; + uint64_t new_quota = *quotap; + + dmu_buf_will_dirty(ds->ds_dbuf, tx); + + ds->ds_quota = new_quota; + + dsl_prop_set_uint64_sync(ds->ds_dir, "refquota", new_quota, cr, tx); + + spa_history_internal_log(LOG_DS_REFQUOTA, ds->ds_dir->dd_pool->dp_spa, + tx, cr, "%lld dataset = %llu ", + (longlong_t)new_quota, ds->ds_object); +} + +int +dsl_dataset_set_quota(const char *dsname, uint64_t quota) +{ + dsl_dataset_t *ds; + int err; + + err = dsl_dataset_hold(dsname, FTAG, &ds); + if (err) + return (err); + + if (quota != ds->ds_quota) { + /* + * If someone removes a file, then tries to set the quota, we + * want to make sure the file freeing takes effect. 
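+ * txg_wait_open(dp, 0) waits for a new txg to open, which puts the
+ * sync task below in a txg later than the one carrying the pending
+ * frees; by the time dsl_dataset_set_quota_check() runs in syncing
+ * context the freed space has already been deducted from
+ * ds_used_bytes.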
+ */ + txg_wait_open(ds->ds_dir->dd_pool, 0); + + err = dsl_sync_task_do(ds->ds_dir->dd_pool, + dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync, + ds, "a, 0); + } + dsl_dataset_rele(ds, FTAG); + return (err); +} + +static int +dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + uint64_t *reservationp = arg2; + uint64_t new_reservation = *reservationp; + int64_t delta; + uint64_t unique; + + if (new_reservation > INT64_MAX) + return (EOVERFLOW); + + if (spa_version(ds->ds_dir->dd_pool->dp_spa) < + SPA_VERSION_REFRESERVATION) + return (ENOTSUP); + + if (dsl_dataset_is_snapshot(ds)) + return (EINVAL); + + /* + * If we are doing the preliminary check in open context, the + * space estimates may be inaccurate. + */ + if (!dmu_tx_is_syncing(tx)) + return (0); + + mutex_enter(&ds->ds_lock); + unique = dsl_dataset_unique(ds); + delta = MAX(unique, new_reservation) - MAX(unique, ds->ds_reserved); + mutex_exit(&ds->ds_lock); + + if (delta > 0 && + delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) + return (ENOSPC); + if (delta > 0 && ds->ds_quota > 0 && + new_reservation > ds->ds_quota) + return (ENOSPC); + + return (0); +} + +/* ARGSUSED */ +static void +dsl_dataset_set_reservation_sync(void *arg1, void *arg2, cred_t *cr, + dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + uint64_t *reservationp = arg2; + uint64_t new_reservation = *reservationp; + uint64_t unique; + int64_t delta; + + dmu_buf_will_dirty(ds->ds_dbuf, tx); + + mutex_enter(&ds->ds_dir->dd_lock); + mutex_enter(&ds->ds_lock); + unique = dsl_dataset_unique(ds); + delta = MAX(0, (int64_t)(new_reservation - unique)) - + MAX(0, (int64_t)(ds->ds_reserved - unique)); + ds->ds_reserved = new_reservation; + mutex_exit(&ds->ds_lock); + + dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx); + mutex_exit(&ds->ds_dir->dd_lock); + dsl_prop_set_uint64_sync(ds->ds_dir, "refreservation", + new_reservation, cr, tx); + + spa_history_internal_log(LOG_DS_REFRESERV, + ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu", + (longlong_t)new_reservation, ds->ds_object); +} + +int +dsl_dataset_set_reservation(const char *dsname, uint64_t reservation) +{ + dsl_dataset_t *ds; + int err; + + err = dsl_dataset_hold(dsname, FTAG, &ds); + if (err) + return (err); + + err = dsl_sync_task_do(ds->ds_dir->dd_pool, + dsl_dataset_set_reservation_check, + dsl_dataset_set_reservation_sync, ds, &reservation, 0); + dsl_dataset_rele(ds, FTAG); + return (err); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_deleg.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_deleg.c new file mode 100644 index 000000000000..da5d15787570 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_deleg.c @@ -0,0 +1,735 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * DSL permissions are stored in a two level zap attribute + * mechanism. The first level identifies the "class" of + * entry. The class is identified by the first 2 letters of + * the attribute. The second letter "l" or "d" identifies whether + * it is a local or descendent permission. The first letter + * identifies the type of entry. + * + * ul$ identifies permissions granted locally for this userid. + * ud$ identifies permissions granted on descendent datasets for + * this userid. + * Ul$ identifies permission sets granted locally for this userid. + * Ud$ identifies permission sets granted on descendent datasets for + * this userid. + * gl$ identifies permissions granted locally for this groupid. + * gd$ identifies permissions granted on descendent datasets for + * this groupid. + * Gl$ identifies permission sets granted locally for this groupid. + * Gd$ identifies permission sets granted on descendent datasets for + * this groupid. + * el$ identifies permissions granted locally for everyone. + * ed$ identifies permissions granted on descendent datasets + * for everyone. + * El$ identifies permission sets granted locally for everyone. + * Ed$ identifies permission sets granted to descendent datasets for + * everyone. + * c-$ identifies permission to create at dataset creation time. + * C-$ identifies permission sets to grant locally at dataset creation + * time. + * s-$@ permissions defined in specified set @ + * S-$@ Sets defined in named set @ + * + * Each of the above entities points to another zap attribute that contains one + * attribute for each allowed permission, such as create, destroy,... + * All of the "upper" case class types will specify permission set names + * rather than permissions. + * + * Basically it looks something like this: + * ul$12 -> ZAP OBJ -> permissions... + * + * The ZAP OBJ is referred to as the jump object. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for the default checksum value */ +#include +#include +#include +#include + +#include "zfs_deleg.h" + +/* + * Validate that user is allowed to delegate specified permissions. + * + * In order to delegate "create" you must have "create" + * and "allow". + */ +int +dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr) +{ + nvpair_t *whopair = NULL; + int error; + + if ((error = dsl_deleg_access(ddname, ZFS_DELEG_PERM_ALLOW, cr)) != 0) + return (error); + + while (whopair = nvlist_next_nvpair(nvp, whopair)) { + nvlist_t *perms; + nvpair_t *permpair = NULL; + + VERIFY(nvpair_value_nvlist(whopair, &perms) == 0); + + while (permpair = nvlist_next_nvpair(perms, permpair)) { + const char *perm = nvpair_name(permpair); + + if (strcmp(perm, ZFS_DELEG_PERM_ALLOW) == 0) + return (EPERM); + + if ((error = dsl_deleg_access(ddname, perm, cr)) != 0) + return (error); + } + } + return (0); +} + +/* + * Validate that user is allowed to unallow specified permissions. They + * must have the 'allow' permission, and even then can only unallow + * perms for their uid. 
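+ * In terms of the whokey encoding described above, a caller with
+ * uid 1001 may only name entries such as "ul$1001", "ud$1001",
+ * "Ul$1001" or "Ud$1001"; group, everyone and create-time entries
+ * are rejected with EPERM.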
+ */ +int +dsl_deleg_can_unallow(char *ddname, nvlist_t *nvp, cred_t *cr) +{ + nvpair_t *whopair = NULL; + int error; + char idstr[32]; + + if ((error = dsl_deleg_access(ddname, ZFS_DELEG_PERM_ALLOW, cr)) != 0) + return (error); + + (void) snprintf(idstr, sizeof (idstr), "%lld", + (longlong_t)crgetuid(cr)); + + while (whopair = nvlist_next_nvpair(nvp, whopair)) { + zfs_deleg_who_type_t type = nvpair_name(whopair)[0]; + + if (type != ZFS_DELEG_USER && + type != ZFS_DELEG_USER_SETS) + return (EPERM); + + if (strcmp(idstr, &nvpair_name(whopair)[3]) != 0) + return (EPERM); + } + return (0); +} + +static void +dsl_deleg_set_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + nvlist_t *nvp = arg2; + objset_t *mos = dd->dd_pool->dp_meta_objset; + nvpair_t *whopair = NULL; + uint64_t zapobj = dd->dd_phys->dd_deleg_zapobj; + + if (zapobj == 0) { + dmu_buf_will_dirty(dd->dd_dbuf, tx); + zapobj = dd->dd_phys->dd_deleg_zapobj = zap_create(mos, + DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx); + } + + while (whopair = nvlist_next_nvpair(nvp, whopair)) { + const char *whokey = nvpair_name(whopair); + nvlist_t *perms; + nvpair_t *permpair = NULL; + uint64_t jumpobj; + + VERIFY(nvpair_value_nvlist(whopair, &perms) == 0); + + if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) != 0) { + jumpobj = zap_create(mos, DMU_OT_DSL_PERMS, + DMU_OT_NONE, 0, tx); + VERIFY(zap_update(mos, zapobj, + whokey, 8, 1, &jumpobj, tx) == 0); + } + + while (permpair = nvlist_next_nvpair(perms, permpair)) { + const char *perm = nvpair_name(permpair); + uint64_t n = 0; + + VERIFY(zap_update(mos, jumpobj, + perm, 8, 1, &n, tx) == 0); + spa_history_internal_log(LOG_DS_PERM_UPDATE, + dd->dd_pool->dp_spa, tx, cr, + "%s %s dataset = %llu", whokey, perm, + dd->dd_phys->dd_head_dataset_obj); + } + } +} + +static void +dsl_deleg_unset_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + nvlist_t *nvp = arg2; + objset_t *mos = dd->dd_pool->dp_meta_objset; + nvpair_t *whopair = NULL; + uint64_t zapobj = dd->dd_phys->dd_deleg_zapobj; + + if (zapobj == 0) + return; + + while (whopair = nvlist_next_nvpair(nvp, whopair)) { + const char *whokey = nvpair_name(whopair); + nvlist_t *perms; + nvpair_t *permpair = NULL; + uint64_t jumpobj; + + if (nvpair_value_nvlist(whopair, &perms) != 0) { + if (zap_lookup(mos, zapobj, whokey, 8, + 1, &jumpobj) == 0) { + (void) zap_remove(mos, zapobj, whokey, tx); + VERIFY(0 == zap_destroy(mos, jumpobj, tx)); + } + spa_history_internal_log(LOG_DS_PERM_WHO_REMOVE, + dd->dd_pool->dp_spa, tx, cr, + "%s dataset = %llu", whokey, + dd->dd_phys->dd_head_dataset_obj); + continue; + } + + if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) != 0) + continue; + + while (permpair = nvlist_next_nvpair(perms, permpair)) { + const char *perm = nvpair_name(permpair); + uint64_t n = 0; + + (void) zap_remove(mos, jumpobj, perm, tx); + if (zap_count(mos, jumpobj, &n) == 0 && n == 0) { + (void) zap_remove(mos, zapobj, + whokey, tx); + VERIFY(0 == zap_destroy(mos, + jumpobj, tx)); + } + spa_history_internal_log(LOG_DS_PERM_REMOVE, + dd->dd_pool->dp_spa, tx, cr, + "%s %s dataset = %llu", whokey, perm, + dd->dd_phys->dd_head_dataset_obj); + } + } +} + +int +dsl_deleg_set(const char *ddname, nvlist_t *nvp, boolean_t unset) +{ + dsl_dir_t *dd; + int error; + nvpair_t *whopair = NULL; + int blocks_modified = 0; + + error = dsl_dir_open(ddname, FTAG, &dd, NULL); + if (error) + return (error); + + if (spa_version(dmu_objset_spa(dd->dd_pool->dp_meta_objset)) < + 
SPA_VERSION_DELEGATED_PERMS) { + dsl_dir_close(dd, FTAG); + return (ENOTSUP); + } + + while (whopair = nvlist_next_nvpair(nvp, whopair)) + blocks_modified++; + + error = dsl_sync_task_do(dd->dd_pool, NULL, + unset ? dsl_deleg_unset_sync : dsl_deleg_set_sync, + dd, nvp, blocks_modified); + dsl_dir_close(dd, FTAG); + + return (error); +} + +/* + * Find all 'allow' permissions from a given point and then continue + * traversing up to the root. + * + * This function constructs an nvlist of nvlists. + * each setpoint is an nvlist composed of an nvlist of an nvlist + * of the individual * users/groups/everyone/create + * permissions. + * + * The nvlist will look like this. + * + * { source fsname -> { whokeys { permissions,...}, ...}} + * + * The fsname nvpairs will be arranged in a bottom up order. For example, + * if we have the following structure a/b/c then the nvpairs for the fsnames + * will be ordered a/b/c, a/b, a. + */ +int +dsl_deleg_get(const char *ddname, nvlist_t **nvp) +{ + dsl_dir_t *dd, *startdd; + dsl_pool_t *dp; + int error; + objset_t *mos; + + error = dsl_dir_open(ddname, FTAG, &startdd, NULL); + if (error) + return (error); + + dp = startdd->dd_pool; + mos = dp->dp_meta_objset; + + VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + rw_enter(&dp->dp_config_rwlock, RW_READER); + for (dd = startdd; dd != NULL; dd = dd->dd_parent) { + zap_cursor_t basezc; + zap_attribute_t baseza; + nvlist_t *sp_nvp; + uint64_t n; + char source[MAXNAMELEN]; + + if (dd->dd_phys->dd_deleg_zapobj && + (zap_count(mos, dd->dd_phys->dd_deleg_zapobj, + &n) == 0) && n) { + VERIFY(nvlist_alloc(&sp_nvp, + NV_UNIQUE_NAME, KM_SLEEP) == 0); + } else { + continue; + } + + for (zap_cursor_init(&basezc, mos, + dd->dd_phys->dd_deleg_zapobj); + zap_cursor_retrieve(&basezc, &baseza) == 0; + zap_cursor_advance(&basezc)) { + zap_cursor_t zc; + zap_attribute_t za; + nvlist_t *perms_nvp; + + ASSERT(baseza.za_integer_length == 8); + ASSERT(baseza.za_num_integers == 1); + + VERIFY(nvlist_alloc(&perms_nvp, + NV_UNIQUE_NAME, KM_SLEEP) == 0); + for (zap_cursor_init(&zc, mos, baseza.za_first_integer); + zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_advance(&zc)) { + VERIFY(nvlist_add_boolean(perms_nvp, + za.za_name) == 0); + } + zap_cursor_fini(&zc); + VERIFY(nvlist_add_nvlist(sp_nvp, baseza.za_name, + perms_nvp) == 0); + nvlist_free(perms_nvp); + } + + zap_cursor_fini(&basezc); + + dsl_dir_name(dd, source); + VERIFY(nvlist_add_nvlist(*nvp, source, sp_nvp) == 0); + nvlist_free(sp_nvp); + } + rw_exit(&dp->dp_config_rwlock); + + dsl_dir_close(startdd, FTAG); + return (0); +} + +/* + * Routines for dsl_deleg_access() -- access checking. + */ +typedef struct perm_set { + avl_node_t p_node; + boolean_t p_matched; + char p_setname[ZFS_MAX_DELEG_NAME]; +} perm_set_t; + +static int +perm_set_compare(const void *arg1, const void *arg2) +{ + const perm_set_t *node1 = arg1; + const perm_set_t *node2 = arg2; + int val; + + val = strcmp(node1->p_setname, node2->p_setname); + if (val == 0) + return (0); + return (val > 0 ? 1 : -1); +} + +/* + * Determine whether a specified permission exists. + * + * First the base attribute has to be retrieved. i.e. ul$12 + * Once the base object has been retrieved the actual permission + * is lookup up in the zap object the base object points to. + * + * Return 0 if permission exists, ENOENT if there is no whokey, EPERM if + * there is no perm in that jumpobj. 
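+ *
+ * For example, asking whether uid 12 holds "destroy" locally builds the
+ * whokey "ul$12", looks it up in the directory's delegation zapobj to
+ * find the jump object, and then looks up "destroy" in that jump object.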
+ */ +static int +dsl_check_access(objset_t *mos, uint64_t zapobj, + char type, char checkflag, void *valp, const char *perm) +{ + int error; + uint64_t jumpobj, zero; + char whokey[ZFS_MAX_DELEG_NAME]; + + zfs_deleg_whokey(whokey, type, checkflag, valp); + error = zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj); + if (error == 0) { + error = zap_lookup(mos, jumpobj, perm, 8, 1, &zero); + if (error == ENOENT) + error = EPERM; + } + return (error); +} + +/* + * check a specified user/group for a requested permission + */ +static int +dsl_check_user_access(objset_t *mos, uint64_t zapobj, const char *perm, + int checkflag, cred_t *cr) +{ + const gid_t *gids; + int ngids; + int i; + uint64_t id; + + /* check for user */ + id = crgetuid(cr); + if (dsl_check_access(mos, zapobj, + ZFS_DELEG_USER, checkflag, &id, perm) == 0) + return (0); + + /* check for users primary group */ + id = crgetgid(cr); + if (dsl_check_access(mos, zapobj, + ZFS_DELEG_GROUP, checkflag, &id, perm) == 0) + return (0); + + /* check for everyone entry */ + id = -1; + if (dsl_check_access(mos, zapobj, + ZFS_DELEG_EVERYONE, checkflag, &id, perm) == 0) + return (0); + + /* check each supplemental group user is a member of */ + ngids = crgetngroups(cr); + gids = crgetgroups(cr); + for (i = 0; i != ngids; i++) { + id = gids[i]; + if (dsl_check_access(mos, zapobj, + ZFS_DELEG_GROUP, checkflag, &id, perm) == 0) + return (0); + } + + return (EPERM); +} + +/* + * Iterate over the sets specified in the specified zapobj + * and load them into the permsets avl tree. + */ +static int +dsl_load_sets(objset_t *mos, uint64_t zapobj, + char type, char checkflag, void *valp, avl_tree_t *avl) +{ + zap_cursor_t zc; + zap_attribute_t za; + perm_set_t *permnode; + avl_index_t idx; + uint64_t jumpobj; + int error; + char whokey[ZFS_MAX_DELEG_NAME]; + + zfs_deleg_whokey(whokey, type, checkflag, valp); + + error = zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj); + if (error != 0) + return (error); + + for (zap_cursor_init(&zc, mos, jumpobj); + zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_advance(&zc)) { + permnode = kmem_alloc(sizeof (perm_set_t), KM_SLEEP); + (void) strlcpy(permnode->p_setname, za.za_name, + sizeof (permnode->p_setname)); + permnode->p_matched = B_FALSE; + + if (avl_find(avl, permnode, &idx) == NULL) { + avl_insert(avl, permnode, idx); + } else { + kmem_free(permnode, sizeof (perm_set_t)); + } + } + zap_cursor_fini(&zc); + return (0); +} + +/* + * Load all permissions user based on cred belongs to. + */ +static void +dsl_load_user_sets(objset_t *mos, uint64_t zapobj, avl_tree_t *avl, + char checkflag, cred_t *cr) +{ + const gid_t *gids; + int ngids, i; + uint64_t id; + + id = crgetuid(cr); + (void) dsl_load_sets(mos, zapobj, + ZFS_DELEG_USER_SETS, checkflag, &id, avl); + + id = crgetgid(cr); + (void) dsl_load_sets(mos, zapobj, + ZFS_DELEG_GROUP_SETS, checkflag, &id, avl); + + (void) dsl_load_sets(mos, zapobj, + ZFS_DELEG_EVERYONE_SETS, checkflag, NULL, avl); + + ngids = crgetngroups(cr); + gids = crgetgroups(cr); + for (i = 0; i != ngids; i++) { + id = gids[i]; + (void) dsl_load_sets(mos, zapobj, + ZFS_DELEG_GROUP_SETS, checkflag, &id, avl); + } +} + +/* + * Check if user has requested permission. 
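+ *
+ * The check starts at the dataset's own directory using the "local"
+ * flavor of each whokey and walks up through its ancestors using the
+ * "descendent" flavor, expanding any permission sets it finds along
+ * the way.  The first directory that grants the permission ends the
+ * search; if none does, EPERM is returned.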
+ */ +int +dsl_deleg_access(const char *dsname, const char *perm, cred_t *cr) +{ + dsl_dataset_t *ds; + dsl_dir_t *dd; + dsl_pool_t *dp; + void *cookie; + int error; + char checkflag = ZFS_DELEG_LOCAL; + objset_t *mos; + avl_tree_t permsets; + perm_set_t *setnode; + + error = dsl_dataset_hold(dsname, FTAG, &ds); + if (error) + return (error); + + dp = ds->ds_dir->dd_pool; + mos = dp->dp_meta_objset; + + if (dsl_delegation_on(mos) == B_FALSE) { + dsl_dataset_rele(ds, FTAG); + return (ECANCELED); + } + + if (spa_version(dmu_objset_spa(dp->dp_meta_objset)) < + SPA_VERSION_DELEGATED_PERMS) { + dsl_dataset_rele(ds, FTAG); + return (EPERM); + } + + avl_create(&permsets, perm_set_compare, sizeof (perm_set_t), + offsetof(perm_set_t, p_node)); + + rw_enter(&dp->dp_config_rwlock, RW_READER); + for (dd = ds->ds_dir; dd != NULL; dd = dd->dd_parent, + checkflag = ZFS_DELEG_DESCENDENT) { + uint64_t zapobj; + boolean_t expanded; + + /* + * If not in global zone then make sure + * the zoned property is set + */ + if (!INGLOBALZONE(curproc)) { + uint64_t zoned; + + if (dsl_prop_get_dd(dd, + zfs_prop_to_name(ZFS_PROP_ZONED), + 8, 1, &zoned, NULL) != 0) + break; + if (!zoned) + break; + } + zapobj = dd->dd_phys->dd_deleg_zapobj; + + if (zapobj == 0) + continue; + + dsl_load_user_sets(mos, zapobj, &permsets, checkflag, cr); +again: + expanded = B_FALSE; + for (setnode = avl_first(&permsets); setnode; + setnode = AVL_NEXT(&permsets, setnode)) { + if (setnode->p_matched == B_TRUE) + continue; + + /* See if this set directly grants this permission */ + error = dsl_check_access(mos, zapobj, + ZFS_DELEG_NAMED_SET, 0, setnode->p_setname, perm); + if (error == 0) + goto success; + if (error == EPERM) + setnode->p_matched = B_TRUE; + + /* See if this set includes other sets */ + error = dsl_load_sets(mos, zapobj, + ZFS_DELEG_NAMED_SET_SETS, 0, + setnode->p_setname, &permsets); + if (error == 0) + setnode->p_matched = expanded = B_TRUE; + } + /* + * If we expanded any sets, that will define more sets, + * which we need to check. + */ + if (expanded) + goto again; + + error = dsl_check_user_access(mos, zapobj, perm, checkflag, cr); + if (error == 0) + goto success; + } + error = EPERM; +success: + rw_exit(&dp->dp_config_rwlock); + dsl_dataset_rele(ds, FTAG); + + cookie = NULL; + while ((setnode = avl_destroy_nodes(&permsets, &cookie)) != NULL) + kmem_free(setnode, sizeof (perm_set_t)); + + return (error); +} + +/* + * Other routines. + */ + +static void +copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj, + boolean_t dosets, uint64_t uid, dmu_tx_t *tx) +{ + objset_t *mos = dd->dd_pool->dp_meta_objset; + uint64_t jumpobj, pjumpobj; + uint64_t zapobj = dd->dd_phys->dd_deleg_zapobj; + zap_cursor_t zc; + zap_attribute_t za; + char whokey[ZFS_MAX_DELEG_NAME]; + + zfs_deleg_whokey(whokey, + dosets ? ZFS_DELEG_CREATE_SETS : ZFS_DELEG_CREATE, + ZFS_DELEG_LOCAL, NULL); + if (zap_lookup(mos, pzapobj, whokey, 8, 1, &pjumpobj) != 0) + return; + + if (zapobj == 0) { + dmu_buf_will_dirty(dd->dd_dbuf, tx); + zapobj = dd->dd_phys->dd_deleg_zapobj = zap_create(mos, + DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx); + } + + zfs_deleg_whokey(whokey, + dosets ? 
ZFS_DELEG_USER_SETS : ZFS_DELEG_USER, + ZFS_DELEG_LOCAL, &uid); + if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) == ENOENT) { + jumpobj = zap_create(mos, DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx); + VERIFY(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx) == 0); + } + + for (zap_cursor_init(&zc, mos, pjumpobj); + zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_advance(&zc)) { + uint64_t zero = 0; + ASSERT(za.za_integer_length == 8 && za.za_num_integers == 1); + + VERIFY(zap_update(mos, jumpobj, za.za_name, + 8, 1, &zero, tx) == 0); + } + zap_cursor_fini(&zc); +} + +/* + * set all create time permission on new dataset. + */ +void +dsl_deleg_set_create_perms(dsl_dir_t *sdd, dmu_tx_t *tx, cred_t *cr) +{ + dsl_dir_t *dd; + uint64_t uid = crgetuid(cr); + + if (spa_version(dmu_objset_spa(sdd->dd_pool->dp_meta_objset)) < + SPA_VERSION_DELEGATED_PERMS) + return; + + for (dd = sdd->dd_parent; dd != NULL; dd = dd->dd_parent) { + uint64_t pzapobj = dd->dd_phys->dd_deleg_zapobj; + + if (pzapobj == 0) + continue; + + copy_create_perms(sdd, pzapobj, B_FALSE, uid, tx); + copy_create_perms(sdd, pzapobj, B_TRUE, uid, tx); + } +} + +int +dsl_deleg_destroy(objset_t *mos, uint64_t zapobj, dmu_tx_t *tx) +{ + zap_cursor_t zc; + zap_attribute_t za; + + if (zapobj == 0) + return (0); + + for (zap_cursor_init(&zc, mos, zapobj); + zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_advance(&zc)) { + ASSERT(za.za_integer_length == 8 && za.za_num_integers == 1); + VERIFY(0 == zap_destroy(mos, za.za_first_integer, tx)); + } + zap_cursor_fini(&zc); + VERIFY(0 == zap_destroy(mos, zapobj, tx)); + return (0); +} + +boolean_t +dsl_delegation_on(objset_t *os) +{ + return (os->os->os_spa->spa_delegation); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_dir.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_dir.c new file mode 100644 index 000000000000..48d87f97f669 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_dir.c @@ -0,0 +1,1331 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "zfs_namecheck.h" + +static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd); +static void dsl_dir_set_reservation_sync(void *arg1, void *arg2, + cred_t *cr, dmu_tx_t *tx); + + +/* ARGSUSED */ +static void +dsl_dir_evict(dmu_buf_t *db, void *arg) +{ + dsl_dir_t *dd = arg; + dsl_pool_t *dp = dd->dd_pool; + int t; + + for (t = 0; t < TXG_SIZE; t++) { + ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t)); + ASSERT(dd->dd_tempreserved[t] == 0); + ASSERT(dd->dd_space_towrite[t] == 0); + } + + if (dd->dd_parent) + dsl_dir_close(dd->dd_parent, dd); + + spa_close(dd->dd_pool->dp_spa, dd); + + /* + * The props callback list should be empty since they hold the + * dir open. + */ + list_destroy(&dd->dd_prop_cbs); + mutex_destroy(&dd->dd_lock); + kmem_free(dd, sizeof (dsl_dir_t)); +} + +int +dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj, + const char *tail, void *tag, dsl_dir_t **ddp) +{ + dmu_buf_t *dbuf; + dsl_dir_t *dd; + int err; + + ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || + dsl_pool_sync_context(dp)); + + err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf); + if (err) + return (err); + dd = dmu_buf_get_user(dbuf); +#ifdef ZFS_DEBUG + { + dmu_object_info_t doi; + dmu_object_info_from_db(dbuf, &doi); + ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR); + ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t)); + } +#endif + if (dd == NULL) { + dsl_dir_t *winner; + int err; + + dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP); + dd->dd_object = ddobj; + dd->dd_dbuf = dbuf; + dd->dd_pool = dp; + dd->dd_phys = dbuf->db_data; + mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL); + + list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t), + offsetof(dsl_prop_cb_record_t, cbr_node)); + + if (dd->dd_phys->dd_parent_obj) { + err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj, + NULL, dd, &dd->dd_parent); + if (err) + goto errout; + if (tail) { +#ifdef ZFS_DEBUG + uint64_t foundobj; + + err = zap_lookup(dp->dp_meta_objset, + dd->dd_parent->dd_phys->dd_child_dir_zapobj, + tail, sizeof (foundobj), 1, &foundobj); + ASSERT(err || foundobj == ddobj); +#endif + (void) strcpy(dd->dd_myname, tail); + } else { + err = zap_value_search(dp->dp_meta_objset, + dd->dd_parent->dd_phys->dd_child_dir_zapobj, + ddobj, 0, dd->dd_myname); + } + if (err) + goto errout; + } else { + (void) strcpy(dd->dd_myname, spa_name(dp->dp_spa)); + } + + winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys, + dsl_dir_evict); + if (winner) { + if (dd->dd_parent) + dsl_dir_close(dd->dd_parent, dd); + mutex_destroy(&dd->dd_lock); + kmem_free(dd, sizeof (dsl_dir_t)); + dd = winner; + } else { + spa_open_ref(dp->dp_spa, dd); + } + } + + /* + * The dsl_dir_t has both open-to-close and instantiate-to-evict + * holds on the spa. We need the open-to-close holds because + * otherwise the spa_refcnt wouldn't change when we open a + * dir which the spa also has open, so we could incorrectly + * think it was OK to unload/export/destroy the pool. We need + * the instantiate-to-evict hold because the dsl_dir_t has a + * pointer to the dd_pool, which has a pointer to the spa_t. 
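+ *
+ * The open-to-close hold is dropped in dsl_dir_close(); the
+ * instantiate-to-evict hold is dropped in dsl_dir_evict().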
+ */ + spa_open_ref(dp->dp_spa, tag); + ASSERT3P(dd->dd_pool, ==, dp); + ASSERT3U(dd->dd_object, ==, ddobj); + ASSERT3P(dd->dd_dbuf, ==, dbuf); + *ddp = dd; + return (0); + +errout: + if (dd->dd_parent) + dsl_dir_close(dd->dd_parent, dd); + mutex_destroy(&dd->dd_lock); + kmem_free(dd, sizeof (dsl_dir_t)); + dmu_buf_rele(dbuf, tag); + return (err); + +} + +void +dsl_dir_close(dsl_dir_t *dd, void *tag) +{ + dprintf_dd(dd, "%s\n", ""); + spa_close(dd->dd_pool->dp_spa, tag); + dmu_buf_rele(dd->dd_dbuf, tag); +} + +/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */ +void +dsl_dir_name(dsl_dir_t *dd, char *buf) +{ + if (dd->dd_parent) { + dsl_dir_name(dd->dd_parent, buf); + (void) strcat(buf, "/"); + } else { + buf[0] = '\0'; + } + if (!MUTEX_HELD(&dd->dd_lock)) { + /* + * recursive mutex so that we can use + * dprintf_dd() with dd_lock held + */ + mutex_enter(&dd->dd_lock); + (void) strcat(buf, dd->dd_myname); + mutex_exit(&dd->dd_lock); + } else { + (void) strcat(buf, dd->dd_myname); + } +} + +/* Calculate name legnth, avoiding all the strcat calls of dsl_dir_name */ +int +dsl_dir_namelen(dsl_dir_t *dd) +{ + int result = 0; + + if (dd->dd_parent) { + /* parent's name + 1 for the "/" */ + result = dsl_dir_namelen(dd->dd_parent) + 1; + } + + if (!MUTEX_HELD(&dd->dd_lock)) { + /* see dsl_dir_name */ + mutex_enter(&dd->dd_lock); + result += strlen(dd->dd_myname); + mutex_exit(&dd->dd_lock); + } else { + result += strlen(dd->dd_myname); + } + + return (result); +} + +int +dsl_dir_is_private(dsl_dir_t *dd) +{ + int rv = FALSE; + + if (dd->dd_parent && dsl_dir_is_private(dd->dd_parent)) + rv = TRUE; + if (dataset_name_hidden(dd->dd_myname)) + rv = TRUE; + return (rv); +} + + +static int +getcomponent(const char *path, char *component, const char **nextp) +{ + char *p; + if (path == NULL) + return (ENOENT); + /* This would be a good place to reserve some namespace... */ + p = strpbrk(path, "/@"); + if (p && (p[1] == '/' || p[1] == '@')) { + /* two separators in a row */ + return (EINVAL); + } + if (p == NULL || p == path) { + /* + * if the first thing is an @ or /, it had better be an + * @ and it had better not have any more ats or slashes, + * and it had better have something after the @. + */ + if (p != NULL && + (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0')) + return (EINVAL); + if (strlen(path) >= MAXNAMELEN) + return (ENAMETOOLONG); + (void) strcpy(component, path); + p = NULL; + } else if (p[0] == '/') { + if (p-path >= MAXNAMELEN) + return (ENAMETOOLONG); + (void) strncpy(component, path, p - path); + component[p-path] = '\0'; + p++; + } else if (p[0] == '@') { + /* + * if the next separator is an @, there better not be + * any more slashes. 
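+ *
+ * For example, "home@snap" yields the component "home" with *nextp
+ * left pointing at "@snap", while "home@snap/foo" is rejected with
+ * EINVAL.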
+ */ + if (strchr(path, '/')) + return (EINVAL); + if (p-path >= MAXNAMELEN) + return (ENAMETOOLONG); + (void) strncpy(component, path, p - path); + component[p-path] = '\0'; + } else { + ASSERT(!"invalid p"); + } + *nextp = p; + return (0); +} + +/* + * same as dsl_open_dir, ignore the first component of name and use the + * spa instead + */ +int +dsl_dir_open_spa(spa_t *spa, const char *name, void *tag, + dsl_dir_t **ddp, const char **tailp) +{ + char buf[MAXNAMELEN]; + const char *next, *nextnext = NULL; + int err; + dsl_dir_t *dd; + dsl_pool_t *dp; + uint64_t ddobj; + int openedspa = FALSE; + + dprintf("%s\n", name); + + err = getcomponent(name, buf, &next); + if (err) + return (err); + if (spa == NULL) { + err = spa_open(buf, &spa, FTAG); + if (err) { + dprintf("spa_open(%s) failed\n", buf); + return (err); + } + openedspa = TRUE; + + /* XXX this assertion belongs in spa_open */ + ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa))); + } + + dp = spa_get_dsl(spa); + + rw_enter(&dp->dp_config_rwlock, RW_READER); + err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd); + if (err) { + rw_exit(&dp->dp_config_rwlock); + if (openedspa) + spa_close(spa, FTAG); + return (err); + } + + while (next != NULL) { + dsl_dir_t *child_ds; + err = getcomponent(next, buf, &nextnext); + if (err) + break; + ASSERT(next[0] != '\0'); + if (next[0] == '@') + break; + dprintf("looking up %s in obj%lld\n", + buf, dd->dd_phys->dd_child_dir_zapobj); + + err = zap_lookup(dp->dp_meta_objset, + dd->dd_phys->dd_child_dir_zapobj, + buf, sizeof (ddobj), 1, &ddobj); + if (err) { + if (err == ENOENT) + err = 0; + break; + } + + err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds); + if (err) + break; + dsl_dir_close(dd, tag); + dd = child_ds; + next = nextnext; + } + rw_exit(&dp->dp_config_rwlock); + + if (err) { + dsl_dir_close(dd, tag); + if (openedspa) + spa_close(spa, FTAG); + return (err); + } + + /* + * It's an error if there's more than one component left, or + * tailp==NULL and there's any component left. + */ + if (next != NULL && + (tailp == NULL || (nextnext && nextnext[0] != '\0'))) { + /* bad path name */ + dsl_dir_close(dd, tag); + dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp); + err = ENOENT; + } + if (tailp) + *tailp = next; + if (openedspa) + spa_close(spa, FTAG); + *ddp = dd; + return (err); +} + +/* + * Return the dsl_dir_t, and possibly the last component which couldn't + * be found in *tail. Return NULL if the path is bogus, or if + * tail==NULL and we couldn't parse the whole name. (*tail)[0] == '@' + * means that the last component is a snapshot. 
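+ *
+ * For example, opening "tank/home/user@monday" returns the dsl_dir_t
+ * for tank/home/user with *tailp set to "@monday", while opening
+ * "tank/home/newfs" (where newfs does not exist yet) returns the
+ * dsl_dir_t for tank/home with *tailp set to "newfs".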
+ */ +int +dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp) +{ + return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp)); +} + +uint64_t +dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name, + dmu_tx_t *tx) +{ + objset_t *mos = dp->dp_meta_objset; + uint64_t ddobj; + dsl_dir_phys_t *dsphys; + dmu_buf_t *dbuf; + + ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0, + DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx); + if (pds) { + VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj, + name, sizeof (uint64_t), 1, &ddobj, tx)); + } else { + /* it's the root dir */ + VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx)); + } + VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf)); + dmu_buf_will_dirty(dbuf, tx); + dsphys = dbuf->db_data; + + dsphys->dd_creation_time = gethrestime_sec(); + if (pds) + dsphys->dd_parent_obj = pds->dd_object; + dsphys->dd_props_zapobj = zap_create(mos, + DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx); + dsphys->dd_child_dir_zapobj = zap_create(mos, + DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx); + if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN) + dsphys->dd_flags |= DD_FLAG_USED_BREAKDOWN; + dmu_buf_rele(dbuf, FTAG); + + return (ddobj); +} + +/* ARGSUSED */ +int +dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + dsl_pool_t *dp = dd->dd_pool; + objset_t *mos = dp->dp_meta_objset; + int err; + uint64_t count; + + /* + * There should be exactly two holds, both from + * dsl_dataset_destroy: one on the dd directory, and one on its + * head ds. Otherwise, someone is trying to lookup something + * inside this dir while we want to destroy it. The + * config_rwlock ensures that nobody else opens it after we + * check. + */ + if (dmu_buf_refcount(dd->dd_dbuf) > 2) + return (EBUSY); + + err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count); + if (err) + return (err); + if (count != 0) + return (EEXIST); + + return (0); +} + +void +dsl_dir_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + objset_t *mos = dd->dd_pool->dp_meta_objset; + uint64_t val, obj; + dd_used_t t; + + ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock)); + ASSERT(dd->dd_phys->dd_head_dataset_obj == 0); + + /* Remove our reservation. 
*/ + val = 0; + dsl_dir_set_reservation_sync(dd, &val, cr, tx); + ASSERT3U(dd->dd_phys->dd_used_bytes, ==, 0); + ASSERT3U(dd->dd_phys->dd_reserved, ==, 0); + for (t = 0; t < DD_USED_NUM; t++) + ASSERT3U(dd->dd_phys->dd_used_breakdown[t], ==, 0); + + VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx)); + VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx)); + VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx)); + VERIFY(0 == zap_remove(mos, + dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx)); + + obj = dd->dd_object; + dsl_dir_close(dd, tag); + VERIFY(0 == dmu_object_free(mos, obj, tx)); +} + +boolean_t +dsl_dir_is_clone(dsl_dir_t *dd) +{ + return (dd->dd_phys->dd_origin_obj && + (dd->dd_pool->dp_origin_snap == NULL || + dd->dd_phys->dd_origin_obj != + dd->dd_pool->dp_origin_snap->ds_object)); +} + +void +dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv) +{ + mutex_enter(&dd->dd_lock); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, + dd->dd_phys->dd_used_bytes); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION, + dd->dd_phys->dd_reserved); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, + dd->dd_phys->dd_compressed_bytes == 0 ? 100 : + (dd->dd_phys->dd_uncompressed_bytes * 100 / + dd->dd_phys->dd_compressed_bytes)); + if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP, + dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS, + dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV, + dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]); + dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD, + dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] + + dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]); + } + mutex_exit(&dd->dd_lock); + + rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); + if (dsl_dir_is_clone(dd)) { + dsl_dataset_t *ds; + char buf[MAXNAMELEN]; + + VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, + dd->dd_phys->dd_origin_obj, FTAG, &ds)); + dsl_dataset_name(ds, buf); + dsl_dataset_rele(ds, FTAG); + dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf); + } + rw_exit(&dd->dd_pool->dp_config_rwlock); +} + +void +dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx) +{ + dsl_pool_t *dp = dd->dd_pool; + + ASSERT(dd->dd_phys); + + if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) { + /* up the hold count until we can be written out */ + dmu_buf_add_ref(dd->dd_dbuf, dd); + } +} + +static int64_t +parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta) +{ + uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved); + uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved); + return (new_accounted - old_accounted); +} + +void +dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx) +{ + ASSERT(dmu_tx_is_syncing(tx)); + + dmu_buf_will_dirty(dd->dd_dbuf, tx); + + mutex_enter(&dd->dd_lock); + ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0); + dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg, + dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024); + dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0; + mutex_exit(&dd->dd_lock); + + /* release the hold from dsl_dir_dirty */ + dmu_buf_rele(dd->dd_dbuf, dd); +} + +static uint64_t +dsl_dir_space_towrite(dsl_dir_t *dd) +{ + uint64_t space = 0; + int i; + + ASSERT(MUTEX_HELD(&dd->dd_lock)); + + for (i = 0; i < TXG_SIZE; i++) { + space += 
dd->dd_space_towrite[i&TXG_MASK]; + ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0); + } + return (space); +} + +/* + * How much space would dd have available if ancestor had delta applied + * to it? If ondiskonly is set, we're only interested in what's + * on-disk, not estimated pending changes. + */ +uint64_t +dsl_dir_space_available(dsl_dir_t *dd, + dsl_dir_t *ancestor, int64_t delta, int ondiskonly) +{ + uint64_t parentspace, myspace, quota, used; + + /* + * If there are no restrictions otherwise, assume we have + * unlimited space available. + */ + quota = UINT64_MAX; + parentspace = UINT64_MAX; + + if (dd->dd_parent != NULL) { + parentspace = dsl_dir_space_available(dd->dd_parent, + ancestor, delta, ondiskonly); + } + + mutex_enter(&dd->dd_lock); + if (dd->dd_phys->dd_quota != 0) + quota = dd->dd_phys->dd_quota; + used = dd->dd_phys->dd_used_bytes; + if (!ondiskonly) + used += dsl_dir_space_towrite(dd); + + if (dd->dd_parent == NULL) { + uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE); + quota = MIN(quota, poolsize); + } + + if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) { + /* + * We have some space reserved, in addition to what our + * parent gave us. + */ + parentspace += dd->dd_phys->dd_reserved - used; + } + + if (dd == ancestor) { + ASSERT(delta <= 0); + ASSERT(used >= -delta); + used += delta; + if (parentspace != UINT64_MAX) + parentspace -= delta; + } + + if (used > quota) { + /* over quota */ + myspace = 0; + + /* + * While it's OK to be a little over quota, if + * we think we are using more space than there + * is in the pool (which is already 1.6% more than + * dsl_pool_adjustedsize()), something is very + * wrong. + */ + ASSERT3U(used, <=, spa_get_space(dd->dd_pool->dp_spa)); + } else { + /* + * the lesser of the space provided by our parent and + * the space left in our quota + */ + myspace = MIN(parentspace, quota - used); + } + + mutex_exit(&dd->dd_lock); + + return (myspace); +} + +struct tempreserve { + list_node_t tr_node; + dsl_pool_t *tr_dp; + dsl_dir_t *tr_ds; + uint64_t tr_size; +}; + +static int +dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree, + boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list, + dmu_tx_t *tx, boolean_t first) +{ + uint64_t txg = tx->tx_txg; + uint64_t est_inflight, used_on_disk, quota, parent_rsrv; + struct tempreserve *tr; + int enospc = EDQUOT; + int txgidx = txg & TXG_MASK; + int i; + uint64_t ref_rsrv = 0; + + ASSERT3U(txg, !=, 0); + ASSERT3S(asize, >, 0); + + mutex_enter(&dd->dd_lock); + + /* + * Check against the dsl_dir's quota. We don't add in the delta + * when checking for over-quota because they get one free hit. + */ + est_inflight = dsl_dir_space_towrite(dd); + for (i = 0; i < TXG_SIZE; i++) + est_inflight += dd->dd_tempreserved[i]; + used_on_disk = dd->dd_phys->dd_used_bytes; + + /* + * On the first iteration, fetch the dataset's used-on-disk and + * refreservation values. Also, if checkrefquota is set, test if + * allocating this space would exceed the dataset's refquota. + */ + if (first && tx->tx_objset) { + int error; + dsl_dataset_t *ds = tx->tx_objset->os->os_dsl_dataset; + + error = dsl_dataset_check_quota(ds, checkrefquota, + asize, est_inflight, &used_on_disk, &ref_rsrv); + if (error) { + mutex_exit(&dd->dd_lock); + return (error); + } + } + + /* + * If this transaction will result in a net free of space, + * we want to let it through. 
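+ *
+ * When the reservation cannot be granted, EDQUOT is returned if this
+ * directory's quota is the limiting factor, ENOSPC if the pool itself
+ * is nearly full, and ERESTART if pending changes may still free
+ * enough space to make a retry worthwhile.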
+ */ + if (ignorequota || netfree || dd->dd_phys->dd_quota == 0) + quota = UINT64_MAX; + else + quota = dd->dd_phys->dd_quota; + + /* + * Adjust the quota against the actual pool size at the root. + * To ensure that it's possible to remove files from a full + * pool without inducing transient overcommits, we throttle + * netfree transactions against a quota that is slightly larger, + * but still within the pool's allocation slop. In cases where + * we're very close to full, this will allow a steady trickle of + * removes to get through. + */ + if (dd->dd_parent == NULL) { + uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree); + if (poolsize < quota) { + quota = poolsize; + enospc = ENOSPC; + } + } + + /* + * If they are requesting more space, and our current estimate + * is over quota, they get to try again unless the actual + * on-disk is over quota and there are no pending changes (which + * may free up space for us). + */ + if (used_on_disk + est_inflight > quota) { + if (est_inflight > 0 || used_on_disk < quota) + enospc = ERESTART; + dprintf_dd(dd, "failing: used=%lluK inflight = %lluK " + "quota=%lluK tr=%lluK err=%d\n", + used_on_disk>>10, est_inflight>>10, + quota>>10, asize>>10, enospc); + mutex_exit(&dd->dd_lock); + return (enospc); + } + + /* We need to up our estimated delta before dropping dd_lock */ + dd->dd_tempreserved[txgidx] += asize; + + parent_rsrv = parent_delta(dd, used_on_disk + est_inflight, + asize - ref_rsrv); + mutex_exit(&dd->dd_lock); + + tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP); + tr->tr_ds = dd; + tr->tr_size = asize; + list_insert_tail(tr_list, tr); + + /* see if it's OK with our parent */ + if (dd->dd_parent && parent_rsrv) { + boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0); + + return (dsl_dir_tempreserve_impl(dd->dd_parent, + parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE)); + } else { + return (0); + } +} + +/* + * Reserve space in this dsl_dir, to be used in this tx's txg. + * After the space has been dirtied (and dsl_dir_willuse_space() + * has been called), the reservation should be canceled, using + * dsl_dir_tempreserve_clear(). + */ +int +dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize, + uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx) +{ + int err; + list_t *tr_list; + + if (asize == 0) { + *tr_cookiep = NULL; + return (0); + } + + tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP); + list_create(tr_list, sizeof (struct tempreserve), + offsetof(struct tempreserve, tr_node)); + ASSERT3S(asize, >, 0); + ASSERT3S(fsize, >=, 0); + + err = arc_tempreserve_space(lsize, tx->tx_txg); + if (err == 0) { + struct tempreserve *tr; + + tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP); + tr->tr_size = lsize; + list_insert_tail(tr_list, tr); + + err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx); + } else { + if (err == EAGAIN) { + txg_delay(dd->dd_pool, tx->tx_txg, 1); + err = ERESTART; + } + dsl_pool_memory_pressure(dd->dd_pool); + } + + if (err == 0) { + struct tempreserve *tr; + + tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP); + tr->tr_dp = dd->dd_pool; + tr->tr_size = asize; + list_insert_tail(tr_list, tr); + + err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize, + FALSE, asize > usize, tr_list, tx, TRUE); + } + + if (err) + dsl_dir_tempreserve_clear(tr_list, tx); + else + *tr_cookiep = tr_list; + + return (err); +} + +/* + * Clear a temporary reservation that we previously made with + * dsl_dir_tempreserve_space(). 
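+ *
+ * The cookie is the list built by dsl_dir_tempreserve_space(); each
+ * entry undoes either the pool-wide reservation, a per-directory
+ * reservation, or the ARC reservation, in the order they were made.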
+ */ +void +dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx) +{ + int txgidx = tx->tx_txg & TXG_MASK; + list_t *tr_list = tr_cookie; + struct tempreserve *tr; + + ASSERT3U(tx->tx_txg, !=, 0); + + if (tr_cookie == NULL) + return; + + while (tr = list_head(tr_list)) { + if (tr->tr_dp) { + dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx); + } else if (tr->tr_ds) { + mutex_enter(&tr->tr_ds->dd_lock); + ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=, + tr->tr_size); + tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size; + mutex_exit(&tr->tr_ds->dd_lock); + } else { + arc_tempreserve_clear(tr->tr_size); + } + list_remove(tr_list, tr); + kmem_free(tr, sizeof (struct tempreserve)); + } + + kmem_free(tr_list, sizeof (list_t)); +} + +static void +dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx) +{ + int64_t parent_space; + uint64_t est_used; + + mutex_enter(&dd->dd_lock); + if (space > 0) + dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space; + + est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes; + parent_space = parent_delta(dd, est_used, space); + mutex_exit(&dd->dd_lock); + + /* Make sure that we clean up dd_space_to* */ + dsl_dir_dirty(dd, tx); + + /* XXX this is potentially expensive and unnecessary... */ + if (parent_space && dd->dd_parent) + dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx); +} + +/* + * Call in open context when we think we're going to write/free space, + * eg. when dirtying data. Be conservative (ie. OK to write less than + * this or free more than this, but don't write more or free less). + */ +void +dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx) +{ + dsl_pool_willuse_space(dd->dd_pool, space, tx); + dsl_dir_willuse_space_impl(dd, space, tx); +} + +/* call from syncing context when we actually write/free space for this dd */ +void +dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type, + int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx) +{ + int64_t accounted_delta; + boolean_t needlock = !MUTEX_HELD(&dd->dd_lock); + + ASSERT(dmu_tx_is_syncing(tx)); + ASSERT(type < DD_USED_NUM); + + dsl_dir_dirty(dd, tx); + + if (needlock) + mutex_enter(&dd->dd_lock); + accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used); + ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used); + ASSERT(compressed >= 0 || + dd->dd_phys->dd_compressed_bytes >= -compressed); + ASSERT(uncompressed >= 0 || + dd->dd_phys->dd_uncompressed_bytes >= -uncompressed); + dd->dd_phys->dd_used_bytes += used; + dd->dd_phys->dd_uncompressed_bytes += uncompressed; + dd->dd_phys->dd_compressed_bytes += compressed; + + if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { + ASSERT(used > 0 || + dd->dd_phys->dd_used_breakdown[type] >= -used); + dd->dd_phys->dd_used_breakdown[type] += used; +#ifdef DEBUG + dd_used_t t; + uint64_t u = 0; + for (t = 0; t < DD_USED_NUM; t++) + u += dd->dd_phys->dd_used_breakdown[t]; + ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes); +#endif + } + if (needlock) + mutex_exit(&dd->dd_lock); + + if (dd->dd_parent != NULL) { + dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD, + accounted_delta, compressed, uncompressed, tx); + dsl_dir_transfer_space(dd->dd_parent, + used - accounted_delta, + DD_USED_CHILD_RSRV, DD_USED_CHILD, tx); + } +} + +void +dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta, + dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx) +{ + boolean_t needlock = !MUTEX_HELD(&dd->dd_lock); + + ASSERT(dmu_tx_is_syncing(tx)); + ASSERT(oldtype < DD_USED_NUM); + ASSERT(newtype < 
DD_USED_NUM); + + if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN)) + return; + + dsl_dir_dirty(dd, tx); + if (needlock) + mutex_enter(&dd->dd_lock); + ASSERT(delta > 0 ? + dd->dd_phys->dd_used_breakdown[oldtype] >= delta : + dd->dd_phys->dd_used_breakdown[newtype] >= -delta); + ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta)); + dd->dd_phys->dd_used_breakdown[oldtype] -= delta; + dd->dd_phys->dd_used_breakdown[newtype] += delta; + if (needlock) + mutex_exit(&dd->dd_lock); +} + +static int +dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + uint64_t *quotap = arg2; + uint64_t new_quota = *quotap; + int err = 0; + uint64_t towrite; + + if (new_quota == 0) + return (0); + + mutex_enter(&dd->dd_lock); + /* + * If we are doing the preliminary check in open context, and + * there are pending changes, then don't fail it, since the + * pending changes could under-estimate the amount of space to be + * freed up. + */ + towrite = dsl_dir_space_towrite(dd); + if ((dmu_tx_is_syncing(tx) || towrite == 0) && + (new_quota < dd->dd_phys->dd_reserved || + new_quota < dd->dd_phys->dd_used_bytes + towrite)) { + err = ENOSPC; + } + mutex_exit(&dd->dd_lock); + return (err); +} + +/* ARGSUSED */ +static void +dsl_dir_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + uint64_t *quotap = arg2; + uint64_t new_quota = *quotap; + + dmu_buf_will_dirty(dd->dd_dbuf, tx); + + mutex_enter(&dd->dd_lock); + dd->dd_phys->dd_quota = new_quota; + mutex_exit(&dd->dd_lock); + + spa_history_internal_log(LOG_DS_QUOTA, dd->dd_pool->dp_spa, + tx, cr, "%lld dataset = %llu ", + (longlong_t)new_quota, dd->dd_phys->dd_head_dataset_obj); +} + +int +dsl_dir_set_quota(const char *ddname, uint64_t quota) +{ + dsl_dir_t *dd; + int err; + + err = dsl_dir_open(ddname, FTAG, &dd, NULL); + if (err) + return (err); + + if (quota != dd->dd_phys->dd_quota) { + /* + * If someone removes a file, then tries to set the quota, we + * want to make sure the file freeing takes effect. + */ + txg_wait_open(dd->dd_pool, 0); + + err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check, + dsl_dir_set_quota_sync, dd, "a, 0); + } + dsl_dir_close(dd, FTAG); + return (err); +} + +int +dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + uint64_t *reservationp = arg2; + uint64_t new_reservation = *reservationp; + uint64_t used, avail; + int64_t delta; + + if (new_reservation > INT64_MAX) + return (EOVERFLOW); + + /* + * If we are doing the preliminary check in open context, the + * space estimates may be inaccurate. 
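+ *
+ * The parent is only charged for the part of the reservation that is
+ * not already covered by our usage: for example, with 10G used, an
+ * old reservation of 2G and a new reservation of 15G, the delta is
+ * MAX(10G, 15G) - MAX(10G, 2G) = 5G.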
+ */ + if (!dmu_tx_is_syncing(tx)) + return (0); + + mutex_enter(&dd->dd_lock); + used = dd->dd_phys->dd_used_bytes; + delta = MAX(used, new_reservation) - + MAX(used, dd->dd_phys->dd_reserved); + mutex_exit(&dd->dd_lock); + + if (dd->dd_parent) { + avail = dsl_dir_space_available(dd->dd_parent, + NULL, 0, FALSE); + } else { + avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used; + } + + if (delta > 0 && delta > avail) + return (ENOSPC); + if (delta > 0 && dd->dd_phys->dd_quota > 0 && + new_reservation > dd->dd_phys->dd_quota) + return (ENOSPC); + return (0); +} + +/* ARGSUSED */ +static void +dsl_dir_set_reservation_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + uint64_t *reservationp = arg2; + uint64_t new_reservation = *reservationp; + uint64_t used; + int64_t delta; + + dmu_buf_will_dirty(dd->dd_dbuf, tx); + + mutex_enter(&dd->dd_lock); + used = dd->dd_phys->dd_used_bytes; + delta = MAX(used, new_reservation) - + MAX(used, dd->dd_phys->dd_reserved); + dd->dd_phys->dd_reserved = new_reservation; + + if (dd->dd_parent != NULL) { + /* Roll up this additional usage into our ancestors */ + dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV, + delta, 0, 0, tx); + } + mutex_exit(&dd->dd_lock); + + spa_history_internal_log(LOG_DS_RESERVATION, dd->dd_pool->dp_spa, + tx, cr, "%lld dataset = %llu", + (longlong_t)new_reservation, dd->dd_phys->dd_head_dataset_obj); +} + +int +dsl_dir_set_reservation(const char *ddname, uint64_t reservation) +{ + dsl_dir_t *dd; + int err; + + err = dsl_dir_open(ddname, FTAG, &dd, NULL); + if (err) + return (err); + err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check, + dsl_dir_set_reservation_sync, dd, &reservation, 0); + dsl_dir_close(dd, FTAG); + return (err); +} + +static dsl_dir_t * +closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2) +{ + for (; ds1; ds1 = ds1->dd_parent) { + dsl_dir_t *dd; + for (dd = ds2; dd; dd = dd->dd_parent) { + if (ds1 == dd) + return (dd); + } + } + return (NULL); +} + +/* + * If delta is applied to dd, how much of that delta would be applied to + * ancestor? Syncing context only. + */ +static int64_t +would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor) +{ + if (dd == ancestor) + return (delta); + + mutex_enter(&dd->dd_lock); + delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta); + mutex_exit(&dd->dd_lock); + return (would_change(dd->dd_parent, delta, ancestor)); +} + +struct renamearg { + dsl_dir_t *newparent; + const char *mynewname; +}; + +/*ARGSUSED*/ +static int +dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + struct renamearg *ra = arg2; + dsl_pool_t *dp = dd->dd_pool; + objset_t *mos = dp->dp_meta_objset; + int err; + uint64_t val; + + /* There should be 2 references: the open and the dirty */ + if (dmu_buf_refcount(dd->dd_dbuf) > 2) + return (EBUSY); + + /* check for existing name */ + err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj, + ra->mynewname, 8, 1, &val); + if (err == 0) + return (EEXIST); + if (err != ENOENT) + return (err); + + if (ra->newparent != dd->dd_parent) { + /* is there enough space? 
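+ * The amount checked against the new parent is the larger of what we
+ * use and what we reserve, since any reservation moves with us.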
*/ + uint64_t myspace = + MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved); + + /* no rename into our descendant */ + if (closest_common_ancestor(dd, ra->newparent) == dd) + return (EINVAL); + + if (err = dsl_dir_transfer_possible(dd->dd_parent, + ra->newparent, myspace)) + return (err); + } + + return (0); +} + +static void +dsl_dir_rename_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dir_t *dd = arg1; + struct renamearg *ra = arg2; + dsl_pool_t *dp = dd->dd_pool; + objset_t *mos = dp->dp_meta_objset; + int err; + + ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2); + + if (ra->newparent != dd->dd_parent) { + dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD, + -dd->dd_phys->dd_used_bytes, + -dd->dd_phys->dd_compressed_bytes, + -dd->dd_phys->dd_uncompressed_bytes, tx); + dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD, + dd->dd_phys->dd_used_bytes, + dd->dd_phys->dd_compressed_bytes, + dd->dd_phys->dd_uncompressed_bytes, tx); + + if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) { + uint64_t unused_rsrv = dd->dd_phys->dd_reserved - + dd->dd_phys->dd_used_bytes; + + dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV, + -unused_rsrv, 0, 0, tx); + dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD_RSRV, + unused_rsrv, 0, 0, tx); + } + } + + dmu_buf_will_dirty(dd->dd_dbuf, tx); + + /* remove from old parent zapobj */ + err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj, + dd->dd_myname, tx); + ASSERT3U(err, ==, 0); + + (void) strcpy(dd->dd_myname, ra->mynewname); + dsl_dir_close(dd->dd_parent, dd); + dd->dd_phys->dd_parent_obj = ra->newparent->dd_object; + VERIFY(0 == dsl_dir_open_obj(dd->dd_pool, + ra->newparent->dd_object, NULL, dd, &dd->dd_parent)); + + /* add to new parent zapobj */ + err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj, + dd->dd_myname, 8, 1, &dd->dd_object, tx); + ASSERT3U(err, ==, 0); + + spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, + tx, cr, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj); +} + +int +dsl_dir_rename(dsl_dir_t *dd, const char *newname) +{ + struct renamearg ra; + int err; + + /* new parent should exist */ + err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname); + if (err) + return (err); + + /* can't rename to different pool */ + if (dd->dd_pool != ra.newparent->dd_pool) { + err = ENXIO; + goto out; + } + + /* new name should not already exist */ + if (ra.mynewname == NULL) { + err = EEXIST; + goto out; + } + + err = dsl_sync_task_do(dd->dd_pool, + dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3); + +out: + dsl_dir_close(ra.newparent, FTAG); + return (err); +} + +int +dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space) +{ + dsl_dir_t *ancestor; + int64_t adelta; + uint64_t avail; + + ancestor = closest_common_ancestor(sdd, tdd); + adelta = would_change(sdd, -space, ancestor); + avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE); + if (avail < space) + return (ENOSPC); + + return (0); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_pool.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_pool.c new file mode 100644 index 000000000000..dacc57c81c25 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_pool.c @@ -0,0 +1,613 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int zfs_no_write_throttle = 0; +int zfs_write_limit_shift = 3; /* 1/8th of physical memory */ +int zfs_txg_synctime = 5; /* target secs to sync a txg */ + +uint64_t zfs_write_limit_min = 32 << 20; /* min write limit is 32MB */ +uint64_t zfs_write_limit_max = 0; /* max data payload per txg */ +uint64_t zfs_write_limit_inflated = 0; +uint64_t zfs_write_limit_override = 0; + +kmutex_t zfs_write_limit_lock; + +static pgcnt_t old_physmem = 0; + +static int +dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp) +{ + uint64_t obj; + int err; + + err = zap_lookup(dp->dp_meta_objset, + dp->dp_root_dir->dd_phys->dd_child_dir_zapobj, + name, sizeof (obj), 1, &obj); + if (err) + return (err); + + return (dsl_dir_open_obj(dp, obj, name, dp, ddp)); +} + +static dsl_pool_t * +dsl_pool_open_impl(spa_t *spa, uint64_t txg) +{ + dsl_pool_t *dp; + blkptr_t *bp = spa_get_rootblkptr(spa); + + dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP); + dp->dp_spa = spa; + dp->dp_meta_rootbp = *bp; + rw_init(&dp->dp_config_rwlock, NULL, RW_DEFAULT, NULL); + dp->dp_write_limit = zfs_write_limit_min; + txg_init(dp, txg); + + txg_list_create(&dp->dp_dirty_datasets, + offsetof(dsl_dataset_t, ds_dirty_link)); + txg_list_create(&dp->dp_dirty_dirs, + offsetof(dsl_dir_t, dd_dirty_link)); + txg_list_create(&dp->dp_sync_tasks, + offsetof(dsl_sync_task_group_t, dstg_node)); + list_create(&dp->dp_synced_datasets, sizeof (dsl_dataset_t), + offsetof(dsl_dataset_t, ds_synced_link)); + + mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&dp->dp_scrub_cancel_lock, NULL, MUTEX_DEFAULT, NULL); + + return (dp); +} + +int +dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp) +{ + int err; + dsl_pool_t *dp = dsl_pool_open_impl(spa, txg); + dsl_dir_t *dd; + dsl_dataset_t *ds; + objset_impl_t *osi; + + rw_enter(&dp->dp_config_rwlock, RW_WRITER); + err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp, &osi); + if (err) + goto out; + dp->dp_meta_objset = &osi->os; + + err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, + &dp->dp_root_dir_obj); + if (err) + goto out; + + err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, + NULL, dp, &dp->dp_root_dir); + if (err) + goto out; + + err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir); + if (err) + goto out; + + if (spa_version(spa) >= SPA_VERSION_ORIGIN) { + err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd); + if (err) + goto out; + err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj, + FTAG, &ds); + if (err) + goto out; + err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj, + dp, &dp->dp_origin_snap); + if (err) + 
goto out; + dsl_dataset_rele(ds, FTAG); + dsl_dir_close(dd, dp); + } + + /* get scrub status */ + err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1, + &dp->dp_scrub_func); + if (err == 0) { + err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1, + &dp->dp_scrub_queue_obj); + if (err) + goto out; + err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1, + &dp->dp_scrub_min_txg); + if (err) + goto out; + err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1, + &dp->dp_scrub_max_txg); + if (err) + goto out; + err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4, + &dp->dp_scrub_bookmark); + if (err) + goto out; + err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1, + &spa->spa_scrub_errors); + if (err) + goto out; + if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) { + /* + * A new-type scrub was in progress on an old + * pool. Restart from the beginning, since the + * old software may have changed the pool in the + * meantime. + */ + dsl_pool_scrub_restart(dp); + } + } else { + /* + * It's OK if there is no scrub in progress (and if + * there was an I/O error, ignore it). + */ + err = 0; + } + +out: + rw_exit(&dp->dp_config_rwlock); + if (err) + dsl_pool_close(dp); + else + *dpp = dp; + + return (err); +} + +void +dsl_pool_close(dsl_pool_t *dp) +{ + /* drop our references from dsl_pool_open() */ + + /* + * Since we held the origin_snap from "syncing" context (which + * includes pool-opening context), it actually only got a "ref" + * and not a hold, so just drop that here. 
+ */ + if (dp->dp_origin_snap) + dsl_dataset_drop_ref(dp->dp_origin_snap, dp); + if (dp->dp_mos_dir) + dsl_dir_close(dp->dp_mos_dir, dp); + if (dp->dp_root_dir) + dsl_dir_close(dp->dp_root_dir, dp); + + /* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */ + if (dp->dp_meta_objset) + dmu_objset_evict(NULL, dp->dp_meta_objset->os); + + txg_list_destroy(&dp->dp_dirty_datasets); + txg_list_destroy(&dp->dp_dirty_dirs); + list_destroy(&dp->dp_synced_datasets); + + arc_flush(dp->dp_spa); + txg_fini(dp); + rw_destroy(&dp->dp_config_rwlock); + mutex_destroy(&dp->dp_lock); + mutex_destroy(&dp->dp_scrub_cancel_lock); + if (dp->dp_blkstats) + kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t)); + kmem_free(dp, sizeof (dsl_pool_t)); +} + +dsl_pool_t * +dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg) +{ + int err; + dsl_pool_t *dp = dsl_pool_open_impl(spa, txg); + dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg); + objset_impl_t *osip; + dsl_dataset_t *ds; + uint64_t dsobj; + + /* create and open the MOS (meta-objset) */ + dp->dp_meta_objset = &dmu_objset_create_impl(spa, + NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx)->os; + + /* create the pool directory */ + err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx); + ASSERT3U(err, ==, 0); + + /* create and open the root dir */ + dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx); + VERIFY(0 == dsl_dir_open_obj(dp, dp->dp_root_dir_obj, + NULL, dp, &dp->dp_root_dir)); + + /* create and open the meta-objset dir */ + (void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx); + VERIFY(0 == dsl_pool_open_special_dir(dp, + MOS_DIR_NAME, &dp->dp_mos_dir)); + + if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) + dsl_pool_create_origin(dp, tx); + + /* create the root dataset */ + dsobj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx); + + /* create the root objset */ + VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); + osip = dmu_objset_create_impl(dp->dp_spa, ds, + dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx); +#ifdef _KERNEL + zfs_create_fs(&osip->os, kcred, zplprops, tx); +#endif + dsl_dataset_rele(ds, FTAG); + + dmu_tx_commit(tx); + + return (dp); +} + +void +dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) +{ + zio_t *zio; + dmu_tx_t *tx; + dsl_dir_t *dd; + dsl_dataset_t *ds; + dsl_sync_task_group_t *dstg; + objset_impl_t *mosi = dp->dp_meta_objset->os; + hrtime_t start, write_time; + uint64_t data_written; + int err; + + tx = dmu_tx_create_assigned(dp, txg); + + dp->dp_read_overhead = 0; + zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); + while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) { + if (!list_link_active(&ds->ds_synced_link)) + list_insert_tail(&dp->dp_synced_datasets, ds); + else + dmu_buf_rele(ds->ds_dbuf, ds); + dsl_dataset_sync(ds, zio, tx); + } + DTRACE_PROBE(pool_sync__1setup); + + start = gethrtime(); + err = zio_wait(zio); + write_time = gethrtime() - start; + ASSERT(err == 0); + DTRACE_PROBE(pool_sync__2rootzio); + + while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg)) + dsl_sync_task_group_sync(dstg, tx); + DTRACE_PROBE(pool_sync__3task); + + start = gethrtime(); + while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) + dsl_dir_sync(dd, tx); + write_time += gethrtime() - start; + + if (spa_sync_pass(dp->dp_spa) == 1) + dsl_pool_scrub_sync(dp, tx); + + start = gethrtime(); + if (list_head(&mosi->os_dirty_dnodes[txg & TXG_MASK]) != NULL || + list_head(&mosi->os_free_dnodes[txg & TXG_MASK]) != NULL) { + 
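+		/*
+		 * The MOS itself was dirtied this txg: sync it out and
+		 * record its new root block pointer in the spa.
+		 */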
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); + dmu_objset_sync(mosi, zio, tx); + err = zio_wait(zio); + ASSERT(err == 0); + dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", ""); + spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp); + } + write_time += gethrtime() - start; + DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time, + hrtime_t, dp->dp_read_overhead); + write_time -= dp->dp_read_overhead; + + dmu_tx_commit(tx); + + data_written = dp->dp_space_towrite[txg & TXG_MASK]; + dp->dp_space_towrite[txg & TXG_MASK] = 0; + ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0); + + /* + * If the write limit max has not been explicitly set, set it + * to a fraction of available physical memory (default 1/8th). + * Note that we must inflate the limit because the spa + * inflates write sizes to account for data replication. + * Check this each sync phase to catch changing memory size. + */ + if (physmem != old_physmem && zfs_write_limit_shift) { + mutex_enter(&zfs_write_limit_lock); + old_physmem = physmem; + zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; + zfs_write_limit_inflated = MAX(zfs_write_limit_min, + spa_get_asize(dp->dp_spa, zfs_write_limit_max)); + mutex_exit(&zfs_write_limit_lock); + } + + /* + * Attempt to keep the sync time consistent by adjusting the + * amount of write traffic allowed into each transaction group. + * Weight the throughput calculation towards the current value: + * thru = 3/4 old_thru + 1/4 new_thru + */ + ASSERT(zfs_write_limit_min > 0); + if (data_written > zfs_write_limit_min / 8 && write_time > 0) { + uint64_t throughput = (data_written * NANOSEC) / write_time; + if (dp->dp_throughput) + dp->dp_throughput = throughput / 4 + + 3 * dp->dp_throughput / 4; + else + dp->dp_throughput = throughput; + dp->dp_write_limit = MIN(zfs_write_limit_inflated, + MAX(zfs_write_limit_min, + dp->dp_throughput * zfs_txg_synctime)); + } +} + +void +dsl_pool_zil_clean(dsl_pool_t *dp) +{ + dsl_dataset_t *ds; + + while (ds = list_head(&dp->dp_synced_datasets)) { + list_remove(&dp->dp_synced_datasets, ds); + ASSERT(ds->ds_user_ptr != NULL); + zil_clean(((objset_impl_t *)ds->ds_user_ptr)->os_zil); + dmu_buf_rele(ds->ds_dbuf, ds); + } +} + +/* + * TRUE if the current thread is the tx_sync_thread or if we + * are being called from SPA context during pool initialization. + */ +int +dsl_pool_sync_context(dsl_pool_t *dp) +{ + return (curthread == dp->dp_tx.tx_sync_thread || + spa_get_dsl(dp->dp_spa) == NULL); +} + +uint64_t +dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree) +{ + uint64_t space, resv; + + /* + * Reserve about 1.6% (1/64), or at least 32MB, for allocation + * efficiency. + * XXX The intent log is not accounted for, so it must fit + * within this slop. + * + * If we're trying to assess whether it's OK to do a free, + * cut the reservation in half to allow forward progress + * (e.g. make it possible to rm(1) files from a full pool). + */ + space = spa_get_dspace(dp->dp_spa); + resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1); + if (netfree) + resv >>= 1; + + return (space - resv); +} + +int +dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx) +{ + uint64_t reserved = 0; + uint64_t write_limit = (zfs_write_limit_override ? + zfs_write_limit_override : dp->dp_write_limit); + + if (zfs_no_write_throttle) { + atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], + space); + return (0); + } + + /* + * Check to see if we have exceeded the maximum allowed IO for + * this transaction group. 
We can do this without locks since + * a little slop here is ok. Note that we do the reserved check + * with only half the requested reserve: this is because the + * reserve requests are worst-case, and we really don't want to + * throttle based off of worst-case estimates. + */ + if (write_limit > 0) { + reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK] + + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2; + + if (reserved && reserved > write_limit) + return (ERESTART); + } + + atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space); + + /* + * If this transaction group is over 7/8ths capacity, delay + * the caller 1 clock tick. This will slow down the "fill" + * rate until the sync process can catch up with us. + */ + if (reserved && reserved > (write_limit - (write_limit >> 3))) + txg_delay(dp, tx->tx_txg, 1); + + return (0); +} + +void +dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx) +{ + ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space); + atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space); +} + +void +dsl_pool_memory_pressure(dsl_pool_t *dp) +{ + uint64_t space_inuse = 0; + int i; + + if (dp->dp_write_limit == zfs_write_limit_min) + return; + + for (i = 0; i < TXG_SIZE; i++) { + space_inuse += dp->dp_space_towrite[i]; + space_inuse += dp->dp_tempreserved[i]; + } + dp->dp_write_limit = MAX(zfs_write_limit_min, + MIN(dp->dp_write_limit, space_inuse / 4)); +} + +void +dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx) +{ + if (space > 0) { + mutex_enter(&dp->dp_lock); + dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space; + mutex_exit(&dp->dp_lock); + } +} + +/* ARGSUSED */ +static int +upgrade_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg) +{ + dmu_tx_t *tx = arg; + dsl_dataset_t *ds, *prev = NULL; + int err; + dsl_pool_t *dp = spa_get_dsl(spa); + + err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds); + if (err) + return (err); + + while (ds->ds_phys->ds_prev_snap_obj != 0) { + err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj, + FTAG, &prev); + if (err) { + dsl_dataset_rele(ds, FTAG); + return (err); + } + + if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) + break; + dsl_dataset_rele(ds, FTAG); + ds = prev; + prev = NULL; + } + + if (prev == NULL) { + prev = dp->dp_origin_snap; + + /* + * The $ORIGIN can't have any data, or the accounting + * will be wrong. 
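+ * ($ORIGIN is deliberately an empty snapshot that pre-existing
+ * datasets get re-parented under; the assert on blk_birth below
+ * verifies that its root block pointer was never written.)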
+ */ + ASSERT(prev->ds_phys->ds_bp.blk_birth == 0); + + /* The origin doesn't get attached to itself */ + if (ds->ds_object == prev->ds_object) { + dsl_dataset_rele(ds, FTAG); + return (0); + } + + dmu_buf_will_dirty(ds->ds_dbuf, tx); + ds->ds_phys->ds_prev_snap_obj = prev->ds_object; + ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg; + + dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx); + ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object; + + dmu_buf_will_dirty(prev->ds_dbuf, tx); + prev->ds_phys->ds_num_children++; + + if (ds->ds_phys->ds_next_snap_obj == 0) { + ASSERT(ds->ds_prev == NULL); + VERIFY(0 == dsl_dataset_hold_obj(dp, + ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev)); + } + } + + ASSERT(ds->ds_dir->dd_phys->dd_origin_obj == prev->ds_object); + ASSERT(ds->ds_phys->ds_prev_snap_obj == prev->ds_object); + + if (prev->ds_phys->ds_next_clones_obj == 0) { + prev->ds_phys->ds_next_clones_obj = + zap_create(dp->dp_meta_objset, + DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx); + } + VERIFY(0 == zap_add_int(dp->dp_meta_objset, + prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx)); + + dsl_dataset_rele(ds, FTAG); + if (prev != dp->dp_origin_snap) + dsl_dataset_rele(prev, FTAG); + return (0); +} + +void +dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx) +{ + ASSERT(dmu_tx_is_syncing(tx)); + ASSERT(dp->dp_origin_snap != NULL); + + (void) dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb, + tx, DS_FIND_CHILDREN); +} + +void +dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx) +{ + uint64_t dsobj; + dsl_dataset_t *ds; + + ASSERT(dmu_tx_is_syncing(tx)); + ASSERT(dp->dp_origin_snap == NULL); + + /* create the origin dir, ds, & snap-ds */ + rw_enter(&dp->dp_config_rwlock, RW_WRITER); + dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME, + NULL, 0, kcred, tx); + VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); + dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, kcred, tx); + VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj, + dp, &dp->dp_origin_snap)); + dsl_dataset_rele(ds, FTAG); + rw_exit(&dp->dp_config_rwlock); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_prop.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_prop.c new file mode 100644 index 000000000000..212acbbc5968 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_prop.c @@ -0,0 +1,602 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for the default checksum value */ +#include +#include + +#include "zfs_prop.h" + +static int +dodefault(const char *propname, int intsz, int numint, void *buf) +{ + zfs_prop_t prop; + + /* + * The setonce properties are read-only, BUT they still + * have a default value that can be used as the initial + * value. + */ + if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL || + (zfs_prop_readonly(prop) && !zfs_prop_setonce(prop))) + return (ENOENT); + + if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) { + if (intsz != 1) + return (EOVERFLOW); + (void) strncpy(buf, zfs_prop_default_string(prop), + numint); + } else { + if (intsz != 8 || numint < 1) + return (EOVERFLOW); + + *(uint64_t *)buf = zfs_prop_default_numeric(prop); + } + + return (0); +} + +int +dsl_prop_get_dd(dsl_dir_t *dd, const char *propname, + int intsz, int numint, void *buf, char *setpoint) +{ + int err = ENOENT; + objset_t *mos = dd->dd_pool->dp_meta_objset; + zfs_prop_t prop; + + ASSERT(RW_LOCK_HELD(&dd->dd_pool->dp_config_rwlock)); + + if (setpoint) + setpoint[0] = '\0'; + + prop = zfs_name_to_prop(propname); + + /* + * Note: dd may be NULL, therefore we shouldn't dereference it + * ouside this loop. + */ + for (; dd != NULL; dd = dd->dd_parent) { + ASSERT(RW_LOCK_HELD(&dd->dd_pool->dp_config_rwlock)); + err = zap_lookup(mos, dd->dd_phys->dd_props_zapobj, + propname, intsz, numint, buf); + if (err != ENOENT) { + if (setpoint) + dsl_dir_name(dd, setpoint); + break; + } + + /* + * Break out of this loop for non-inheritable properties. + */ + if (prop != ZPROP_INVAL && !zfs_prop_inheritable(prop)) + break; + } + if (err == ENOENT) + err = dodefault(propname, intsz, numint, buf); + + return (err); +} + +int +dsl_prop_get_ds(dsl_dataset_t *ds, const char *propname, + int intsz, int numint, void *buf, char *setpoint) +{ + ASSERT(RW_LOCK_HELD(&ds->ds_dir->dd_pool->dp_config_rwlock)); + + if (ds->ds_phys->ds_props_obj) { + int err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, + ds->ds_phys->ds_props_obj, propname, intsz, numint, buf); + if (err != ENOENT) { + if (setpoint) + dsl_dataset_name(ds, setpoint); + return (err); + } + } + + return (dsl_prop_get_dd(ds->ds_dir, propname, + intsz, numint, buf, setpoint)); +} + +/* + * Register interest in the named property. We'll call the callback + * once to notify it of the current property value, and again each time + * the property changes, until this callback is unregistered. + * + * Return 0 on success, errno if the prop is not an integer value. 
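+ *
+ * A hypothetical caller might look like:
+ *
+ *	static void checksum_changed_cb(void *arg, uint64_t newval);
+ *	...
+ *	err = dsl_prop_register(ds, "checksum", checksum_changed_cb, os);
+ *
+ * The callback is invoked with cbarg and the property's integer value,
+ * first from this function and then on each change, until
+ * dsl_prop_unregister() is called with the same four arguments.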
+ */ +int +dsl_prop_register(dsl_dataset_t *ds, const char *propname, + dsl_prop_changed_cb_t *callback, void *cbarg) +{ + dsl_dir_t *dd = ds->ds_dir; + dsl_pool_t *dp = dd->dd_pool; + uint64_t value; + dsl_prop_cb_record_t *cbr; + int err; + int need_rwlock; + + need_rwlock = !RW_WRITE_HELD(&dp->dp_config_rwlock); + if (need_rwlock) + rw_enter(&dp->dp_config_rwlock, RW_READER); + + err = dsl_prop_get_ds(ds, propname, 8, 1, &value, NULL); + if (err != 0) { + if (need_rwlock) + rw_exit(&dp->dp_config_rwlock); + return (err); + } + + cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_SLEEP); + cbr->cbr_ds = ds; + cbr->cbr_propname = kmem_alloc(strlen(propname)+1, KM_SLEEP); + (void) strcpy((char *)cbr->cbr_propname, propname); + cbr->cbr_func = callback; + cbr->cbr_arg = cbarg; + mutex_enter(&dd->dd_lock); + list_insert_head(&dd->dd_prop_cbs, cbr); + mutex_exit(&dd->dd_lock); + + cbr->cbr_func(cbr->cbr_arg, value); + + VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object, + NULL, cbr, &dd)); + if (need_rwlock) + rw_exit(&dp->dp_config_rwlock); + /* Leave dir open until this callback is unregistered */ + return (0); +} + +int +dsl_prop_get(const char *dsname, const char *propname, + int intsz, int numints, void *buf, char *setpoint) +{ + dsl_dataset_t *ds; + int err; + + err = dsl_dataset_hold(dsname, FTAG, &ds); + if (err) + return (err); + + rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER); + err = dsl_prop_get_ds(ds, propname, intsz, numints, buf, setpoint); + rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock); + + dsl_dataset_rele(ds, FTAG); + return (err); +} + +/* + * Get the current property value. It may have changed by the time this + * function returns, so it is NOT safe to follow up with + * dsl_prop_register() and assume that the value has not changed in + * between. + * + * Return 0 on success, ENOENT if ddname is invalid. + */ +int +dsl_prop_get_integer(const char *ddname, const char *propname, + uint64_t *valuep, char *setpoint) +{ + return (dsl_prop_get(ddname, propname, 8, 1, valuep, setpoint)); +} + +/* + * Unregister this callback. Return 0 on success, ENOENT if ddname is + * invalid, ENOMSG if no matching callback registered. + */ +int +dsl_prop_unregister(dsl_dataset_t *ds, const char *propname, + dsl_prop_changed_cb_t *callback, void *cbarg) +{ + dsl_dir_t *dd = ds->ds_dir; + dsl_prop_cb_record_t *cbr; + + mutex_enter(&dd->dd_lock); + for (cbr = list_head(&dd->dd_prop_cbs); + cbr; cbr = list_next(&dd->dd_prop_cbs, cbr)) { + if (cbr->cbr_ds == ds && + cbr->cbr_func == callback && + cbr->cbr_arg == cbarg && + strcmp(cbr->cbr_propname, propname) == 0) + break; + } + + if (cbr == NULL) { + mutex_exit(&dd->dd_lock); + return (ENOMSG); + } + + list_remove(&dd->dd_prop_cbs, cbr); + mutex_exit(&dd->dd_lock); + kmem_free((void*)cbr->cbr_propname, strlen(cbr->cbr_propname)+1); + kmem_free(cbr, sizeof (dsl_prop_cb_record_t)); + + /* Clean up from dsl_prop_register */ + dsl_dir_close(dd, cbr); + return (0); +} + +/* + * Return the number of callbacks that are registered for this dataset. 
+ */ +int +dsl_prop_numcb(dsl_dataset_t *ds) +{ + dsl_dir_t *dd = ds->ds_dir; + dsl_prop_cb_record_t *cbr; + int num = 0; + + mutex_enter(&dd->dd_lock); + for (cbr = list_head(&dd->dd_prop_cbs); + cbr; cbr = list_next(&dd->dd_prop_cbs, cbr)) { + if (cbr->cbr_ds == ds) + num++; + } + mutex_exit(&dd->dd_lock); + + return (num); +} + +static void +dsl_prop_changed_notify(dsl_pool_t *dp, uint64_t ddobj, + const char *propname, uint64_t value, int first) +{ + dsl_dir_t *dd; + dsl_prop_cb_record_t *cbr; + objset_t *mos = dp->dp_meta_objset; + zap_cursor_t zc; + zap_attribute_t *za; + int err; + uint64_t dummyval; + + ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); + err = dsl_dir_open_obj(dp, ddobj, NULL, FTAG, &dd); + if (err) + return; + + if (!first) { + /* + * If the prop is set here, then this change is not + * being inherited here or below; stop the recursion. + */ + err = zap_lookup(mos, dd->dd_phys->dd_props_zapobj, propname, + 8, 1, &dummyval); + if (err == 0) { + dsl_dir_close(dd, FTAG); + return; + } + ASSERT3U(err, ==, ENOENT); + } + + mutex_enter(&dd->dd_lock); + for (cbr = list_head(&dd->dd_prop_cbs); cbr; + cbr = list_next(&dd->dd_prop_cbs, cbr)) { + uint64_t propobj = cbr->cbr_ds->ds_phys->ds_props_obj; + + if (strcmp(cbr->cbr_propname, propname) != 0) + continue; + + /* + * If the property is set on this ds, then it is not + * inherited here; don't call the callback. + */ + if (propobj && 0 == zap_lookup(mos, propobj, propname, + 8, 1, &dummyval)) + continue; + + cbr->cbr_func(cbr->cbr_arg, value); + } + mutex_exit(&dd->dd_lock); + + za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); + for (zap_cursor_init(&zc, mos, + dd->dd_phys->dd_child_dir_zapobj); + zap_cursor_retrieve(&zc, za) == 0; + zap_cursor_advance(&zc)) { + dsl_prop_changed_notify(dp, za->za_first_integer, + propname, value, FALSE); + } + kmem_free(za, sizeof (zap_attribute_t)); + zap_cursor_fini(&zc); + dsl_dir_close(dd, FTAG); +} + +struct prop_set_arg { + const char *name; + int intsz; + int numints; + const void *buf; +}; + + +static void +dsl_prop_set_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_dataset_t *ds = arg1; + struct prop_set_arg *psa = arg2; + objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; + uint64_t zapobj, intval; + int isint; + char valbuf[32]; + char *valstr; + + isint = (dodefault(psa->name, 8, 1, &intval) == 0); + + if (dsl_dataset_is_snapshot(ds)) { + ASSERT(spa_version(ds->ds_dir->dd_pool->dp_spa) >= + SPA_VERSION_SNAP_PROPS); + if (ds->ds_phys->ds_props_obj == 0) { + dmu_buf_will_dirty(ds->ds_dbuf, tx); + ds->ds_phys->ds_props_obj = + zap_create(mos, + DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx); + } + zapobj = ds->ds_phys->ds_props_obj; + } else { + zapobj = ds->ds_dir->dd_phys->dd_props_zapobj; + } + + if (psa->numints == 0) { + int err = zap_remove(mos, zapobj, psa->name, tx); + ASSERT(err == 0 || err == ENOENT); + if (isint) { + VERIFY(0 == dsl_prop_get_ds(ds, + psa->name, 8, 1, &intval, NULL)); + } + } else { + VERIFY(0 == zap_update(mos, zapobj, psa->name, + psa->intsz, psa->numints, psa->buf, tx)); + if (isint) + intval = *(uint64_t *)psa->buf; + } + + if (isint) { + if (dsl_dataset_is_snapshot(ds)) { + dsl_prop_cb_record_t *cbr; + /* + * It's a snapshot; nothing can inherit this + * property, so just look for callbacks on this + * ds here. 
+ */ + mutex_enter(&ds->ds_dir->dd_lock); + for (cbr = list_head(&ds->ds_dir->dd_prop_cbs); cbr; + cbr = list_next(&ds->ds_dir->dd_prop_cbs, cbr)) { + if (cbr->cbr_ds == ds && + strcmp(cbr->cbr_propname, psa->name) == 0) + cbr->cbr_func(cbr->cbr_arg, intval); + } + mutex_exit(&ds->ds_dir->dd_lock); + } else { + dsl_prop_changed_notify(ds->ds_dir->dd_pool, + ds->ds_dir->dd_object, psa->name, intval, TRUE); + } + } + if (isint) { + (void) snprintf(valbuf, sizeof (valbuf), + "%lld", (longlong_t)intval); + valstr = valbuf; + } else { + valstr = (char *)psa->buf; + } + spa_history_internal_log((psa->numints == 0) ? LOG_DS_INHERIT : + LOG_DS_PROPSET, ds->ds_dir->dd_pool->dp_spa, tx, cr, + "%s=%s dataset = %llu", psa->name, valstr, ds->ds_object); +} + +void +dsl_prop_set_uint64_sync(dsl_dir_t *dd, const char *name, uint64_t val, + cred_t *cr, dmu_tx_t *tx) +{ + objset_t *mos = dd->dd_pool->dp_meta_objset; + uint64_t zapobj = dd->dd_phys->dd_props_zapobj; + + ASSERT(dmu_tx_is_syncing(tx)); + + VERIFY(0 == zap_update(mos, zapobj, name, sizeof (val), 1, &val, tx)); + + dsl_prop_changed_notify(dd->dd_pool, dd->dd_object, name, val, TRUE); + + spa_history_internal_log(LOG_DS_PROPSET, dd->dd_pool->dp_spa, tx, cr, + "%s=%llu dataset = %llu", name, (u_longlong_t)val, + dd->dd_phys->dd_head_dataset_obj); +} + +int +dsl_prop_set(const char *dsname, const char *propname, + int intsz, int numints, const void *buf) +{ + dsl_dataset_t *ds; + int err; + struct prop_set_arg psa; + + /* + * We must do these checks before we get to the syncfunc, since + * it can't fail. + */ + if (strlen(propname) >= ZAP_MAXNAMELEN) + return (ENAMETOOLONG); + if (intsz * numints >= ZAP_MAXVALUELEN) + return (E2BIG); + + err = dsl_dataset_hold(dsname, FTAG, &ds); + if (err) + return (err); + + if (dsl_dataset_is_snapshot(ds) && + spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_SNAP_PROPS) { + dsl_dataset_rele(ds, FTAG); + return (ENOTSUP); + } + + psa.name = propname; + psa.intsz = intsz; + psa.numints = numints; + psa.buf = buf; + err = dsl_sync_task_do(ds->ds_dir->dd_pool, + NULL, dsl_prop_set_sync, ds, &psa, 2); + + dsl_dataset_rele(ds, FTAG); + return (err); +} + +/* + * Iterate over all properties for this dataset and return them in an nvlist. + */ +int +dsl_prop_get_all(objset_t *os, nvlist_t **nvp, boolean_t local) +{ + dsl_dataset_t *ds = os->os->os_dsl_dataset; + dsl_dir_t *dd = ds->ds_dir; + boolean_t snapshot = dsl_dataset_is_snapshot(ds); + int err = 0; + dsl_pool_t *dp = dd->dd_pool; + objset_t *mos = dp->dp_meta_objset; + uint64_t propobj = ds->ds_phys->ds_props_obj; + + VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + if (local && snapshot && !propobj) + return (0); + + rw_enter(&dp->dp_config_rwlock, RW_READER); + while (dd != NULL) { + char setpoint[MAXNAMELEN]; + zap_cursor_t zc; + zap_attribute_t za; + dsl_dir_t *dd_next; + + if (propobj) { + dsl_dataset_name(ds, setpoint); + dd_next = dd; + } else { + dsl_dir_name(dd, setpoint); + propobj = dd->dd_phys->dd_props_zapobj; + dd_next = dd->dd_parent; + } + + for (zap_cursor_init(&zc, mos, propobj); + (err = zap_cursor_retrieve(&zc, &za)) == 0; + zap_cursor_advance(&zc)) { + nvlist_t *propval; + zfs_prop_t prop = zfs_name_to_prop(za.za_name); + + /* Skip non-inheritable properties. */ + if (prop != ZPROP_INVAL && + !zfs_prop_inheritable(prop) && + (dd != ds->ds_dir || (snapshot && dd != dd_next))) + continue; + + /* Skip properties not valid for this type. 
*/ + if (snapshot && prop != ZPROP_INVAL && + !zfs_prop_valid_for_type(prop, ZFS_TYPE_SNAPSHOT)) + continue; + + /* Skip properties already defined */ + if (nvlist_lookup_nvlist(*nvp, za.za_name, + &propval) == 0) + continue; + + VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, + KM_SLEEP) == 0); + if (za.za_integer_length == 1) { + /* + * String property + */ + char *tmp = kmem_alloc(za.za_num_integers, + KM_SLEEP); + err = zap_lookup(mos, propobj, + za.za_name, 1, za.za_num_integers, tmp); + if (err != 0) { + kmem_free(tmp, za.za_num_integers); + break; + } + VERIFY(nvlist_add_string(propval, ZPROP_VALUE, + tmp) == 0); + kmem_free(tmp, za.za_num_integers); + } else { + /* + * Integer property + */ + ASSERT(za.za_integer_length == 8); + (void) nvlist_add_uint64(propval, ZPROP_VALUE, + za.za_first_integer); + } + + VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, + setpoint) == 0); + VERIFY(nvlist_add_nvlist(*nvp, za.za_name, + propval) == 0); + nvlist_free(propval); + } + zap_cursor_fini(&zc); + + if (err != ENOENT) + break; + err = 0; + /* + * If we are just after the props that have been set + * locally, then we are done after the first iteration. + */ + if (local) + break; + dd = dd_next; + propobj = 0; + } + rw_exit(&dp->dp_config_rwlock); + + return (err); +} + +void +dsl_prop_nvlist_add_uint64(nvlist_t *nv, zfs_prop_t prop, uint64_t value) +{ + nvlist_t *propval; + + VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0); + VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(prop), propval) == 0); + nvlist_free(propval); +} + +void +dsl_prop_nvlist_add_string(nvlist_t *nv, zfs_prop_t prop, const char *value) +{ + nvlist_t *propval; + + VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0); + VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(prop), propval) == 0); + nvlist_free(propval); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_scrub.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_scrub.c new file mode 100644 index 000000000000..950a91f783a4 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_scrub.c @@ -0,0 +1,1014 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *); + +static scrub_cb_t dsl_pool_scrub_clean_cb; +static dsl_syncfunc_t dsl_pool_scrub_cancel_sync; + +int zfs_scrub_min_time = 1; /* scrub for at least 1 sec each txg */ +int zfs_resilver_min_time = 3; /* resilver for at least 3 sec each txg */ +boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */ + +extern int zfs_txg_timeout; + +static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = { + NULL, + dsl_pool_scrub_clean_cb +}; + +#define SET_BOOKMARK(zb, objset, object, level, blkid) \ +{ \ + (zb)->zb_objset = objset; \ + (zb)->zb_object = object; \ + (zb)->zb_level = level; \ + (zb)->zb_blkid = blkid; \ +} + +/* ARGSUSED */ +static void +dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_pool_t *dp = arg1; + enum scrub_func *funcp = arg2; + dmu_object_type_t ot = 0; + boolean_t complete = B_FALSE; + + dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx); + + ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE); + ASSERT(*funcp > SCRUB_FUNC_NONE); + ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS); + + dp->dp_scrub_min_txg = 0; + dp->dp_scrub_max_txg = tx->tx_txg; + + if (*funcp == SCRUB_FUNC_CLEAN) { + vdev_t *rvd = dp->dp_spa->spa_root_vdev; + + /* rewrite all disk labels */ + vdev_config_dirty(rvd); + + if (vdev_resilver_needed(rvd, + &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) { + spa_event_notify(dp->dp_spa, NULL, + ESC_ZFS_RESILVER_START); + dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg, + tx->tx_txg); + } + + /* zero out the scrub stats in all vdev_stat_t's */ + vdev_scrub_stat_update(rvd, + dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER : + POOL_SCRUB_EVERYTHING, B_FALSE); + + dp->dp_spa->spa_scrub_started = B_TRUE; + } + + /* back to the generic stuff */ + + if (dp->dp_blkstats == NULL) { + dp->dp_blkstats = + kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP); + } + bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t)); + + if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) + ot = DMU_OT_ZAP_OTHER; + + dp->dp_scrub_func = *funcp; + dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset, + ot ? 
ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx); + bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t)); + dp->dp_scrub_restart = B_FALSE; + dp->dp_spa->spa_scrub_errors = 0; + + VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1, + &dp->dp_scrub_func, tx)); + VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1, + &dp->dp_scrub_queue_obj, tx)); + VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1, + &dp->dp_scrub_min_txg, tx)); + VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1, + &dp->dp_scrub_max_txg, tx)); + VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4, + &dp->dp_scrub_bookmark, tx)); + VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1, + &dp->dp_spa->spa_scrub_errors, tx)); + + spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr, + "func=%u mintxg=%llu maxtxg=%llu", + *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg); +} + +int +dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func) +{ + return (dsl_sync_task_do(dp, NULL, + dsl_pool_scrub_setup_sync, dp, &func, 0)); +} + +/* ARGSUSED */ +static void +dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + dsl_pool_t *dp = arg1; + boolean_t *completep = arg2; + + if (dp->dp_scrub_func == SCRUB_FUNC_NONE) + return; + + mutex_enter(&dp->dp_scrub_cancel_lock); + + if (dp->dp_scrub_restart) { + dp->dp_scrub_restart = B_FALSE; + *completep = B_FALSE; + } + + /* XXX this is scrub-clean specific */ + mutex_enter(&dp->dp_spa->spa_scrub_lock); + while (dp->dp_spa->spa_scrub_inflight > 0) { + cv_wait(&dp->dp_spa->spa_scrub_io_cv, + &dp->dp_spa->spa_scrub_lock); + } + mutex_exit(&dp->dp_spa->spa_scrub_lock); + dp->dp_spa->spa_scrub_started = B_FALSE; + dp->dp_spa->spa_scrub_active = B_FALSE; + + dp->dp_scrub_func = SCRUB_FUNC_NONE; + VERIFY(0 == dmu_object_free(dp->dp_meta_objset, + dp->dp_scrub_queue_obj, tx)); + dp->dp_scrub_queue_obj = 0; + bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t)); + + VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_QUEUE, tx)); + VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_MIN_TXG, tx)); + VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_MAX_TXG, tx)); + VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_BOOKMARK, tx)); + VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_FUNC, tx)); + VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_ERRORS, tx)); + + spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr, + "complete=%u", *completep); + + /* below is scrub-clean specific */ + vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE, + *completep); + /* + * If the scrub/resilver completed, update all DTLs to reflect this. + * Whether it succeeded or not, vacate all temporary scrub DTLs. + */ + vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg, + *completep ? 
dp->dp_scrub_max_txg : 0, B_TRUE); + if (dp->dp_scrub_min_txg && *completep) + spa_event_notify(dp->dp_spa, NULL, ESC_ZFS_RESILVER_FINISH); + spa_errlog_rotate(dp->dp_spa); + + /* + * We may have finished replacing a device. + * Let the async thread assess this and handle the detach. + */ + spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE); + + dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0; + mutex_exit(&dp->dp_scrub_cancel_lock); +} + +int +dsl_pool_scrub_cancel(dsl_pool_t *dp) +{ + boolean_t complete = B_FALSE; + + return (dsl_sync_task_do(dp, NULL, + dsl_pool_scrub_cancel_sync, dp, &complete, 3)); +} + +int +dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp, + zio_done_func_t *done, void *private, uint32_t arc_flags) +{ + /* + * This function will be used by bp-rewrite wad to intercept frees. + */ + return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp, + done, private, arc_flags)); +} + +static boolean_t +bookmark_is_zero(const zbookmark_t *zb) +{ + return (zb->zb_objset == 0 && zb->zb_object == 0 && + zb->zb_level == 0 && zb->zb_blkid == 0); +} + +/* dnp is the dnode for zb1->zb_object */ +static boolean_t +bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1, + const zbookmark_t *zb2) +{ + uint64_t zb1nextL0, zb2thisobj; + + ASSERT(zb1->zb_objset == zb2->zb_objset); + ASSERT(zb1->zb_object != -1ULL); + ASSERT(zb2->zb_level == 0); + + /* + * A bookmark in the deadlist is considered to be after + * everything else. + */ + if (zb2->zb_object == -1ULL) + return (B_TRUE); + + /* The objset_phys_t isn't before anything. */ + if (dnp == NULL) + return (B_FALSE); + + zb1nextL0 = (zb1->zb_blkid + 1) << + ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)); + + zb2thisobj = zb2->zb_object ? zb2->zb_object : + zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT); + + if (zb1->zb_object == 0) { + uint64_t nextobj = zb1nextL0 * + (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT; + return (nextobj <= zb2thisobj); + } + + if (zb1->zb_object < zb2thisobj) + return (B_TRUE); + if (zb1->zb_object > zb2thisobj) + return (B_FALSE); + if (zb2->zb_object == 0) + return (B_FALSE); + return (zb1nextL0 <= zb2->zb_blkid); +} + +static boolean_t +scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb) +{ + int elapsed_ticks; + int mintime; + + if (dp->dp_scrub_pausing) + return (B_TRUE); /* we're already pausing */ + + if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) + return (B_FALSE); /* we're resuming */ + + /* We only know how to resume from level-0 blocks. */ + if (zb->zb_level != 0) + return (B_FALSE); + + mintime = dp->dp_scrub_isresilver ? 
zfs_resilver_min_time : + zfs_scrub_min_time; + elapsed_ticks = lbolt64 - dp->dp_scrub_start_time; + if (elapsed_ticks > hz * zfs_txg_timeout || + (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) { + dprintf("pausing at %llx/%llx/%llx/%llx\n", + (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object, + (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid); + dp->dp_scrub_pausing = B_TRUE; + dp->dp_scrub_bookmark = *zb; + return (B_TRUE); + } + return (B_FALSE); +} + +typedef struct zil_traverse_arg { + dsl_pool_t *zta_dp; + zil_header_t *zta_zh; +} zil_traverse_arg_t; + +/* ARGSUSED */ +static void +traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg) +{ + zil_traverse_arg_t *zta = arg; + dsl_pool_t *dp = zta->zta_dp; + zil_header_t *zh = zta->zta_zh; + zbookmark_t zb; + + if (bp->blk_birth <= dp->dp_scrub_min_txg) + return; + + if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa)) + return; + + zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET]; + zb.zb_object = 0; + zb.zb_level = -1; + zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ]; + VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb)); +} + +/* ARGSUSED */ +static void +traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg) +{ + if (lrc->lrc_txtype == TX_WRITE) { + zil_traverse_arg_t *zta = arg; + dsl_pool_t *dp = zta->zta_dp; + zil_header_t *zh = zta->zta_zh; + lr_write_t *lr = (lr_write_t *)lrc; + blkptr_t *bp = &lr->lr_blkptr; + zbookmark_t zb; + + if (bp->blk_birth <= dp->dp_scrub_min_txg) + return; + + if (claim_txg == 0 || bp->blk_birth < claim_txg) + return; + + zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET]; + zb.zb_object = lr->lr_foid; + zb.zb_level = BP_GET_LEVEL(bp); + zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp); + VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb)); + } +} + +static void +traverse_zil(dsl_pool_t *dp, zil_header_t *zh) +{ + uint64_t claim_txg = zh->zh_claim_txg; + zil_traverse_arg_t zta = { dp, zh }; + zilog_t *zilog; + + /* + * We only want to visit blocks that have been claimed but not yet + * replayed (or, in read-only mode, blocks that *would* be claimed). + */ + if (claim_txg == 0 && (spa_mode & FWRITE)) + return; + + zilog = zil_alloc(dp->dp_meta_objset, zh); + + (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta, + claim_txg); + + zil_free(zilog); +} + +static void +scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp, + arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb) +{ + int err; + arc_buf_t *buf = NULL; + + if (bp->blk_birth == 0) + return; + + if (bp->blk_birth <= dp->dp_scrub_min_txg) + return; + + if (scrub_pause(dp, zb)) + return; + + if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) { + /* + * If we already visited this bp & everything below (in + * a prior txg), don't bother doing it again. + */ + if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark)) + return; + + /* + * If we found the block we're trying to resume from, or + * we went past it to a different object, zero it out to + * indicate that it's OK to start checking for pausing + * again. 
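+ * (While the saved bookmark is non-zero, scrub_pause() refuses to
+ * pause, so a new bookmark can only be taken once the old one has
+ * been cleared here.)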
+ */ + if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 || + zb->zb_object > dp->dp_scrub_bookmark.zb_object) { + dprintf("resuming at %llx/%llx/%llx/%llx\n", + (longlong_t)zb->zb_objset, + (longlong_t)zb->zb_object, + (longlong_t)zb->zb_level, + (longlong_t)zb->zb_blkid); + bzero(&dp->dp_scrub_bookmark, sizeof (*zb)); + } + } + + if (BP_GET_LEVEL(bp) > 0) { + uint32_t flags = ARC_WAIT; + int i; + blkptr_t *cbp; + int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; + + err = arc_read(NULL, dp->dp_spa, bp, pbuf, + arc_getbuf_func, &buf, + ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); + if (err) { + mutex_enter(&dp->dp_spa->spa_scrub_lock); + dp->dp_spa->spa_scrub_errors++; + mutex_exit(&dp->dp_spa->spa_scrub_lock); + return; + } + cbp = buf->b_data; + + for (i = 0; i < epb; i++, cbp++) { + zbookmark_t czb; + + SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, + zb->zb_level - 1, + zb->zb_blkid * epb + i); + scrub_visitbp(dp, dnp, buf, cbp, &czb); + } + } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { + uint32_t flags = ARC_WAIT; + dnode_phys_t *child_dnp; + int i, j; + int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; + + err = arc_read(NULL, dp->dp_spa, bp, pbuf, + arc_getbuf_func, &buf, + ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); + if (err) { + mutex_enter(&dp->dp_spa->spa_scrub_lock); + dp->dp_spa->spa_scrub_errors++; + mutex_exit(&dp->dp_spa->spa_scrub_lock); + return; + } + child_dnp = buf->b_data; + + for (i = 0; i < epb; i++, child_dnp++) { + for (j = 0; j < child_dnp->dn_nblkptr; j++) { + zbookmark_t czb; + + SET_BOOKMARK(&czb, zb->zb_objset, + zb->zb_blkid * epb + i, + child_dnp->dn_nlevels - 1, j); + scrub_visitbp(dp, child_dnp, buf, + &child_dnp->dn_blkptr[j], &czb); + } + } + } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { + uint32_t flags = ARC_WAIT; + objset_phys_t *osp; + int j; + + err = arc_read_nolock(NULL, dp->dp_spa, bp, + arc_getbuf_func, &buf, + ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); + if (err) { + mutex_enter(&dp->dp_spa->spa_scrub_lock); + dp->dp_spa->spa_scrub_errors++; + mutex_exit(&dp->dp_spa->spa_scrub_lock); + return; + } + + osp = buf->b_data; + + traverse_zil(dp, &osp->os_zil_header); + + for (j = 0; j < osp->os_meta_dnode.dn_nblkptr; j++) { + zbookmark_t czb; + + SET_BOOKMARK(&czb, zb->zb_objset, 0, + osp->os_meta_dnode.dn_nlevels - 1, j); + scrub_visitbp(dp, &osp->os_meta_dnode, buf, + &osp->os_meta_dnode.dn_blkptr[j], &czb); + } + } + + (void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb); + if (buf) + (void) arc_buf_remove_ref(buf, &buf); +} + +static void +scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp) +{ + zbookmark_t zb; + + SET_BOOKMARK(&zb, ds ? 
ds->ds_object : 0, 0, -1, 0); + scrub_visitbp(dp, NULL, NULL, bp, &zb); +} + +void +dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx) +{ + dsl_pool_t *dp = ds->ds_dir->dd_pool; + + if (dp->dp_scrub_func == SCRUB_FUNC_NONE) + return; + + if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) { + SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0); + } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj, + ds->ds_object, tx) != 0) { + return; + } + + if (ds->ds_phys->ds_next_snap_obj != 0) { + VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj, + ds->ds_phys->ds_next_snap_obj, tx) == 0); + } + ASSERT3U(ds->ds_phys->ds_num_children, <=, 1); +} + +void +dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx) +{ + dsl_pool_t *dp = ds->ds_dir->dd_pool; + + if (dp->dp_scrub_func == SCRUB_FUNC_NONE) + return; + + ASSERT(ds->ds_phys->ds_prev_snap_obj != 0); + + if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) { + dp->dp_scrub_bookmark.zb_objset = + ds->ds_phys->ds_prev_snap_obj; + } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj, + ds->ds_object, tx) == 0) { + VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj, + ds->ds_phys->ds_prev_snap_obj, tx) == 0); + } +} + +void +dsl_pool_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) +{ + dsl_pool_t *dp = ds1->ds_dir->dd_pool; + + if (dp->dp_scrub_func == SCRUB_FUNC_NONE) + return; + + if (dp->dp_scrub_bookmark.zb_objset == ds1->ds_object) { + dp->dp_scrub_bookmark.zb_objset = ds2->ds_object; + } else if (dp->dp_scrub_bookmark.zb_objset == ds2->ds_object) { + dp->dp_scrub_bookmark.zb_objset = ds1->ds_object; + } + + if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj, + ds1->ds_object, tx) == 0) { + int err = zap_add_int(dp->dp_meta_objset, + dp->dp_scrub_queue_obj, ds2->ds_object, tx); + VERIFY(err == 0 || err == EEXIST); + if (err == EEXIST) { + /* Both were there to begin with */ + VERIFY(0 == zap_add_int(dp->dp_meta_objset, + dp->dp_scrub_queue_obj, ds1->ds_object, tx)); + } + } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj, + ds2->ds_object, tx) == 0) { + VERIFY(0 == zap_add_int(dp->dp_meta_objset, + dp->dp_scrub_queue_obj, ds1->ds_object, tx)); + } +} + +struct enqueue_clones_arg { + dmu_tx_t *tx; + uint64_t originobj; +}; + +/* ARGSUSED */ +static int +enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg) +{ + struct enqueue_clones_arg *eca = arg; + dsl_dataset_t *ds; + int err; + dsl_pool_t *dp; + + err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds); + if (err) + return (err); + dp = ds->ds_dir->dd_pool; + + if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) { + while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) { + dsl_dataset_t *prev; + err = dsl_dataset_hold_obj(dp, + ds->ds_phys->ds_prev_snap_obj, FTAG, &prev); + + dsl_dataset_rele(ds, FTAG); + if (err) + return (err); + ds = prev; + } + VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj, + ds->ds_object, eca->tx) == 0); + } + dsl_dataset_rele(ds, FTAG); + return (0); +} + +static void +scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx) +{ + dsl_dataset_t *ds; + uint64_t min_txg_save; + + VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); + + /* + * Iterate over the bps in this ds. 
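+ * (dp_scrub_min_txg is temporarily raised to this dataset's previous
+ * snapshot's birth txg, so blocks already visited through that
+ * snapshot are not scrubbed a second time.)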
+ */ + min_txg_save = dp->dp_scrub_min_txg; + dp->dp_scrub_min_txg = + MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg); + scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp); + dp->dp_scrub_min_txg = min_txg_save; + + if (dp->dp_scrub_pausing) + goto out; + + /* + * Add descendent datasets to work queue. + */ + if (ds->ds_phys->ds_next_snap_obj != 0) { + VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj, + ds->ds_phys->ds_next_snap_obj, tx) == 0); + } + if (ds->ds_phys->ds_num_children > 1) { + if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) { + struct enqueue_clones_arg eca; + eca.tx = tx; + eca.originobj = ds->ds_object; + + (void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa, + NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN); + } else { + VERIFY(zap_join(dp->dp_meta_objset, + ds->ds_phys->ds_next_clones_obj, + dp->dp_scrub_queue_obj, tx) == 0); + } + } + +out: + dsl_dataset_rele(ds, FTAG); +} + +/* ARGSUSED */ +static int +enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg) +{ + dmu_tx_t *tx = arg; + dsl_dataset_t *ds; + int err; + dsl_pool_t *dp; + + err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds); + if (err) + return (err); + + dp = ds->ds_dir->dd_pool; + + while (ds->ds_phys->ds_prev_snap_obj != 0) { + dsl_dataset_t *prev; + err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj, + FTAG, &prev); + if (err) { + dsl_dataset_rele(ds, FTAG); + return (err); + } + + /* + * If this is a clone, we don't need to worry about it for now. + */ + if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) { + dsl_dataset_rele(ds, FTAG); + dsl_dataset_rele(prev, FTAG); + return (0); + } + dsl_dataset_rele(ds, FTAG); + ds = prev; + } + + VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj, + ds->ds_object, tx) == 0); + dsl_dataset_rele(ds, FTAG); + return (0); +} + +void +dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx) +{ + zap_cursor_t zc; + zap_attribute_t za; + boolean_t complete = B_TRUE; + + if (dp->dp_scrub_func == SCRUB_FUNC_NONE) + return; + + /* If the spa is not fully loaded, don't bother. */ + if (dp->dp_spa->spa_load_state != SPA_LOAD_NONE) + return; + + if (dp->dp_scrub_restart) { + enum scrub_func func = dp->dp_scrub_func; + dp->dp_scrub_restart = B_FALSE; + dsl_pool_scrub_setup_sync(dp, &func, kcred, tx); + } + + if (dp->dp_spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) { + /* + * We must have resumed after rebooting; reset the vdev + * stats to know that we're doing a scrub (although it + * will think we're just starting now). + */ + vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, + dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER : + POOL_SCRUB_EVERYTHING, B_FALSE); + } + + dp->dp_scrub_pausing = B_FALSE; + dp->dp_scrub_start_time = lbolt64; + dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0); + dp->dp_spa->spa_scrub_active = B_TRUE; + + if (dp->dp_scrub_bookmark.zb_objset == 0) { + /* First do the MOS & ORIGIN */ + scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp); + if (dp->dp_scrub_pausing) + goto out; + + if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) { + VERIFY(0 == dmu_objset_find_spa(dp->dp_spa, + NULL, enqueue_cb, tx, DS_FIND_CHILDREN)); + } else { + scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx); + } + ASSERT(!dp->dp_scrub_pausing); + } else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) { + /* + * If we were paused, continue from here. Note if the + * ds we were paused on was deleted, the zb_objset will + * be -1, so we will skip this and find a new objset + * below. 
+ */ + scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx); + if (dp->dp_scrub_pausing) + goto out; + } + + /* + * In case we were paused right at the end of the ds, zero the + * bookmark so we don't think that we're still trying to resume. + */ + bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t)); + + /* keep pulling things out of the zap-object-as-queue */ + while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj), + zap_cursor_retrieve(&zc, &za) == 0) { + VERIFY(0 == zap_remove(dp->dp_meta_objset, + dp->dp_scrub_queue_obj, za.za_name, tx)); + scrub_visitds(dp, za.za_first_integer, tx); + if (dp->dp_scrub_pausing) + break; + zap_cursor_fini(&zc); + } + zap_cursor_fini(&zc); + if (dp->dp_scrub_pausing) + goto out; + + /* done. */ + + dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx); + return; +out: + VERIFY(0 == zap_update(dp->dp_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4, + &dp->dp_scrub_bookmark, tx)); + VERIFY(0 == zap_update(dp->dp_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1, + &dp->dp_spa->spa_scrub_errors, tx)); + + /* XXX this is scrub-clean specific */ + mutex_enter(&dp->dp_spa->spa_scrub_lock); + while (dp->dp_spa->spa_scrub_inflight > 0) { + cv_wait(&dp->dp_spa->spa_scrub_io_cv, + &dp->dp_spa->spa_scrub_lock); + } + mutex_exit(&dp->dp_spa->spa_scrub_lock); +} + +void +dsl_pool_scrub_restart(dsl_pool_t *dp) +{ + mutex_enter(&dp->dp_scrub_cancel_lock); + dp->dp_scrub_restart = B_TRUE; + mutex_exit(&dp->dp_scrub_cancel_lock); +} + +/* + * scrub consumers + */ + +static void +count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp) +{ + int i; + + /* + * If we resume after a reboot, zab will be NULL; don't record + * incomplete stats in that case. + */ + if (zab == NULL) + return; + + for (i = 0; i < 4; i++) { + int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; + int t = (i & 1) ? 
BP_GET_TYPE(bp) : DMU_OT_TOTAL; + zfs_blkstat_t *zb = &zab->zab_type[l][t]; + int equal; + + zb->zb_count++; + zb->zb_asize += BP_GET_ASIZE(bp); + zb->zb_lsize += BP_GET_LSIZE(bp); + zb->zb_psize += BP_GET_PSIZE(bp); + zb->zb_gangs += BP_COUNT_GANG(bp); + + switch (BP_GET_NDVAS(bp)) { + case 2: + if (DVA_GET_VDEV(&bp->blk_dva[0]) == + DVA_GET_VDEV(&bp->blk_dva[1])) + zb->zb_ditto_2_of_2_samevdev++; + break; + case 3: + equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == + DVA_GET_VDEV(&bp->blk_dva[1])) + + (DVA_GET_VDEV(&bp->blk_dva[0]) == + DVA_GET_VDEV(&bp->blk_dva[2])) + + (DVA_GET_VDEV(&bp->blk_dva[1]) == + DVA_GET_VDEV(&bp->blk_dva[2])); + if (equal == 1) + zb->zb_ditto_2_of_3_samevdev++; + else if (equal == 3) + zb->zb_ditto_3_of_3_samevdev++; + break; + } + } +} + +static void +dsl_pool_scrub_clean_done(zio_t *zio) +{ + spa_t *spa = zio->io_spa; + + zio_data_buf_free(zio->io_data, zio->io_size); + + mutex_enter(&spa->spa_scrub_lock); + spa->spa_scrub_inflight--; + cv_broadcast(&spa->spa_scrub_io_cv); + + if (zio->io_error && (zio->io_error != ECKSUM || + !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) + spa->spa_scrub_errors++; + mutex_exit(&spa->spa_scrub_lock); +} + +static int +dsl_pool_scrub_clean_cb(dsl_pool_t *dp, + const blkptr_t *bp, const zbookmark_t *zb) +{ + size_t size = BP_GET_LSIZE(bp); + int d; + spa_t *spa = dp->dp_spa; + boolean_t needs_io; + int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL; + int zio_priority; + + count_block(dp->dp_blkstats, bp); + + if (dp->dp_scrub_isresilver == 0) { + /* It's a scrub */ + zio_flags |= ZIO_FLAG_SCRUB; + zio_priority = ZIO_PRIORITY_SCRUB; + needs_io = B_TRUE; + } else { + /* It's a resilver */ + zio_flags |= ZIO_FLAG_RESILVER; + zio_priority = ZIO_PRIORITY_RESILVER; + needs_io = B_FALSE; + } + + /* If it's an intent log block, failure is expected. */ + if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET) + zio_flags |= ZIO_FLAG_SPECULATIVE; + + for (d = 0; d < BP_GET_NDVAS(bp); d++) { + vdev_t *vd = vdev_lookup_top(spa, + DVA_GET_VDEV(&bp->blk_dva[d])); + + /* + * Keep track of how much data we've examined so that + * zpool(1M) status can make useful progress reports. + */ + mutex_enter(&vd->vdev_stat_lock); + vd->vdev_stat.vs_scrub_examined += + DVA_GET_ASIZE(&bp->blk_dva[d]); + mutex_exit(&vd->vdev_stat_lock); + + /* if it's a resilver, this may not be in the target range */ + if (!needs_io) { + if (DVA_GET_GANG(&bp->blk_dva[d])) { + /* + * Gang members may be spread across multiple + * vdevs, so the best we can do is look at the + * pool-wide DTL. + * XXX -- it would be better to change our + * allocation policy to ensure that this can't + * happen. + */ + vd = spa->spa_root_vdev; + } + needs_io = vdev_dtl_contains(&vd->vdev_dtl_map, + bp->blk_birth, 1); + } + } + + if (needs_io && !zfs_no_scrub_io) { + void *data = zio_data_buf_alloc(size); + + mutex_enter(&spa->spa_scrub_lock); + while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight) + cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); + spa->spa_scrub_inflight++; + mutex_exit(&spa->spa_scrub_lock); + + zio_nowait(zio_read(NULL, spa, bp, data, size, + dsl_pool_scrub_clean_done, NULL, zio_priority, + zio_flags, zb)); + } + + /* do not relocate this block */ + return (0); +} + +int +dsl_pool_scrub_clean(dsl_pool_t *dp) +{ + /* + * Purge all vdev caches. We do this here rather than in sync + * context because this requires a writer lock on the spa_config + * lock, which we can't do from sync context. 
The + * spa_scrub_reopen flag indicates that vdev_open() should not + * attempt to start another scrub. + */ + spa_config_enter(dp->dp_spa, SCL_ALL, FTAG, RW_WRITER); + dp->dp_spa->spa_scrub_reopen = B_TRUE; + vdev_reopen(dp->dp_spa->spa_root_vdev); + dp->dp_spa->spa_scrub_reopen = B_FALSE; + spa_config_exit(dp->dp_spa, SCL_ALL, FTAG); + + return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN)); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_synctask.c b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_synctask.c new file mode 100644 index 000000000000..21100225abf7 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/dsl_synctask.c @@ -0,0 +1,225 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include + +#define DST_AVG_BLKSHIFT 14 + +/* ARGSUSED */ +static int +dsl_null_checkfunc(void *arg1, void *arg2, dmu_tx_t *tx) +{ + return (0); +} + +dsl_sync_task_group_t * +dsl_sync_task_group_create(dsl_pool_t *dp) +{ + dsl_sync_task_group_t *dstg; + + dstg = kmem_zalloc(sizeof (dsl_sync_task_group_t), KM_SLEEP); + list_create(&dstg->dstg_tasks, sizeof (dsl_sync_task_t), + offsetof(dsl_sync_task_t, dst_node)); + dstg->dstg_pool = dp; + dstg->dstg_cr = CRED(); + + return (dstg); +} + +void +dsl_sync_task_create(dsl_sync_task_group_t *dstg, + dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc, + void *arg1, void *arg2, int blocks_modified) +{ + dsl_sync_task_t *dst; + + if (checkfunc == NULL) + checkfunc = dsl_null_checkfunc; + dst = kmem_zalloc(sizeof (dsl_sync_task_t), KM_SLEEP); + dst->dst_checkfunc = checkfunc; + dst->dst_syncfunc = syncfunc; + dst->dst_arg1 = arg1; + dst->dst_arg2 = arg2; + list_insert_tail(&dstg->dstg_tasks, dst); + + dstg->dstg_space += blocks_modified << DST_AVG_BLKSHIFT; +} + +int +dsl_sync_task_group_wait(dsl_sync_task_group_t *dstg) +{ + dmu_tx_t *tx; + uint64_t txg; + dsl_sync_task_t *dst; + +top: + tx = dmu_tx_create_dd(dstg->dstg_pool->dp_mos_dir); + VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT)); + + txg = dmu_tx_get_txg(tx); + + /* Do a preliminary error check. */ + dstg->dstg_err = 0; + rw_enter(&dstg->dstg_pool->dp_config_rwlock, RW_READER); + for (dst = list_head(&dstg->dstg_tasks); dst; + dst = list_next(&dstg->dstg_tasks, dst)) { +#ifdef ZFS_DEBUG + /* + * Only check half the time, otherwise, the sync-context + * check will almost never fail. 
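+ * (Each checkfunc is run again under the config lock as writer in
+ * dsl_sync_task_group_sync(), so randomly skipping the open-context
+ * pre-check simply ensures the sync-context error path still gets
+ * exercised in debug builds.)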
+ */ + if (spa_get_random(2) == 0) + continue; +#endif + dst->dst_err = + dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx); + if (dst->dst_err) + dstg->dstg_err = dst->dst_err; + } + rw_exit(&dstg->dstg_pool->dp_config_rwlock); + + if (dstg->dstg_err) { + dmu_tx_commit(tx); + return (dstg->dstg_err); + } + + VERIFY(0 == txg_list_add(&dstg->dstg_pool->dp_sync_tasks, dstg, txg)); + + dmu_tx_commit(tx); + + txg_wait_synced(dstg->dstg_pool, txg); + + if (dstg->dstg_err == EAGAIN) + goto top; + + return (dstg->dstg_err); +} + +void +dsl_sync_task_group_nowait(dsl_sync_task_group_t *dstg, dmu_tx_t *tx) +{ + uint64_t txg; + + dstg->dstg_nowaiter = B_TRUE; + txg = dmu_tx_get_txg(tx); + VERIFY(0 == txg_list_add(&dstg->dstg_pool->dp_sync_tasks, dstg, txg)); +} + +void +dsl_sync_task_group_destroy(dsl_sync_task_group_t *dstg) +{ + dsl_sync_task_t *dst; + + while (dst = list_head(&dstg->dstg_tasks)) { + list_remove(&dstg->dstg_tasks, dst); + kmem_free(dst, sizeof (dsl_sync_task_t)); + } + kmem_free(dstg, sizeof (dsl_sync_task_group_t)); +} + +void +dsl_sync_task_group_sync(dsl_sync_task_group_t *dstg, dmu_tx_t *tx) +{ + dsl_sync_task_t *dst; + void *tr_cookie; + + ASSERT3U(dstg->dstg_err, ==, 0); + + /* + * Check for sufficient space. + */ + dstg->dstg_err = dsl_dir_tempreserve_space(dstg->dstg_pool->dp_mos_dir, + dstg->dstg_space, dstg->dstg_space * 3, 0, 0, &tr_cookie, tx); + /* don't bother trying again */ + if (dstg->dstg_err == ERESTART) + dstg->dstg_err = EAGAIN; + if (dstg->dstg_err) + return; + + /* + * Check for errors by calling checkfuncs. + */ + rw_enter(&dstg->dstg_pool->dp_config_rwlock, RW_WRITER); + for (dst = list_head(&dstg->dstg_tasks); dst; + dst = list_next(&dstg->dstg_tasks, dst)) { + dst->dst_err = + dst->dst_checkfunc(dst->dst_arg1, dst->dst_arg2, tx); + if (dst->dst_err) + dstg->dstg_err = dst->dst_err; + } + + if (dstg->dstg_err == 0) { + /* + * Execute sync tasks. + */ + for (dst = list_head(&dstg->dstg_tasks); dst; + dst = list_next(&dstg->dstg_tasks, dst)) { + dst->dst_syncfunc(dst->dst_arg1, dst->dst_arg2, + dstg->dstg_cr, tx); + } + } + rw_exit(&dstg->dstg_pool->dp_config_rwlock); + + dsl_dir_tempreserve_clear(tr_cookie, tx); + + if (dstg->dstg_nowaiter) + dsl_sync_task_group_destroy(dstg); +} + +int +dsl_sync_task_do(dsl_pool_t *dp, + dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc, + void *arg1, void *arg2, int blocks_modified) +{ + dsl_sync_task_group_t *dstg; + int err; + + dstg = dsl_sync_task_group_create(dp); + dsl_sync_task_create(dstg, checkfunc, syncfunc, + arg1, arg2, blocks_modified); + err = dsl_sync_task_group_wait(dstg); + dsl_sync_task_group_destroy(dstg); + return (err); +} + +void +dsl_sync_task_do_nowait(dsl_pool_t *dp, + dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc, + void *arg1, void *arg2, int blocks_modified, dmu_tx_t *tx) +{ + dsl_sync_task_group_t *dstg; + + dstg = dsl_sync_task_group_create(dp); + dsl_sync_task_create(dstg, checkfunc, syncfunc, + arg1, arg2, blocks_modified); + dsl_sync_task_group_nowait(dstg, tx); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/fletcher.c b/external/cddl/osnet/dist/uts/common/fs/zfs/fletcher.c new file mode 100644 index 000000000000..edda3c9a9d3d --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/fletcher.c @@ -0,0 +1,145 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include + +void +fletcher_2_native(const void *buf, uint64_t size, zio_cksum_t *zcp) +{ + const uint64_t *ip = buf; + const uint64_t *ipend = ip + (size / sizeof (uint64_t)); + uint64_t a0, b0, a1, b1; + + for (a0 = b0 = a1 = b1 = 0; ip < ipend; ip += 2) { + a0 += ip[0]; + a1 += ip[1]; + b0 += a0; + b1 += a1; + } + + ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1); +} + +void +fletcher_2_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp) +{ + const uint64_t *ip = buf; + const uint64_t *ipend = ip + (size / sizeof (uint64_t)); + uint64_t a0, b0, a1, b1; + + for (a0 = b0 = a1 = b1 = 0; ip < ipend; ip += 2) { + a0 += BSWAP_64(ip[0]); + a1 += BSWAP_64(ip[1]); + b0 += a0; + b1 += a1; + } + + ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1); +} + +void +fletcher_4_native(const void *buf, uint64_t size, zio_cksum_t *zcp) +{ + const uint32_t *ip = buf; + const uint32_t *ipend = ip + (size / sizeof (uint32_t)); + uint64_t a, b, c, d; + + for (a = b = c = d = 0; ip < ipend; ip++) { + a += ip[0]; + b += a; + c += b; + d += c; + } + + ZIO_SET_CHECKSUM(zcp, a, b, c, d); +} + +void +fletcher_4_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp) +{ + const uint32_t *ip = buf; + const uint32_t *ipend = ip + (size / sizeof (uint32_t)); + uint64_t a, b, c, d; + + for (a = b = c = d = 0; ip < ipend; ip++) { + a += BSWAP_32(ip[0]); + b += a; + c += b; + d += c; + } + + ZIO_SET_CHECKSUM(zcp, a, b, c, d); +} + +void +fletcher_4_incremental_native(const void *buf, uint64_t size, + zio_cksum_t *zcp) +{ + const uint32_t *ip = buf; + const uint32_t *ipend = ip + (size / sizeof (uint32_t)); + uint64_t a, b, c, d; + + a = zcp->zc_word[0]; + b = zcp->zc_word[1]; + c = zcp->zc_word[2]; + d = zcp->zc_word[3]; + + for (; ip < ipend; ip++) { + a += ip[0]; + b += a; + c += b; + d += c; + } + + ZIO_SET_CHECKSUM(zcp, a, b, c, d); +} + +void +fletcher_4_incremental_byteswap(const void *buf, uint64_t size, + zio_cksum_t *zcp) +{ + const uint32_t *ip = buf; + const uint32_t *ipend = ip + (size / sizeof (uint32_t)); + uint64_t a, b, c, d; + + a = zcp->zc_word[0]; + b = zcp->zc_word[1]; + c = zcp->zc_word[2]; + d = zcp->zc_word[3]; + + for (; ip < ipend; ip++) { + a += BSWAP_32(ip[0]); + b += a; + c += b; + d += c; + } + + ZIO_SET_CHECKSUM(zcp, a, b, c, d); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/gzip.c b/external/cddl/osnet/dist/uts/common/fs/zfs/gzip.c new file mode 100644 index 000000000000..b257d4af753c --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/gzip.c @@ -0,0 +1,69 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +#ifdef _KERNEL +#include +#else +#include +#endif + +size_t +gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) +{ + size_t dstlen = d_len; + + ASSERT(d_len <= s_len); + + if (z_compress_level(d_start, &dstlen, s_start, s_len, n) != Z_OK) { + if (d_len != s_len) + return (s_len); + + bcopy(s_start, d_start, s_len); + return (s_len); + } + + return (dstlen); +} + +/*ARGSUSED*/ +int +gzip_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) +{ + size_t dstlen = d_len; + + ASSERT(d_len >= s_len); + + if (z_uncompress(d_start, &dstlen, s_start, s_len) != Z_OK) + return (-1); + + return (0); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/lzjb.c b/external/cddl/osnet/dist/uts/common/fs/zfs/lzjb.c new file mode 100644 index 000000000000..7fcde8475e00 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/lzjb.c @@ -0,0 +1,128 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * We keep our own copy of this algorithm for 2 main reasons: + * 1. If we didn't, anyone modifying common/os/compress.c would + * directly break our on disk format + * 2. Our version of lzjb does not have a number of checks that the + * common/os version needs and uses + * In particular, we are adding the "feature" that compress() can + * take a destination buffer size and return -1 if the data will not + * compress to d_len or less. 
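 *
 * (Note, for clarity: as implemented below, and likewise in
 * gzip_compress() above, the "will not compress" case is actually
 * reported by returning s_len, with the caller expected to store the
 * block uncompressed; only lzjb_decompress() returns -1, when it hits
 * an impossible back-reference in corrupt input.)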
+ */ + +#include + +#define MATCH_BITS 6 +#define MATCH_MIN 3 +#define MATCH_MAX ((1 << MATCH_BITS) + (MATCH_MIN - 1)) +#define OFFSET_MASK ((1 << (16 - MATCH_BITS)) - 1) +#define LEMPEL_SIZE 256 + +/*ARGSUSED*/ +size_t +lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) +{ + uchar_t *src = s_start; + uchar_t *dst = d_start; + uchar_t *cpy, *copymap; + int copymask = 1 << (NBBY - 1); + int mlen, offset; + uint16_t *hp; + uint16_t lempel[LEMPEL_SIZE]; /* uninitialized; see above */ + + while (src < (uchar_t *)s_start + s_len) { + if ((copymask <<= 1) == (1 << NBBY)) { + if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) { + if (d_len != s_len) + return (s_len); + mlen = s_len; + for (src = s_start, dst = d_start; mlen; mlen--) + *dst++ = *src++; + return (s_len); + } + copymask = 1; + copymap = dst; + *dst++ = 0; + } + if (src > (uchar_t *)s_start + s_len - MATCH_MAX) { + *dst++ = *src++; + continue; + } + hp = &lempel[((src[0] + 13) ^ (src[1] - 13) ^ src[2]) & + (LEMPEL_SIZE - 1)]; + offset = (intptr_t)(src - *hp) & OFFSET_MASK; + *hp = (uint16_t)(uintptr_t)src; + cpy = src - offset; + if (cpy >= (uchar_t *)s_start && cpy != src && + src[0] == cpy[0] && src[1] == cpy[1] && src[2] == cpy[2]) { + *copymap |= copymask; + for (mlen = MATCH_MIN; mlen < MATCH_MAX; mlen++) + if (src[mlen] != cpy[mlen]) + break; + *dst++ = ((mlen - MATCH_MIN) << (NBBY - MATCH_BITS)) | + (offset >> NBBY); + *dst++ = (uchar_t)offset; + src += mlen; + } else { + *dst++ = *src++; + } + } + return (dst - (uchar_t *)d_start); +} + +/*ARGSUSED*/ +int +lzjb_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) +{ + uchar_t *src = s_start; + uchar_t *dst = d_start; + uchar_t *d_end = (uchar_t *)d_start + d_len; + uchar_t *cpy, copymap; + int copymask = 1 << (NBBY - 1); + + while (dst < d_end) { + if ((copymask <<= 1) == (1 << NBBY)) { + copymask = 1; + copymap = *src++; + } + if (copymap & copymask) { + int mlen = (src[0] >> (NBBY - MATCH_BITS)) + MATCH_MIN; + int offset = ((src[0] << NBBY) | src[1]) & OFFSET_MASK; + src += 2; + if ((cpy = dst - offset) < (uchar_t *)d_start) + return (-1); + while (--mlen >= 0 && dst < d_end) + *dst++ = *cpy++; + } else { + *dst++ = *src++; + } + } + return (0); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/metaslab.c b/external/cddl/osnet/dist/uts/common/fs/zfs/metaslab.c new file mode 100644 index 000000000000..87727fac2dbe --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/metaslab.c @@ -0,0 +1,1049 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
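 */

/*
 * Illustrative round-trip harness for the lzjb routines in the lzjb.c
 * hunk above (a hypothetical sketch; lzjb_selftest and its buffers are
 * not part of either file).  It exercises the convention that
 * lzjb_compress() hands back s_len when the data will not shrink,
 * leaving the caller to keep the block uncompressed.
 */
static void
lzjb_selftest(void *src, size_t len, void *scratch, void *out)
{
        size_t clen;

        clen = lzjb_compress(src, scratch, len, len, 0);
        if (clen == len) {
                bcopy(src, out, len);   /* incompressible: store as-is */
        } else {
                VERIFY(lzjb_decompress(scratch, out, clen, len, 0) == 0);
        }
        VERIFY(bcmp(src, out, len) == 0);       /* round trip is lossless */
}

/*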
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +uint64_t metaslab_aliquot = 512ULL << 10; +uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */ + +/* + * ========================================================================== + * Metaslab classes + * ========================================================================== + */ +metaslab_class_t * +metaslab_class_create(void) +{ + metaslab_class_t *mc; + + mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP); + + mc->mc_rotor = NULL; + + return (mc); +} + +void +metaslab_class_destroy(metaslab_class_t *mc) +{ + metaslab_group_t *mg; + + while ((mg = mc->mc_rotor) != NULL) { + metaslab_class_remove(mc, mg); + metaslab_group_destroy(mg); + } + + kmem_free(mc, sizeof (metaslab_class_t)); +} + +void +metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg) +{ + metaslab_group_t *mgprev, *mgnext; + + ASSERT(mg->mg_class == NULL); + + if ((mgprev = mc->mc_rotor) == NULL) { + mg->mg_prev = mg; + mg->mg_next = mg; + } else { + mgnext = mgprev->mg_next; + mg->mg_prev = mgprev; + mg->mg_next = mgnext; + mgprev->mg_next = mg; + mgnext->mg_prev = mg; + } + mc->mc_rotor = mg; + mg->mg_class = mc; +} + +void +metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg) +{ + metaslab_group_t *mgprev, *mgnext; + + ASSERT(mg->mg_class == mc); + + mgprev = mg->mg_prev; + mgnext = mg->mg_next; + + if (mg == mgnext) { + mc->mc_rotor = NULL; + } else { + mc->mc_rotor = mgnext; + mgprev->mg_next = mgnext; + mgnext->mg_prev = mgprev; + } + + mg->mg_prev = NULL; + mg->mg_next = NULL; + mg->mg_class = NULL; +} + +/* + * ========================================================================== + * Metaslab groups + * ========================================================================== + */ +static int +metaslab_compare(const void *x1, const void *x2) +{ + const metaslab_t *m1 = x1; + const metaslab_t *m2 = x2; + + if (m1->ms_weight < m2->ms_weight) + return (1); + if (m1->ms_weight > m2->ms_weight) + return (-1); + + /* + * If the weights are identical, use the offset to force uniqueness. 
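 *
 * For example (illustrative figures): metaslabs with weights
 * {10, 10, 7} and start offsets {2G, 1G, 0} sort as (10, 1G),
 * (10, 2G), (7, 0): heaviest first, ties broken by the lower
 * start offset.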
+ */ + if (m1->ms_map.sm_start < m2->ms_map.sm_start) + return (-1); + if (m1->ms_map.sm_start > m2->ms_map.sm_start) + return (1); + + ASSERT3P(m1, ==, m2); + + return (0); +} + +metaslab_group_t * +metaslab_group_create(metaslab_class_t *mc, vdev_t *vd) +{ + metaslab_group_t *mg; + + mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP); + mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL); + avl_create(&mg->mg_metaslab_tree, metaslab_compare, + sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node)); + mg->mg_aliquot = metaslab_aliquot * MAX(1, vd->vdev_children); + mg->mg_vd = vd; + metaslab_class_add(mc, mg); + + return (mg); +} + +void +metaslab_group_destroy(metaslab_group_t *mg) +{ + avl_destroy(&mg->mg_metaslab_tree); + mutex_destroy(&mg->mg_lock); + kmem_free(mg, sizeof (metaslab_group_t)); +} + +static void +metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp) +{ + mutex_enter(&mg->mg_lock); + ASSERT(msp->ms_group == NULL); + msp->ms_group = mg; + msp->ms_weight = 0; + avl_add(&mg->mg_metaslab_tree, msp); + mutex_exit(&mg->mg_lock); +} + +static void +metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp) +{ + mutex_enter(&mg->mg_lock); + ASSERT(msp->ms_group == mg); + avl_remove(&mg->mg_metaslab_tree, msp); + msp->ms_group = NULL; + mutex_exit(&mg->mg_lock); +} + +static void +metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight) +{ + /* + * Although in principle the weight can be any value, in + * practice we do not use values in the range [1, 510]. + */ + ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0); + ASSERT(MUTEX_HELD(&msp->ms_lock)); + + mutex_enter(&mg->mg_lock); + ASSERT(msp->ms_group == mg); + avl_remove(&mg->mg_metaslab_tree, msp); + msp->ms_weight = weight; + avl_add(&mg->mg_metaslab_tree, msp); + mutex_exit(&mg->mg_lock); +} + +/* + * ========================================================================== + * The first-fit block allocator + * ========================================================================== + */ +static void +metaslab_ff_load(space_map_t *sm) +{ + ASSERT(sm->sm_ppd == NULL); + sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP); +} + +static void +metaslab_ff_unload(space_map_t *sm) +{ + kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t)); + sm->sm_ppd = NULL; +} + +static uint64_t +metaslab_ff_alloc(space_map_t *sm, uint64_t size) +{ + avl_tree_t *t = &sm->sm_root; + uint64_t align = size & -size; + uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1; + space_seg_t *ss, ssearch; + avl_index_t where; + + ssearch.ss_start = *cursor; + ssearch.ss_end = *cursor + size; + + ss = avl_find(t, &ssearch, &where); + if (ss == NULL) + ss = avl_nearest(t, where, AVL_AFTER); + + while (ss != NULL) { + uint64_t offset = P2ROUNDUP(ss->ss_start, align); + + if (offset + size <= ss->ss_end) { + *cursor = offset + size; + return (offset); + } + ss = AVL_NEXT(t, ss); + } + + /* + * If we know we've searched the whole map (*cursor == 0), give up. + * Otherwise, reset the cursor to the beginning and try again. 
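 *
 * For example (illustrative figures): an 8K request uses the cursor for
 * 8K alignment; if that cursor sits at 96K, the loop above starts from
 * the free segment nearest 96K and walks forward, and on failure the
 * cursor is reset to 0 and the whole map is rescanned once before
 * giving up with -1ULL.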
+ */ + if (*cursor == 0) + return (-1ULL); + + *cursor = 0; + return (metaslab_ff_alloc(sm, size)); +} + +/* ARGSUSED */ +static void +metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size) +{ + /* No need to update cursor */ +} + +/* ARGSUSED */ +static void +metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size) +{ + /* No need to update cursor */ +} + +static space_map_ops_t metaslab_ff_ops = { + metaslab_ff_load, + metaslab_ff_unload, + metaslab_ff_alloc, + metaslab_ff_claim, + metaslab_ff_free +}; + +/* + * ========================================================================== + * Metaslabs + * ========================================================================== + */ +metaslab_t * +metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo, + uint64_t start, uint64_t size, uint64_t txg) +{ + vdev_t *vd = mg->mg_vd; + metaslab_t *msp; + + msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); + mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL); + + msp->ms_smo_syncing = *smo; + + /* + * We create the main space map here, but we don't create the + * allocmaps and freemaps until metaslab_sync_done(). This serves + * two purposes: it allows metaslab_sync_done() to detect the + * addition of new space; and for debugging, it ensures that we'd + * data fault on any attempt to use this metaslab before it's ready. + */ + space_map_create(&msp->ms_map, start, size, + vd->vdev_ashift, &msp->ms_lock); + + metaslab_group_add(mg, msp); + + /* + * If we're opening an existing pool (txg == 0) or creating + * a new one (txg == TXG_INITIAL), all space is available now. + * If we're adding space to an existing pool, the new space + * does not become available until after this txg has synced. + */ + if (txg <= TXG_INITIAL) + metaslab_sync_done(msp, 0); + + if (txg != 0) { + /* + * The vdev is dirty, but the metaslab isn't -- it just needs + * to have metaslab_sync_done() invoked from vdev_sync_done(). + * [We could just dirty the metaslab, but that would cause us + * to allocate a space map object for it, which is wasteful + * and would mess up the locality logic in metaslab_weight().] + */ + ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa)); + vdev_dirty(vd, 0, NULL, txg); + vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg)); + } + + return (msp); +} + +void +metaslab_fini(metaslab_t *msp) +{ + metaslab_group_t *mg = msp->ms_group; + int t; + + vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size, + -msp->ms_smo.smo_alloc, B_TRUE); + + metaslab_group_remove(mg, msp); + + mutex_enter(&msp->ms_lock); + + space_map_unload(&msp->ms_map); + space_map_destroy(&msp->ms_map); + + for (t = 0; t < TXG_SIZE; t++) { + space_map_destroy(&msp->ms_allocmap[t]); + space_map_destroy(&msp->ms_freemap[t]); + } + + mutex_exit(&msp->ms_lock); + mutex_destroy(&msp->ms_lock); + + kmem_free(msp, sizeof (metaslab_t)); +} + +#define METASLAB_WEIGHT_PRIMARY (1ULL << 63) +#define METASLAB_WEIGHT_SECONDARY (1ULL << 62) +#define METASLAB_ACTIVE_MASK \ + (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY) +#define METASLAB_SMO_BONUS_MULTIPLIER 2 + +static uint64_t +metaslab_weight(metaslab_t *msp) +{ + metaslab_group_t *mg = msp->ms_group; + space_map_t *sm = &msp->ms_map; + space_map_obj_t *smo = &msp->ms_smo; + vdev_t *vd = mg->mg_vd; + uint64_t weight, space; + + ASSERT(MUTEX_HELD(&msp->ms_lock)); + + /* + * The baseline weight is the metaslab's free space. 
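 *
 * For example (illustrative figures): with 100 units free, the
 * adjustments below yield a weight of 200 for the vdev's first
 * metaslab, tapering to just over 100 for its last one; the result is
 * doubled again when the metaslab already has a space map object on
 * disk, and the METASLAB_WEIGHT_* bits are OR'd in last so that active
 * metaslabs always sort ahead of inactive ones.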
+ */ + space = sm->sm_size - smo->smo_alloc; + weight = space; + + /* + * Modern disks have uniform bit density and constant angular velocity. + * Therefore, the outer recording zones are faster (higher bandwidth) + * than the inner zones by the ratio of outer to inner track diameter, + * which is typically around 2:1. We account for this by assigning + * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). + * In effect, this means that we'll select the metaslab with the most + * free bandwidth rather than simply the one with the most free space. + */ + weight = 2 * weight - + ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count; + ASSERT(weight >= space && weight <= 2 * space); + + /* + * For locality, assign higher weight to metaslabs we've used before. + */ + if (smo->smo_object != 0) + weight *= METASLAB_SMO_BONUS_MULTIPLIER; + ASSERT(weight >= space && + weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space); + + /* + * If this metaslab is one we're actively using, adjust its weight to + * make it preferable to any inactive metaslab so we'll polish it off. + */ + weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); + + return (weight); +} + +static int +metaslab_activate(metaslab_t *msp, uint64_t activation_weight) +{ + space_map_t *sm = &msp->ms_map; + + ASSERT(MUTEX_HELD(&msp->ms_lock)); + + if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { + int error = space_map_load(sm, &metaslab_ff_ops, + SM_FREE, &msp->ms_smo, + msp->ms_group->mg_vd->vdev_spa->spa_meta_objset); + if (error) { + metaslab_group_sort(msp->ms_group, msp, 0); + return (error); + } + metaslab_group_sort(msp->ms_group, msp, + msp->ms_weight | activation_weight); + } + ASSERT(sm->sm_loaded); + ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); + + return (0); +} + +static void +metaslab_passivate(metaslab_t *msp, uint64_t size) +{ + /* + * If size < SPA_MINBLOCKSIZE, then we will not allocate from + * this metaslab again. In that case, it had better be empty, + * or we would be leaving space on the table. + */ + ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0); + metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size)); + ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0); +} + +/* + * Write a metaslab to disk in the context of the specified transaction group. + */ +void +metaslab_sync(metaslab_t *msp, uint64_t txg) +{ + vdev_t *vd = msp->ms_group->mg_vd; + spa_t *spa = vd->vdev_spa; + objset_t *mos = spa->spa_meta_objset; + space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK]; + space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK]; + space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK]; + space_map_t *sm = &msp->ms_map; + space_map_obj_t *smo = &msp->ms_smo_syncing; + dmu_buf_t *db; + dmu_tx_t *tx; + int t; + + tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); + + /* + * The only state that can actually be changing concurrently with + * metaslab_sync() is the metaslab's ms_map. No other thread can + * be modifying this txg's allocmap, freemap, freed_map, or smo. + * Therefore, we only hold ms_lock to satify space_map ASSERTs. + * We drop it whenever we call into the DMU, because the DMU + * can call down to us (e.g. via zio_free()) at any time. 
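 *
 * (Concretely: this is why ms_lock is dropped and re-taken around the
 * dmu_object_alloc()/dmu_write() pair and the space_map_truncate() call
 * below, and released for good before the final dmu_bonus_hold() and
 * dmu_tx_commit().)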
+ */ + mutex_enter(&msp->ms_lock); + + if (smo->smo_object == 0) { + ASSERT(smo->smo_objsize == 0); + ASSERT(smo->smo_alloc == 0); + mutex_exit(&msp->ms_lock); + smo->smo_object = dmu_object_alloc(mos, + DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT, + DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); + ASSERT(smo->smo_object != 0); + dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * + (sm->sm_start >> vd->vdev_ms_shift), + sizeof (uint64_t), &smo->smo_object, tx); + mutex_enter(&msp->ms_lock); + } + + space_map_walk(freemap, space_map_add, freed_map); + + if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >= + 2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) { + /* + * The in-core space map representation is twice as compact + * as the on-disk one, so it's time to condense the latter + * by generating a pure allocmap from first principles. + * + * This metaslab is 100% allocated, + * minus the content of the in-core map (sm), + * minus what's been freed this txg (freed_map), + * minus allocations from txgs in the future + * (because they haven't been committed yet). + */ + space_map_vacate(allocmap, NULL, NULL); + space_map_vacate(freemap, NULL, NULL); + + space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size); + + space_map_walk(sm, space_map_remove, allocmap); + space_map_walk(freed_map, space_map_remove, allocmap); + + for (t = 1; t < TXG_CONCURRENT_STATES; t++) + space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK], + space_map_remove, allocmap); + + mutex_exit(&msp->ms_lock); + space_map_truncate(smo, mos, tx); + mutex_enter(&msp->ms_lock); + } + + space_map_sync(allocmap, SM_ALLOC, smo, mos, tx); + space_map_sync(freemap, SM_FREE, smo, mos, tx); + + mutex_exit(&msp->ms_lock); + + VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); + dmu_buf_will_dirty(db, tx); + ASSERT3U(db->db_size, >=, sizeof (*smo)); + bcopy(smo, db->db_data, sizeof (*smo)); + dmu_buf_rele(db, FTAG); + + dmu_tx_commit(tx); +} + +/* + * Called after a transaction group has completely synced to mark + * all of the metaslab's free space as usable. + */ +void +metaslab_sync_done(metaslab_t *msp, uint64_t txg) +{ + space_map_obj_t *smo = &msp->ms_smo; + space_map_obj_t *smosync = &msp->ms_smo_syncing; + space_map_t *sm = &msp->ms_map; + space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK]; + metaslab_group_t *mg = msp->ms_group; + vdev_t *vd = mg->mg_vd; + int t; + + mutex_enter(&msp->ms_lock); + + /* + * If this metaslab is just becoming available, initialize its + * allocmaps and freemaps and add its capacity to the vdev. + */ + if (freed_map->sm_size == 0) { + for (t = 0; t < TXG_SIZE; t++) { + space_map_create(&msp->ms_allocmap[t], sm->sm_start, + sm->sm_size, sm->sm_shift, sm->sm_lock); + space_map_create(&msp->ms_freemap[t], sm->sm_start, + sm->sm_size, sm->sm_shift, sm->sm_lock); + } + vdev_space_update(vd, sm->sm_size, 0, B_TRUE); + } + + vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc, B_TRUE); + + ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0); + ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0); + + /* + * If there's a space_map_load() in progress, wait for it to complete + * so that we have a consistent view of the in-core space map. + * Then, add everything we freed in this txg to the map. + */ + space_map_load_wait(sm); + space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm); + + *smo = *smosync; + + /* + * If the map is loaded but no longer active, evict it as soon as all + * future allocations have synced. 
(If we unloaded it now and then + * loaded a moment later, the map wouldn't reflect those allocations.) + */ + if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { + int evictable = 1; + + for (t = 1; t < TXG_CONCURRENT_STATES; t++) + if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space) + evictable = 0; + + if (evictable) + space_map_unload(sm); + } + + metaslab_group_sort(mg, msp, metaslab_weight(msp)); + + mutex_exit(&msp->ms_lock); +} + +static uint64_t +metaslab_distance(metaslab_t *msp, dva_t *dva) +{ + uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift; + uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift; + uint64_t start = msp->ms_map.sm_start >> ms_shift; + + if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) + return (1ULL << 63); + + if (offset < start) + return ((start - offset) << ms_shift); + if (offset > start) + return ((offset - start) << ms_shift); + return (0); +} + +static uint64_t +metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg, + uint64_t min_distance, dva_t *dva, int d) +{ + metaslab_t *msp = NULL; + uint64_t offset = -1ULL; + avl_tree_t *t = &mg->mg_metaslab_tree; + uint64_t activation_weight; + uint64_t target_distance; + int i; + + activation_weight = METASLAB_WEIGHT_PRIMARY; + for (i = 0; i < d; i++) + if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) + activation_weight = METASLAB_WEIGHT_SECONDARY; + + for (;;) { + mutex_enter(&mg->mg_lock); + for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) { + if (msp->ms_weight < size) { + mutex_exit(&mg->mg_lock); + return (-1ULL); + } + + if (activation_weight == METASLAB_WEIGHT_PRIMARY) + break; + + target_distance = min_distance + + (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1); + + for (i = 0; i < d; i++) + if (metaslab_distance(msp, &dva[i]) < + target_distance) + break; + if (i == d) + break; + } + mutex_exit(&mg->mg_lock); + if (msp == NULL) + return (-1ULL); + + mutex_enter(&msp->ms_lock); + + /* + * Ensure that the metaslab we have selected is still + * capable of handling our request. It's possible that + * another thread may have changed the weight while we + * were blocked on the metaslab lock. + */ + if (msp->ms_weight < size) { + mutex_exit(&msp->ms_lock); + continue; + } + + if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) && + activation_weight == METASLAB_WEIGHT_PRIMARY) { + metaslab_passivate(msp, + msp->ms_weight & ~METASLAB_ACTIVE_MASK); + mutex_exit(&msp->ms_lock); + continue; + } + + if (metaslab_activate(msp, activation_weight) != 0) { + mutex_exit(&msp->ms_lock); + continue; + } + + if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL) + break; + + metaslab_passivate(msp, size - 1); + + mutex_exit(&msp->ms_lock); + } + + if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0) + vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); + + space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size); + + mutex_exit(&msp->ms_lock); + + return (offset); +} + +/* + * Allocate a block for the specified i/o. + */ +static int +metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, + dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags) +{ + metaslab_group_t *mg, *rotor; + vdev_t *vd; + int dshift = 3; + int all_zero; + uint64_t offset = -1ULL; + uint64_t asize; + uint64_t distance; + + ASSERT(!DVA_IS_VALID(&dva[d])); + + /* + * For testing, make some blocks above a certain size be gang blocks. 
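 *
 * (For illustration: metaslab_gang_bang defaults to SPA_MAXBLOCKSIZE + 1,
 * so this trigger is normally off; lowering the tunable makes roughly one
 * in four qualifying allocations, whenever (lbolt & 3) == 0, return
 * ENOSPC here so the caller falls back to a gang block.)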
+ */ + if (psize >= metaslab_gang_bang && (lbolt & 3) == 0) + return (ENOSPC); + + /* + * Start at the rotor and loop through all mgs until we find something. + * Note that there's no locking on mc_rotor or mc_allocated because + * nothing actually breaks if we miss a few updates -- we just won't + * allocate quite as evenly. It all balances out over time. + * + * If we are doing ditto or log blocks, try to spread them across + * consecutive vdevs. If we're forced to reuse a vdev before we've + * allocated all of our ditto blocks, then try and spread them out on + * that vdev as much as possible. If it turns out to not be possible, + * gradually lower our standards until anything becomes acceptable. + * Also, allocating on consecutive vdevs (as opposed to random vdevs) + * gives us hope of containing our fault domains to something we're + * able to reason about. Otherwise, any two top-level vdev failures + * will guarantee the loss of data. With consecutive allocation, + * only two adjacent top-level vdev failures will result in data loss. + * + * If we are doing gang blocks (hintdva is non-NULL), try to keep + * ourselves on the same vdev as our gang block header. That + * way, we can hope for locality in vdev_cache, plus it makes our + * fault domains something tractable. + */ + if (hintdva) { + vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); + if (flags & METASLAB_HINTBP_AVOID) + mg = vd->vdev_mg->mg_next; + else + mg = vd->vdev_mg; + } else if (d != 0) { + vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); + mg = vd->vdev_mg->mg_next; + } else { + mg = mc->mc_rotor; + } + + /* + * If the hint put us into the wrong class, just follow the rotor. + */ + if (mg->mg_class != mc) + mg = mc->mc_rotor; + + rotor = mg; +top: + all_zero = B_TRUE; + do { + vd = mg->mg_vd; + /* + * Don't allocate from faulted devices. + */ + if (!vdev_allocatable(vd)) + goto next; + /* + * Avoid writing single-copy data to a failing vdev + */ + if ((vd->vdev_stat.vs_write_errors > 0 || + vd->vdev_state < VDEV_STATE_HEALTHY) && + d == 0 && dshift == 3) { + all_zero = B_FALSE; + goto next; + } + + ASSERT(mg->mg_class == mc); + + distance = vd->vdev_asize >> dshift; + if (distance <= (1ULL << vd->vdev_ms_shift)) + distance = 0; + else + all_zero = B_FALSE; + + asize = vdev_psize_to_asize(vd, psize); + ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); + + offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d); + if (offset != -1ULL) { + /* + * If we've just selected this metaslab group, + * figure out whether the corresponding vdev is + * over- or under-used relative to the pool, + * and set an allocation bias to even it out. + */ + if (mc->mc_allocated == 0) { + vdev_stat_t *vs = &vd->vdev_stat; + uint64_t alloc, space; + int64_t vu, su; + + alloc = spa_get_alloc(spa); + space = spa_get_space(spa); + + /* + * Determine percent used in units of 0..1024. + * (This is just to avoid floating point.) + */ + vu = (vs->vs_alloc << 10) / (vs->vs_space + 1); + su = (alloc << 10) / (space + 1); + + /* + * Bias by at most +/- 25% of the aliquot. 
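 *
 * For example (illustrative figures): if this vdev is 50% full
 * (vu = 512) while the pool overall is 75% full (su = 768), the line
 * below gives mg_bias = 256 * mg_aliquot / 4096 = mg_aliquot / 16, so
 * the rotor lingers about 6% longer on the under-used vdev; the 25%
 * cap corresponds to |su - vu| at its maximum of 1024.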
+ */ + mg->mg_bias = ((su - vu) * + (int64_t)mg->mg_aliquot) / (1024 * 4); + } + + if (atomic_add_64_nv(&mc->mc_allocated, asize) >= + mg->mg_aliquot + mg->mg_bias) { + mc->mc_rotor = mg->mg_next; + mc->mc_allocated = 0; + } + + DVA_SET_VDEV(&dva[d], vd->vdev_id); + DVA_SET_OFFSET(&dva[d], offset); + DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER)); + DVA_SET_ASIZE(&dva[d], asize); + + return (0); + } +next: + mc->mc_rotor = mg->mg_next; + mc->mc_allocated = 0; + } while ((mg = mg->mg_next) != rotor); + + if (!all_zero) { + dshift++; + ASSERT(dshift < 64); + goto top; + } + + bzero(&dva[d], sizeof (dva_t)); + + return (ENOSPC); +} + +/* + * Free the block represented by DVA in the context of the specified + * transaction group. + */ +static void +metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now) +{ + uint64_t vdev = DVA_GET_VDEV(dva); + uint64_t offset = DVA_GET_OFFSET(dva); + uint64_t size = DVA_GET_ASIZE(dva); + vdev_t *vd; + metaslab_t *msp; + + ASSERT(DVA_IS_VALID(dva)); + + if (txg > spa_freeze_txg(spa)) + return; + + if ((vd = vdev_lookup_top(spa, vdev)) == NULL || + (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { + cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu", + (u_longlong_t)vdev, (u_longlong_t)offset); + ASSERT(0); + return; + } + + msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; + + if (DVA_GET_GANG(dva)) + size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); + + mutex_enter(&msp->ms_lock); + + if (now) { + space_map_remove(&msp->ms_allocmap[txg & TXG_MASK], + offset, size); + space_map_free(&msp->ms_map, offset, size); + } else { + if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0) + vdev_dirty(vd, VDD_METASLAB, msp, txg); + space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size); + } + + mutex_exit(&msp->ms_lock); +} + +/* + * Intent log support: upon opening the pool after a crash, notify the SPA + * of blocks that the intent log has allocated for immediate write, but + * which are still considered free by the SPA because the last transaction + * group didn't commit yet. 
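 *
 * (Roughly, for illustration: at pool open zil_claim() walks each
 * dataset's intent log and issues a claim for every log block, which
 * lands in metaslab_claim_dva() below so the allocator treats those
 * blocks as in use until the log is replayed or freed.)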
+ */ +static int +metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) +{ + uint64_t vdev = DVA_GET_VDEV(dva); + uint64_t offset = DVA_GET_OFFSET(dva); + uint64_t size = DVA_GET_ASIZE(dva); + vdev_t *vd; + metaslab_t *msp; + int error; + + ASSERT(DVA_IS_VALID(dva)); + + if ((vd = vdev_lookup_top(spa, vdev)) == NULL || + (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) + return (ENXIO); + + msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; + + if (DVA_GET_GANG(dva)) + size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); + + mutex_enter(&msp->ms_lock); + + error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY); + if (error || txg == 0) { /* txg == 0 indicates dry run */ + mutex_exit(&msp->ms_lock); + return (error); + } + + space_map_claim(&msp->ms_map, offset, size); + + if (spa_mode & FWRITE) { /* don't dirty if we're zdb(1M) */ + if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0) + vdev_dirty(vd, VDD_METASLAB, msp, txg); + space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size); + } + + mutex_exit(&msp->ms_lock); + + return (0); +} + +int +metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, + int ndvas, uint64_t txg, blkptr_t *hintbp, int flags) +{ + dva_t *dva = bp->blk_dva; + dva_t *hintdva = hintbp->blk_dva; + int error = 0; + + ASSERT(bp->blk_birth == 0); + + spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); + + if (mc->mc_rotor == NULL) { /* no vdevs in this class */ + spa_config_exit(spa, SCL_ALLOC, FTAG); + return (ENOSPC); + } + + ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); + ASSERT(BP_GET_NDVAS(bp) == 0); + ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); + + for (int d = 0; d < ndvas; d++) { + error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, + txg, flags); + if (error) { + for (d--; d >= 0; d--) { + metaslab_free_dva(spa, &dva[d], txg, B_TRUE); + bzero(&dva[d], sizeof (dva_t)); + } + spa_config_exit(spa, SCL_ALLOC, FTAG); + return (error); + } + } + ASSERT(error == 0); + ASSERT(BP_GET_NDVAS(bp) == ndvas); + + spa_config_exit(spa, SCL_ALLOC, FTAG); + + bp->blk_birth = txg; + + return (0); +} + +void +metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) +{ + const dva_t *dva = bp->blk_dva; + int ndvas = BP_GET_NDVAS(bp); + + ASSERT(!BP_IS_HOLE(bp)); + ASSERT(!now || bp->blk_birth >= spa->spa_syncing_txg); + + spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); + + for (int d = 0; d < ndvas; d++) + metaslab_free_dva(spa, &dva[d], txg, now); + + spa_config_exit(spa, SCL_FREE, FTAG); +} + +int +metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) +{ + const dva_t *dva = bp->blk_dva; + int ndvas = BP_GET_NDVAS(bp); + int error = 0; + + ASSERT(!BP_IS_HOLE(bp)); + + if (txg != 0) { + /* + * First do a dry run to make sure all DVAs are claimable, + * so we don't have to unwind from partial failures below. 
+ */ + if ((error = metaslab_claim(spa, bp, 0)) != 0) + return (error); + } + + spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); + + for (int d = 0; d < ndvas; d++) + if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0) + break; + + spa_config_exit(spa, SCL_ALLOC, FTAG); + + ASSERT(error == 0 || txg == 0); + + return (error); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/refcount.c b/external/cddl/osnet/dist/uts/common/fs/zfs/refcount.c new file mode 100644 index 000000000000..f1b3b23fe2b3 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/refcount.c @@ -0,0 +1,195 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +#if defined(DEBUG) || !defined(_KERNEL) + +#ifdef _KERNEL +int reference_tracking_enable = FALSE; /* runs out of memory too easily */ +#else +int reference_tracking_enable = TRUE; +#endif +int reference_history = 4; /* tunable */ + +static kmem_cache_t *reference_cache; +static kmem_cache_t *reference_history_cache; + +void +refcount_init(void) +{ + reference_cache = kmem_cache_create("reference_cache", + sizeof (reference_t), 0, NULL, NULL, NULL, NULL, NULL, 0); + + reference_history_cache = kmem_cache_create("reference_history_cache", + sizeof (uint64_t), 0, NULL, NULL, NULL, NULL, NULL, 0); +} + +void +refcount_fini(void) +{ + kmem_cache_destroy(reference_cache); + kmem_cache_destroy(reference_history_cache); +} + +void +refcount_create(refcount_t *rc) +{ + mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL); + list_create(&rc->rc_list, sizeof (reference_t), + offsetof(reference_t, ref_link)); + list_create(&rc->rc_removed, sizeof (reference_t), + offsetof(reference_t, ref_link)); + rc->rc_count = 0; + rc->rc_removed_count = 0; +} + +void +refcount_destroy_many(refcount_t *rc, uint64_t number) +{ + reference_t *ref; + + ASSERT(rc->rc_count == number); + while (ref = list_head(&rc->rc_list)) { + list_remove(&rc->rc_list, ref); + kmem_cache_free(reference_cache, ref); + } + list_destroy(&rc->rc_list); + + while (ref = list_head(&rc->rc_removed)) { + list_remove(&rc->rc_removed, ref); + kmem_cache_free(reference_history_cache, ref->ref_removed); + kmem_cache_free(reference_cache, ref); + } + list_destroy(&rc->rc_removed); + mutex_destroy(&rc->rc_mtx); +} + +void +refcount_destroy(refcount_t *rc) +{ + refcount_destroy_many(rc, 0); +} + +int +refcount_is_zero(refcount_t *rc) +{ + ASSERT(rc->rc_count >= 0); + return (rc->rc_count == 0); +} + +int64_t +refcount_count(refcount_t *rc) +{ + ASSERT(rc->rc_count >= 0); + return (rc->rc_count); +} + +int64_t +refcount_add_many(refcount_t *rc, uint64_t number, 
void *holder) +{ + reference_t *ref; + int64_t count; + + if (reference_tracking_enable) { + ref = kmem_cache_alloc(reference_cache, KM_SLEEP); + ref->ref_holder = holder; + ref->ref_number = number; + } + mutex_enter(&rc->rc_mtx); + ASSERT(rc->rc_count >= 0); + if (reference_tracking_enable) + list_insert_head(&rc->rc_list, ref); + rc->rc_count += number; + count = rc->rc_count; + mutex_exit(&rc->rc_mtx); + + return (count); +} + +int64_t +refcount_add(refcount_t *rc, void *holder) +{ + return (refcount_add_many(rc, 1, holder)); +} + +int64_t +refcount_remove_many(refcount_t *rc, uint64_t number, void *holder) +{ + reference_t *ref; + int64_t count; + + mutex_enter(&rc->rc_mtx); + ASSERT(rc->rc_count >= number); + + if (!reference_tracking_enable) { + rc->rc_count -= number; + count = rc->rc_count; + mutex_exit(&rc->rc_mtx); + return (count); + } + + for (ref = list_head(&rc->rc_list); ref; + ref = list_next(&rc->rc_list, ref)) { + if (ref->ref_holder == holder && ref->ref_number == number) { + list_remove(&rc->rc_list, ref); + if (reference_history > 0) { + ref->ref_removed = + kmem_cache_alloc(reference_history_cache, + KM_SLEEP); + list_insert_head(&rc->rc_removed, ref); + rc->rc_removed_count++; + if (rc->rc_removed_count >= reference_history) { + ref = list_tail(&rc->rc_removed); + list_remove(&rc->rc_removed, ref); + kmem_cache_free(reference_history_cache, + ref->ref_removed); + kmem_cache_free(reference_cache, ref); + rc->rc_removed_count--; + } + } else { + kmem_cache_free(reference_cache, ref); + } + rc->rc_count -= number; + count = rc->rc_count; + mutex_exit(&rc->rc_mtx); + return (count); + } + } + panic("No such hold %p on refcount %llx", holder, + (u_longlong_t)(uintptr_t)rc); + return (-1); +} + +int64_t +refcount_remove(refcount_t *rc, void *holder) +{ + return (refcount_remove_many(rc, 1, holder)); +} + +#endif diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/rrwlock.c b/external/cddl/osnet/dist/uts/common/fs/zfs/rrwlock.c new file mode 100644 index 000000000000..710685dbc71e --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/rrwlock.c @@ -0,0 +1,249 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +/* + * This file contains the implementation of a re-entrant read + * reader/writer lock (aka "rrwlock"). + * + * This is a normal reader/writer lock with the additional feature + * of allowing threads who have already obtained a read lock to + * re-enter another read lock (re-entrant read) - even if there are + * waiting writers. + * + * Callers who have not obtained a read lock give waiting writers priority. 
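 *
 * For example (illustrative call sequence; "rrl" and FTAG are just
 * placeholder names):
 *
 *	rrw_enter(&rrl, RW_READER, FTAG);
 *	rrw_enter(&rrl, RW_READER, FTAG);	(legal re-entrant read, even
 *						 with a writer queued)
 *	rrw_exit(&rrl, FTAG);
 *	rrw_exit(&rrl, FTAG);
 *
 * whereas a thread holding nothing on rrl would block in its first
 * rrw_enter() until the queued writer has acquired and dropped the lock.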
+ * + * The rrwlock_t lock does not allow re-entrant writers, nor does it + * allow a re-entrant mix of reads and writes (that is, it does not + * allow a caller who has already obtained a read lock to be able to + * then grab a write lock without first dropping all read locks, and + * vice versa). + * + * The rrwlock_t uses tsd (thread specific data) to keep a list of + * nodes (rrw_node_t), where each node keeps track of which specific + * lock (rrw_node_t::rn_rrl) the thread has grabbed. Since re-entering + * should be rare, a thread that grabs multiple reads on the same rrwlock_t + * will store multiple rrw_node_ts of the same 'rrn_rrl'. Nodes on the + * tsd list can represent a different rrwlock_t. This allows a thread + * to enter multiple and unique rrwlock_ts for read locks at the same time. + * + * Since using tsd exposes some overhead, the rrwlock_t only needs to + * keep tsd data when writers are waiting. If no writers are waiting, then + * a reader just bumps the anonymous read count (rr_anon_rcount) - no tsd + * is needed. Once a writer attempts to grab the lock, readers then + * keep tsd data and bump the linked readers count (rr_linked_rcount). + * + * If there are waiting writers and there are anonymous readers, then a + * reader doesn't know if it is a re-entrant lock. But since it may be one, + * we allow the read to proceed (otherwise it could deadlock). Since once + * waiting writers are active, readers no longer bump the anonymous count, + * the anonymous readers will eventually flush themselves out. At this point, + * readers will be able to tell if they are a re-entrant lock (have a + * rrw_node_t entry for the lock) or not. If they are a re-entrant lock, then + * we must let the proceed. If they are not, then the reader blocks for the + * waiting writers. Hence, we do not starve writers. + */ + +/* global key for TSD */ +uint_t rrw_tsd_key; + +typedef struct rrw_node { + struct rrw_node *rn_next; + rrwlock_t *rn_rrl; +} rrw_node_t; + +static rrw_node_t * +rrn_find(rrwlock_t *rrl) +{ + rrw_node_t *rn; + + if (refcount_count(&rrl->rr_linked_rcount) == 0) + return (NULL); + + for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) { + if (rn->rn_rrl == rrl) + return (rn); + } + return (NULL); +} + +/* + * Add a node to the head of the singly linked list. + */ +static void +rrn_add(rrwlock_t *rrl) +{ + rrw_node_t *rn; + + rn = kmem_alloc(sizeof (*rn), KM_SLEEP); + rn->rn_rrl = rrl; + rn->rn_next = tsd_get(rrw_tsd_key); + VERIFY(tsd_set(rrw_tsd_key, rn) == 0); +} + +/* + * If a node is found for 'rrl', then remove the node from this + * thread's list and return TRUE; otherwise return FALSE. 
+ */ +static boolean_t +rrn_find_and_remove(rrwlock_t *rrl) +{ + rrw_node_t *rn; + rrw_node_t *prev = NULL; + + if (refcount_count(&rrl->rr_linked_rcount) == 0) + return (NULL); + + for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) { + if (rn->rn_rrl == rrl) { + if (prev) + prev->rn_next = rn->rn_next; + else + VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0); + kmem_free(rn, sizeof (*rn)); + return (B_TRUE); + } + prev = rn; + } + return (B_FALSE); +} + +void +rrw_init(rrwlock_t *rrl) +{ + mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL); + cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL); + rrl->rr_writer = NULL; + refcount_create(&rrl->rr_anon_rcount); + refcount_create(&rrl->rr_linked_rcount); + rrl->rr_writer_wanted = B_FALSE; +} + +void +rrw_destroy(rrwlock_t *rrl) +{ + mutex_destroy(&rrl->rr_lock); + cv_destroy(&rrl->rr_cv); + ASSERT(rrl->rr_writer == NULL); + refcount_destroy(&rrl->rr_anon_rcount); + refcount_destroy(&rrl->rr_linked_rcount); +} + +static void +rrw_enter_read(rrwlock_t *rrl, void *tag) +{ + mutex_enter(&rrl->rr_lock); + ASSERT(rrl->rr_writer != curthread); + ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0); + + while (rrl->rr_writer || (rrl->rr_writer_wanted && + refcount_is_zero(&rrl->rr_anon_rcount) && + rrn_find(rrl) == NULL)) + cv_wait(&rrl->rr_cv, &rrl->rr_lock); + + if (rrl->rr_writer_wanted) { + /* may or may not be a re-entrant enter */ + rrn_add(rrl); + (void) refcount_add(&rrl->rr_linked_rcount, tag); + } else { + (void) refcount_add(&rrl->rr_anon_rcount, tag); + } + ASSERT(rrl->rr_writer == NULL); + mutex_exit(&rrl->rr_lock); +} + +static void +rrw_enter_write(rrwlock_t *rrl) +{ + mutex_enter(&rrl->rr_lock); + ASSERT(rrl->rr_writer != curthread); + + while (refcount_count(&rrl->rr_anon_rcount) > 0 || + refcount_count(&rrl->rr_linked_rcount) > 0 || + rrl->rr_writer != NULL) { + rrl->rr_writer_wanted = B_TRUE; + cv_wait(&rrl->rr_cv, &rrl->rr_lock); + } + rrl->rr_writer_wanted = B_FALSE; + rrl->rr_writer = curthread; + mutex_exit(&rrl->rr_lock); +} + +void +rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag) +{ + if (rw == RW_READER) + rrw_enter_read(rrl, tag); + else + rrw_enter_write(rrl); +} + +void +rrw_exit(rrwlock_t *rrl, void *tag) +{ + mutex_enter(&rrl->rr_lock); + ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) || + !refcount_is_zero(&rrl->rr_linked_rcount) || + rrl->rr_writer != NULL); + + if (rrl->rr_writer == NULL) { + if (rrn_find_and_remove(rrl)) { + if (refcount_remove(&rrl->rr_linked_rcount, tag) == 0) + cv_broadcast(&rrl->rr_cv); + + } else { + if (refcount_remove(&rrl->rr_anon_rcount, tag) == 0) + cv_broadcast(&rrl->rr_cv); + } + } else { + ASSERT(rrl->rr_writer == curthread); + ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) && + refcount_is_zero(&rrl->rr_linked_rcount)); + rrl->rr_writer = NULL; + cv_broadcast(&rrl->rr_cv); + } + mutex_exit(&rrl->rr_lock); +} + +boolean_t +rrw_held(rrwlock_t *rrl, krw_t rw) +{ + boolean_t held; + + mutex_enter(&rrl->rr_lock); + if (rw == RW_WRITER) { + held = (rrl->rr_writer == curthread); + } else { + held = (!refcount_is_zero(&rrl->rr_anon_rcount) || + !refcount_is_zero(&rrl->rr_linked_rcount)); + } + mutex_exit(&rrl->rr_lock); + + return (held); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sha256.c b/external/cddl/osnet/dist/uts/common/fs/zfs/sha256.c new file mode 100644 index 000000000000..ca7076cb6fd9 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sha256.c @@ -0,0 +1,129 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + 
* Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +/* + * SHA-256 checksum, as specified in FIPS 180-3, available at: + * http://csrc.nist.gov/publications/PubsFIPS.html + * + * This is a very compact implementation of SHA-256. + * It is designed to be simple and portable, not to be fast. + */ + +/* + * The literal definitions of Ch() and Maj() according to FIPS 180-3 are: + * + * Ch(x, y, z) (x & y) ^ (~x & z) + * Maj(x, y, z) (x & y) ^ (x & z) ^ (y & z) + * + * We use equivalent logical reductions here that require one less op. + */ +#define Ch(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) +#define Maj(x, y, z) (((x) & (y)) ^ ((z) & ((x) ^ (y)))) +#define Rot32(x, s) (((x) >> s) | ((x) << (32 - s))) +#define SIGMA0(x) (Rot32(x, 2) ^ Rot32(x, 13) ^ Rot32(x, 22)) +#define SIGMA1(x) (Rot32(x, 6) ^ Rot32(x, 11) ^ Rot32(x, 25)) +#define sigma0(x) (Rot32(x, 7) ^ Rot32(x, 18) ^ ((x) >> 3)) +#define sigma1(x) (Rot32(x, 17) ^ Rot32(x, 19) ^ ((x) >> 10)) + +static const uint32_t SHA256_K[64] = { + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 +}; + +static void +SHA256Transform(uint32_t *H, const uint8_t *cp) +{ + uint32_t a, b, c, d, e, f, g, h, t, T1, T2, W[64]; + + for (t = 0; t < 16; t++, cp += 4) + W[t] = (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | cp[3]; + + for (t = 16; t < 64; t++) + W[t] = sigma1(W[t - 2]) + W[t - 7] + + sigma0(W[t - 15]) + W[t - 16]; + + a = H[0]; b = H[1]; c = H[2]; d = H[3]; + e = H[4]; f = H[5]; g = H[6]; h = H[7]; + + for (t = 0; t < 64; t++) { + T1 = h + SIGMA1(e) + Ch(e, f, g) + SHA256_K[t] + W[t]; + T2 = SIGMA0(a) + Maj(a, b, c); + h = g; g = f; f = e; e = d + T1; + d = c; c = b; b = a; a = T1 + T2; + } + + H[0] += a; H[1] += b; H[2] += c; H[3] += d; + H[4] += e; H[5] += f; H[6] += g; H[7] += h; +} + +void +zio_checksum_SHA256(const void *buf, uint64_t size, zio_cksum_t *zcp) +{ + uint32_t H[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 
0x1f83d9ab, 0x5be0cd19 }; + uint8_t pad[128]; + int i, padsize; + + for (i = 0; i < (size & ~63ULL); i += 64) + SHA256Transform(H, (uint8_t *)buf + i); + + for (padsize = 0; i < size; i++) + pad[padsize++] = *((uint8_t *)buf + i); + + for (pad[padsize++] = 0x80; (padsize & 63) != 56; padsize++) + pad[padsize] = 0; + + for (i = 56; i >= 0; i -= 8) + pad[padsize++] = (size << 3) >> i; + + for (i = 0; i < padsize; i += 64) + SHA256Transform(H, pad + i); + + ZIO_SET_CHECKSUM(zcp, + (uint64_t)H[0] << 32 | H[1], + (uint64_t)H[2] << 32 | H[3], + (uint64_t)H[4] << 32 | H[5], + (uint64_t)H[6] << 32 | H[7]); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/spa.c b/external/cddl/osnet/dist/uts/common/fs/zfs/spa.c new file mode 100644 index 000000000000..fb1b96f8b811 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/spa.c @@ -0,0 +1,4301 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * This file contains all the routines used when modifying on-disk SPA state. + * This includes opening, importing, destroying, exporting a pool, and syncing a + * pool. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "zfs_prop.h" +#include "zfs_comutil.h" + +int zio_taskq_threads[ZIO_TYPES][ZIO_TASKQ_TYPES] = { + /* ISSUE INTR */ + { 1, 1 }, /* ZIO_TYPE_NULL */ + { 1, 8 }, /* ZIO_TYPE_READ */ + { 8, 1 }, /* ZIO_TYPE_WRITE */ + { 1, 1 }, /* ZIO_TYPE_FREE */ + { 1, 1 }, /* ZIO_TYPE_CLAIM */ + { 1, 1 }, /* ZIO_TYPE_IOCTL */ +}; + +static void spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx); +static boolean_t spa_has_active_shared_spare(spa_t *spa); + +/* + * ========================================================================== + * SPA properties routines + * ========================================================================== + */ + +/* + * Add a (source=src, propname=propval) list to an nvlist. 
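 */

/*
 * Known-answer sketch for zio_checksum_SHA256() from the sha256.c hunk
 * above (a hypothetical self-test, not part of either file), using the
 * standard FIPS test vector SHA-256("abc").
 */
static void
zio_checksum_SHA256_selftest(void)
{
        zio_cksum_t zc;

        zio_checksum_SHA256("abc", 3, &zc);

        ASSERT(zc.zc_word[0] == 0xba7816bf8f01cfeaULL);
        ASSERT(zc.zc_word[1] == 0x414140de5dae2223ULL);
        ASSERT(zc.zc_word[2] == 0xb00361a396177a9cULL);
        ASSERT(zc.zc_word[3] == 0xb410ff61f20015adULL);
}

/*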
+ */ +static void +spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval, + uint64_t intval, zprop_source_t src) +{ + const char *propname = zpool_prop_to_name(prop); + nvlist_t *propval; + + VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0); + + if (strval != NULL) + VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0); + else + VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0); + + VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0); + nvlist_free(propval); +} + +/* + * Get property values from the spa configuration. + */ +static void +spa_prop_get_config(spa_t *spa, nvlist_t **nvp) +{ + uint64_t size = spa_get_space(spa); + uint64_t used = spa_get_alloc(spa); + uint64_t cap, version; + zprop_source_t src = ZPROP_SRC_NONE; + spa_config_dirent_t *dp; + + ASSERT(MUTEX_HELD(&spa->spa_props_lock)); + + /* + * readonly properties + */ + spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src); + spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src); + spa_prop_add_list(*nvp, ZPOOL_PROP_USED, NULL, used, src); + spa_prop_add_list(*nvp, ZPOOL_PROP_AVAILABLE, NULL, size - used, src); + + cap = (size == 0) ? 0 : (used * 100 / size); + spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src); + + spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src); + spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL, + spa->spa_root_vdev->vdev_state, src); + + /* + * settable properties that are not stored in the pool property object. + */ + version = spa_version(spa); + if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) + src = ZPROP_SRC_DEFAULT; + else + src = ZPROP_SRC_LOCAL; + spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src); + + if (spa->spa_root != NULL) + spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root, + 0, ZPROP_SRC_LOCAL); + + if ((dp = list_head(&spa->spa_config_list)) != NULL) { + if (dp->scd_path == NULL) { + spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, + "none", 0, ZPROP_SRC_LOCAL); + } else if (strcmp(dp->scd_path, spa_config_path) != 0) { + spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE, + dp->scd_path, 0, ZPROP_SRC_LOCAL); + } + } +} + +/* + * Get zpool property values. + */ +int +spa_prop_get(spa_t *spa, nvlist_t **nvp) +{ + zap_cursor_t zc; + zap_attribute_t za; + objset_t *mos = spa->spa_meta_objset; + int err; + + VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + mutex_enter(&spa->spa_props_lock); + + /* + * Get properties from the spa config. + */ + spa_prop_get_config(spa, nvp); + + /* If no pool property object, no more prop to get. */ + if (spa->spa_pool_props_object == 0) { + mutex_exit(&spa->spa_props_lock); + return (0); + } + + /* + * Get properties from the MOS pool property object. 
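 *
 * Each property retrieved here (and by spa_prop_get_config() above) ends
 * up in *nvp as a nested nvlist keyed by the property name, e.g.
 * (illustrative value only):
 *
 *	"cachefile" -> { ZPROP_SOURCE = ZPROP_SRC_LOCAL,
 *			 ZPROP_VALUE  = "/etc/zfs/alt.cache" }
 *
 * as assembled by spa_prop_add_list().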
+ */ + for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object); + (err = zap_cursor_retrieve(&zc, &za)) == 0; + zap_cursor_advance(&zc)) { + uint64_t intval = 0; + char *strval = NULL; + zprop_source_t src = ZPROP_SRC_DEFAULT; + zpool_prop_t prop; + + if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL) + continue; + + switch (za.za_integer_length) { + case 8: + /* integer property */ + if (za.za_first_integer != + zpool_prop_default_numeric(prop)) + src = ZPROP_SRC_LOCAL; + + if (prop == ZPOOL_PROP_BOOTFS) { + dsl_pool_t *dp; + dsl_dataset_t *ds = NULL; + + dp = spa_get_dsl(spa); + rw_enter(&dp->dp_config_rwlock, RW_READER); + if (err = dsl_dataset_hold_obj(dp, + za.za_first_integer, FTAG, &ds)) { + rw_exit(&dp->dp_config_rwlock); + break; + } + + strval = kmem_alloc( + MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, + KM_SLEEP); + dsl_dataset_name(ds, strval); + dsl_dataset_rele(ds, FTAG); + rw_exit(&dp->dp_config_rwlock); + } else { + strval = NULL; + intval = za.za_first_integer; + } + + spa_prop_add_list(*nvp, prop, strval, intval, src); + + if (strval != NULL) + kmem_free(strval, + MAXNAMELEN + strlen(MOS_DIR_NAME) + 1); + + break; + + case 1: + /* string property */ + strval = kmem_alloc(za.za_num_integers, KM_SLEEP); + err = zap_lookup(mos, spa->spa_pool_props_object, + za.za_name, 1, za.za_num_integers, strval); + if (err) { + kmem_free(strval, za.za_num_integers); + break; + } + spa_prop_add_list(*nvp, prop, strval, 0, src); + kmem_free(strval, za.za_num_integers); + break; + + default: + break; + } + } + zap_cursor_fini(&zc); + mutex_exit(&spa->spa_props_lock); +out: + if (err && err != ENOENT) { + nvlist_free(*nvp); + *nvp = NULL; + return (err); + } + + return (0); +} + +/* + * Validate the given pool properties nvlist and modify the list + * for the property values to be set. 
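 *
 * For example: a request to set bootfs arrives as a dataset name string;
 * spa_prop_validate() opens that dataset, rejects gzip-compressed ones,
 * and on success replaces the string in the nvlist with the dataset's
 * object number, so the sync task stores a dsobj rather than a name.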
+ */ +static int +spa_prop_validate(spa_t *spa, nvlist_t *props) +{ + nvpair_t *elem; + int error = 0, reset_bootfs = 0; + uint64_t objnum; + + elem = NULL; + while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { + zpool_prop_t prop; + char *propname, *strval; + uint64_t intval; + objset_t *os; + char *slash; + + propname = nvpair_name(elem); + + if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) + return (EINVAL); + + switch (prop) { + case ZPOOL_PROP_VERSION: + error = nvpair_value_uint64(elem, &intval); + if (!error && + (intval < spa_version(spa) || intval > SPA_VERSION)) + error = EINVAL; + break; + + case ZPOOL_PROP_DELEGATION: + case ZPOOL_PROP_AUTOREPLACE: + case ZPOOL_PROP_LISTSNAPS: + error = nvpair_value_uint64(elem, &intval); + if (!error && intval > 1) + error = EINVAL; + break; + + case ZPOOL_PROP_BOOTFS: + if (spa_version(spa) < SPA_VERSION_BOOTFS) { + error = ENOTSUP; + break; + } + + /* + * Make sure the vdev config is bootable + */ + if (!vdev_is_bootable(spa->spa_root_vdev)) { + error = ENOTSUP; + break; + } + + reset_bootfs = 1; + + error = nvpair_value_string(elem, &strval); + + if (!error) { + uint64_t compress; + + if (strval == NULL || strval[0] == '\0') { + objnum = zpool_prop_default_numeric( + ZPOOL_PROP_BOOTFS); + break; + } + + if (error = dmu_objset_open(strval, DMU_OST_ZFS, + DS_MODE_USER | DS_MODE_READONLY, &os)) + break; + + /* We don't support gzip bootable datasets */ + if ((error = dsl_prop_get_integer(strval, + zfs_prop_to_name(ZFS_PROP_COMPRESSION), + &compress, NULL)) == 0 && + !BOOTFS_COMPRESS_VALID(compress)) { + error = ENOTSUP; + } else { + objnum = dmu_objset_id(os); + } + dmu_objset_close(os); + } + break; + + case ZPOOL_PROP_FAILUREMODE: + error = nvpair_value_uint64(elem, &intval); + if (!error && (intval < ZIO_FAILURE_MODE_WAIT || + intval > ZIO_FAILURE_MODE_PANIC)) + error = EINVAL; + + /* + * This is a special case which only occurs when + * the pool has completely failed. This allows + * the user to change the in-core failmode property + * without syncing it out to disk (I/Os might + * currently be blocked). We do this by returning + * EIO to the caller (spa_prop_set) to trick it + * into thinking we encountered a property validation + * error. + */ + if (!error && spa_suspended(spa)) { + spa->spa_failmode = intval; + error = EIO; + } + break; + + case ZPOOL_PROP_CACHEFILE: + if ((error = nvpair_value_string(elem, &strval)) != 0) + break; + + if (strval[0] == '\0') + break; + + if (strcmp(strval, "none") == 0) + break; + + if (strval[0] != '/') { + error = EINVAL; + break; + } + + slash = strrchr(strval, '/'); + ASSERT(slash != NULL); + + if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || + strcmp(slash, "/..") == 0) + error = EINVAL; + break; + } + + if (error) + break; + } + + if (!error && reset_bootfs) { + error = nvlist_remove(props, + zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING); + + if (!error) { + error = nvlist_add_uint64(props, + zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum); + } + } + + return (error); +} + +int +spa_prop_set(spa_t *spa, nvlist_t *nvp) +{ + int error; + + if ((error = spa_prop_validate(spa, nvp)) != 0) + return (error); + + return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_sync_props, + spa, nvp, 3)); +} + +/* + * If the bootfs property value is dsobj, clear it. 
+ */ +void +spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx) +{ + if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) { + VERIFY(zap_remove(spa->spa_meta_objset, + spa->spa_pool_props_object, + zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0); + spa->spa_bootfs = 0; + } +} + +/* + * ========================================================================== + * SPA state manipulation (open/create/destroy/import/export) + * ========================================================================== + */ + +static int +spa_error_entry_compare(const void *a, const void *b) +{ + spa_error_entry_t *sa = (spa_error_entry_t *)a; + spa_error_entry_t *sb = (spa_error_entry_t *)b; + int ret; + + ret = bcmp(&sa->se_bookmark, &sb->se_bookmark, + sizeof (zbookmark_t)); + + if (ret < 0) + return (-1); + else if (ret > 0) + return (1); + else + return (0); +} + +/* + * Utility function which retrieves copies of the current logs and + * re-initializes them in the process. + */ +void +spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub) +{ + ASSERT(MUTEX_HELD(&spa->spa_errlist_lock)); + + bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t)); + bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t)); + + avl_create(&spa->spa_errlist_scrub, + spa_error_entry_compare, sizeof (spa_error_entry_t), + offsetof(spa_error_entry_t, se_avl)); + avl_create(&spa->spa_errlist_last, + spa_error_entry_compare, sizeof (spa_error_entry_t), + offsetof(spa_error_entry_t, se_avl)); +} + +/* + * Activate an uninitialized pool. + */ +static void +spa_activate(spa_t *spa) +{ + ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); + + spa->spa_state = POOL_STATE_ACTIVE; + + spa->spa_normal_class = metaslab_class_create(); + spa->spa_log_class = metaslab_class_create(); + + for (int t = 0; t < ZIO_TYPES; t++) { + for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { + spa->spa_zio_taskq[t][q] = taskq_create("spa_zio", + zio_taskq_threads[t][q], maxclsyspri, 50, + INT_MAX, TASKQ_PREPOPULATE); + } + } + + list_create(&spa->spa_config_dirty_list, sizeof (vdev_t), + offsetof(vdev_t, vdev_config_dirty_node)); + list_create(&spa->spa_state_dirty_list, sizeof (vdev_t), + offsetof(vdev_t, vdev_state_dirty_node)); + + txg_list_create(&spa->spa_vdev_txg_list, + offsetof(struct vdev, vdev_txg_node)); + + avl_create(&spa->spa_errlist_scrub, + spa_error_entry_compare, sizeof (spa_error_entry_t), + offsetof(spa_error_entry_t, se_avl)); + avl_create(&spa->spa_errlist_last, + spa_error_entry_compare, sizeof (spa_error_entry_t), + offsetof(spa_error_entry_t, se_avl)); +} + +/* + * Opposite of spa_activate(). + */ +static void +spa_deactivate(spa_t *spa) +{ + ASSERT(spa->spa_sync_on == B_FALSE); + ASSERT(spa->spa_dsl_pool == NULL); + ASSERT(spa->spa_root_vdev == NULL); + + ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED); + + txg_list_destroy(&spa->spa_vdev_txg_list); + + list_destroy(&spa->spa_config_dirty_list); + list_destroy(&spa->spa_state_dirty_list); + + for (int t = 0; t < ZIO_TYPES; t++) { + for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { + taskq_destroy(spa->spa_zio_taskq[t][q]); + spa->spa_zio_taskq[t][q] = NULL; + } + } + + metaslab_class_destroy(spa->spa_normal_class); + spa->spa_normal_class = NULL; + + metaslab_class_destroy(spa->spa_log_class); + spa->spa_log_class = NULL; + + /* + * If this was part of an import or the open otherwise failed, we may + * still have errors left in the queues. Empty them just in case. 
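+ * spa_errlog_drain() below releases any entries still queued before the
+ * scrub/last AVL trees are destroyed.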
+ */ + spa_errlog_drain(spa); + + avl_destroy(&spa->spa_errlist_scrub); + avl_destroy(&spa->spa_errlist_last); + + spa->spa_state = POOL_STATE_UNINITIALIZED; +} + +/* + * Verify a pool configuration, and construct the vdev tree appropriately. This + * will create all the necessary vdevs in the appropriate layout, with each vdev + * in the CLOSED state. This will prep the pool before open/creation/import. + * All vdev validation is done by the vdev_alloc() routine. + */ +static int +spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, + uint_t id, int atype) +{ + nvlist_t **child; + uint_t c, children; + int error; + + if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) + return (error); + + if ((*vdp)->vdev_ops->vdev_op_leaf) + return (0); + + error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + &child, &children); + + if (error == ENOENT) + return (0); + + if (error) { + vdev_free(*vdp); + *vdp = NULL; + return (EINVAL); + } + + for (c = 0; c < children; c++) { + vdev_t *vd; + if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, + atype)) != 0) { + vdev_free(*vdp); + *vdp = NULL; + return (error); + } + } + + ASSERT(*vdp != NULL); + + return (0); +} + +/* + * Opposite of spa_load(). + */ +static void +spa_unload(spa_t *spa) +{ + int i; + + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + /* + * Stop async tasks. + */ + spa_async_suspend(spa); + + /* + * Stop syncing. + */ + if (spa->spa_sync_on) { + txg_sync_stop(spa->spa_dsl_pool); + spa->spa_sync_on = B_FALSE; + } + + /* + * Wait for any outstanding async I/O to complete. + */ + mutex_enter(&spa->spa_async_root_lock); + while (spa->spa_async_root_count != 0) + cv_wait(&spa->spa_async_root_cv, &spa->spa_async_root_lock); + mutex_exit(&spa->spa_async_root_lock); + + /* + * Drop and purge level 2 cache + */ + spa_l2cache_drop(spa); + + /* + * Close the dsl pool. + */ + if (spa->spa_dsl_pool) { + dsl_pool_close(spa->spa_dsl_pool); + spa->spa_dsl_pool = NULL; + } + + /* + * Close all vdevs. + */ + if (spa->spa_root_vdev) + vdev_free(spa->spa_root_vdev); + ASSERT(spa->spa_root_vdev == NULL); + + for (i = 0; i < spa->spa_spares.sav_count; i++) + vdev_free(spa->spa_spares.sav_vdevs[i]); + if (spa->spa_spares.sav_vdevs) { + kmem_free(spa->spa_spares.sav_vdevs, + spa->spa_spares.sav_count * sizeof (void *)); + spa->spa_spares.sav_vdevs = NULL; + } + if (spa->spa_spares.sav_config) { + nvlist_free(spa->spa_spares.sav_config); + spa->spa_spares.sav_config = NULL; + } + spa->spa_spares.sav_count = 0; + + for (i = 0; i < spa->spa_l2cache.sav_count; i++) + vdev_free(spa->spa_l2cache.sav_vdevs[i]); + if (spa->spa_l2cache.sav_vdevs) { + kmem_free(spa->spa_l2cache.sav_vdevs, + spa->spa_l2cache.sav_count * sizeof (void *)); + spa->spa_l2cache.sav_vdevs = NULL; + } + if (spa->spa_l2cache.sav_config) { + nvlist_free(spa->spa_l2cache.sav_config); + spa->spa_l2cache.sav_config = NULL; + } + spa->spa_l2cache.sav_count = 0; + + spa->spa_async_suspended = 0; +} + +/* + * Load (or re-load) the current list of vdevs describing the active spares for + * this pool. When this is called, we have some form of basic information in + * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and + * then re-generate a more complete list including status information. + */ +static void +spa_load_spares(spa_t *spa) +{ + nvlist_t **spares; + uint_t nspares; + int i; + vdev_t *vd, *tvd; + + ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); + + /* + * First, close and free any existing spare vdevs. 
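+ * If the same device is also present in the active vdev tree as a
+ * spare, spa_spare_remove() is applied to that vdev before the copy on
+ * the spare list is closed and freed.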
+ */ + for (i = 0; i < spa->spa_spares.sav_count; i++) { + vd = spa->spa_spares.sav_vdevs[i]; + + /* Undo the call to spa_activate() below */ + if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, + B_FALSE)) != NULL && tvd->vdev_isspare) + spa_spare_remove(tvd); + vdev_close(vd); + vdev_free(vd); + } + + if (spa->spa_spares.sav_vdevs) + kmem_free(spa->spa_spares.sav_vdevs, + spa->spa_spares.sav_count * sizeof (void *)); + + if (spa->spa_spares.sav_config == NULL) + nspares = 0; + else + VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, + ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); + + spa->spa_spares.sav_count = (int)nspares; + spa->spa_spares.sav_vdevs = NULL; + + if (nspares == 0) + return; + + /* + * Construct the array of vdevs, opening them to get status in the + * process. For each spare, there is potentially two different vdev_t + * structures associated with it: one in the list of spares (used only + * for basic validation purposes) and one in the active vdev + * configuration (if it's spared in). During this phase we open and + * validate each vdev on the spare list. If the vdev also exists in the + * active configuration, then we also mark this vdev as an active spare. + */ + spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *), + KM_SLEEP); + for (i = 0; i < spa->spa_spares.sav_count; i++) { + VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0, + VDEV_ALLOC_SPARE) == 0); + ASSERT(vd != NULL); + + spa->spa_spares.sav_vdevs[i] = vd; + + if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid, + B_FALSE)) != NULL) { + if (!tvd->vdev_isspare) + spa_spare_add(tvd); + + /* + * We only mark the spare active if we were successfully + * able to load the vdev. Otherwise, importing a pool + * with a bad active spare would result in strange + * behavior, because multiple pool would think the spare + * is actively in use. + * + * There is a vulnerability here to an equally bizarre + * circumstance, where a dead active spare is later + * brought back to life (onlined or otherwise). Given + * the rarity of this scenario, and the extra complexity + * it adds, we ignore the possibility. + */ + if (!vdev_is_dead(tvd)) + spa_spare_activate(tvd); + } + + vd->vdev_top = vd; + + if (vdev_open(vd) != 0) + continue; + + if (vdev_validate_aux(vd) == 0) + spa_spare_add(vd); + } + + /* + * Recompute the stashed list of spares, with status information + * this time. + */ + VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, + DATA_TYPE_NVLIST_ARRAY) == 0); + + spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *), + KM_SLEEP); + for (i = 0; i < spa->spa_spares.sav_count; i++) + spares[i] = vdev_config_generate(spa, + spa->spa_spares.sav_vdevs[i], B_TRUE, B_TRUE, B_FALSE); + VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, + ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0); + for (i = 0; i < spa->spa_spares.sav_count; i++) + nvlist_free(spares[i]); + kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); +} + +/* + * Load (or re-load) the current list of vdevs describing the active l2cache for + * this pool. When this is called, we have some form of basic information in + * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and + * then re-generate a more complete list including status information. + * Devices which are already active have their details maintained, and are + * not re-opened. 
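+ * A device counts as "already active" when its GUID matches an entry in
+ * the previous sav_vdevs array; that vdev_t is simply carried over into
+ * the new list.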
+ */ +static void +spa_load_l2cache(spa_t *spa) +{ + nvlist_t **l2cache; + uint_t nl2cache; + int i, j, oldnvdevs; + uint64_t guid, size; + vdev_t *vd, **oldvdevs, **newvdevs; + spa_aux_vdev_t *sav = &spa->spa_l2cache; + + ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); + + if (sav->sav_config != NULL) { + VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, + ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); + newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP); + } else { + nl2cache = 0; + } + + oldvdevs = sav->sav_vdevs; + oldnvdevs = sav->sav_count; + sav->sav_vdevs = NULL; + sav->sav_count = 0; + + /* + * Process new nvlist of vdevs. + */ + for (i = 0; i < nl2cache; i++) { + VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID, + &guid) == 0); + + newvdevs[i] = NULL; + for (j = 0; j < oldnvdevs; j++) { + vd = oldvdevs[j]; + if (vd != NULL && guid == vd->vdev_guid) { + /* + * Retain previous vdev for add/remove ops. + */ + newvdevs[i] = vd; + oldvdevs[j] = NULL; + break; + } + } + + if (newvdevs[i] == NULL) { + /* + * Create new vdev + */ + VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0, + VDEV_ALLOC_L2CACHE) == 0); + ASSERT(vd != NULL); + newvdevs[i] = vd; + + /* + * Commit this vdev as an l2cache device, + * even if it fails to open. + */ + spa_l2cache_add(vd); + + vd->vdev_top = vd; + vd->vdev_aux = sav; + + spa_l2cache_activate(vd); + + if (vdev_open(vd) != 0) + continue; + + (void) vdev_validate_aux(vd); + + if (!vdev_is_dead(vd)) { + size = vdev_get_rsize(vd); + l2arc_add_vdev(spa, vd, + VDEV_LABEL_START_SIZE, + size - VDEV_LABEL_START_SIZE); + } + } + } + + /* + * Purge vdevs that were dropped + */ + for (i = 0; i < oldnvdevs; i++) { + uint64_t pool; + + vd = oldvdevs[i]; + if (vd != NULL) { + if ((spa_mode & FWRITE) && + spa_l2cache_exists(vd->vdev_guid, &pool) && + pool != 0ULL && + l2arc_vdev_present(vd)) { + l2arc_remove_vdev(vd); + } + (void) vdev_close(vd); + spa_l2cache_remove(vd); + } + } + + if (oldvdevs) + kmem_free(oldvdevs, oldnvdevs * sizeof (void *)); + + if (sav->sav_config == NULL) + goto out; + + sav->sav_vdevs = newvdevs; + sav->sav_count = (int)nl2cache; + + /* + * Recompute the stashed list of l2cache devices, with status + * information this time. + */ + VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE, + DATA_TYPE_NVLIST_ARRAY) == 0); + + l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); + for (i = 0; i < sav->sav_count; i++) + l2cache[i] = vdev_config_generate(spa, + sav->sav_vdevs[i], B_TRUE, B_FALSE, B_TRUE); + VERIFY(nvlist_add_nvlist_array(sav->sav_config, + ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0); +out: + for (i = 0; i < sav->sav_count; i++) + nvlist_free(l2cache[i]); + if (sav->sav_count) + kmem_free(l2cache, sav->sav_count * sizeof (void *)); +} + +static int +load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) +{ + dmu_buf_t *db; + char *packed = NULL; + size_t nvsize = 0; + int error; + *value = NULL; + + VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); + nvsize = *(uint64_t *)db->db_data; + dmu_buf_rele(db, FTAG); + + packed = kmem_alloc(nvsize, KM_SLEEP); + error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed); + if (error == 0) + error = nvlist_unpack(packed, nvsize, value, 0); + kmem_free(packed, nvsize); + + return (error); +} + +/* + * Checks to see if the given vdev could not be opened, in which case we post a + * sysevent to notify the autoreplace code that the device has been removed. 
+ */ +static void +spa_check_removed(vdev_t *vd) +{ + int c; + + for (c = 0; c < vd->vdev_children; c++) + spa_check_removed(vd->vdev_child[c]); + + if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) { + zfs_post_autoreplace(vd->vdev_spa, vd); + spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK); + } +} + +/* + * Check for missing log devices + */ +int +spa_check_logs(spa_t *spa) +{ + switch (spa->spa_log_state) { + case SPA_LOG_MISSING: + /* need to recheck in case slog has been restored */ + case SPA_LOG_UNKNOWN: + if (dmu_objset_find(spa->spa_name, zil_check_log_chain, NULL, + DS_FIND_CHILDREN)) { + spa->spa_log_state = SPA_LOG_MISSING; + return (1); + } + break; + + case SPA_LOG_CLEAR: + (void) dmu_objset_find(spa->spa_name, zil_clear_log_chain, NULL, + DS_FIND_CHILDREN); + break; + } + spa->spa_log_state = SPA_LOG_GOOD; + return (0); +} + +/* + * Load an existing storage pool, using the pool's builtin spa_config as a + * source of configuration information. + */ +static int +spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig) +{ + int error = 0; + nvlist_t *nvroot = NULL; + vdev_t *rvd; + uberblock_t *ub = &spa->spa_uberblock; + uint64_t config_cache_txg = spa->spa_config_txg; + uint64_t pool_guid; + uint64_t version; + uint64_t autoreplace = 0; + char *ereport = FM_EREPORT_ZFS_POOL; + + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + spa->spa_load_state = state; + + if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) || + nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) { + error = EINVAL; + goto out; + } + + /* + * Versioning wasn't explicitly added to the label until later, so if + * it's not present treat it as the initial version. + */ + if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) != 0) + version = SPA_VERSION_INITIAL; + + (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, + &spa->spa_config_txg); + + if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && + spa_guid_exists(pool_guid, 0)) { + error = EEXIST; + goto out; + } + + spa->spa_load_guid = pool_guid; + + /* + * Parse the configuration into a vdev tree. We explicitly set the + * value that will be returned by spa_version() since parsing the + * configuration requires knowing the version number. + */ + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa->spa_ubsync.ub_version = version; + error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_LOAD); + spa_config_exit(spa, SCL_ALL, FTAG); + + if (error != 0) + goto out; + + ASSERT(spa->spa_root_vdev == rvd); + ASSERT(spa_guid(spa) == pool_guid); + + /* + * Try to open all vdevs, loading each label in the process. + */ + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + error = vdev_open(rvd); + spa_config_exit(spa, SCL_ALL, FTAG); + if (error != 0) + goto out; + + /* + * Validate the labels for all leaf vdevs. We need to grab the config + * lock because all label I/O is done with ZIO_FLAG_CONFIG_WRITER. + */ + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + error = vdev_validate(rvd); + spa_config_exit(spa, SCL_ALL, FTAG); + + if (error != 0) + goto out; + + if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { + error = ENXIO; + goto out; + } + + /* + * Find the best uberblock. + */ + vdev_uberblock_load(NULL, rvd, ub); + + /* + * If we weren't able to find a single valid uberblock, return failure. 
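+ * A txg of zero means vdev_uberblock_load() found nothing usable, so
+ * the root vdev is marked CANT_OPEN/CORRUPT_DATA and the load fails
+ * with ENXIO.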
+ */ + if (ub->ub_txg == 0) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = ENXIO; + goto out; + } + + /* + * If the pool is newer than the code, we can't open it. + */ + if (ub->ub_version > SPA_VERSION) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_VERSION_NEWER); + error = ENOTSUP; + goto out; + } + + /* + * If the vdev guid sum doesn't match the uberblock, we have an + * incomplete configuration. + */ + if (rvd->vdev_guid_sum != ub->ub_guid_sum && mosconfig) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_BAD_GUID_SUM); + error = ENXIO; + goto out; + } + + /* + * Initialize internal SPA structures. + */ + spa->spa_state = POOL_STATE_ACTIVE; + spa->spa_ubsync = spa->spa_uberblock; + spa->spa_first_txg = spa_last_synced_txg(spa) + 1; + error = dsl_pool_open(spa, spa->spa_first_txg, &spa->spa_dsl_pool); + if (error) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + goto out; + } + spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; + + if (zap_lookup(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, + sizeof (uint64_t), 1, &spa->spa_config_object) != 0) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + if (!mosconfig) { + nvlist_t *newconfig; + uint64_t hostid; + + if (load_nvlist(spa, spa->spa_config_object, &newconfig) != 0) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + if (!spa_is_root(spa) && nvlist_lookup_uint64(newconfig, + ZPOOL_CONFIG_HOSTID, &hostid) == 0) { + char *hostname; + unsigned long myhostid = 0; + + VERIFY(nvlist_lookup_string(newconfig, + ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); + + (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); + if (hostid != 0 && myhostid != 0 && + (unsigned long)hostid != myhostid) { + cmn_err(CE_WARN, "pool '%s' could not be " + "loaded as it was last accessed by " + "another system (host: %s hostid: 0x%lx). " + "See: http://www.sun.com/msg/ZFS-8000-EY", + spa_name(spa), hostname, + (unsigned long)hostid); + error = EBADF; + goto out; + } + } + + spa_config_set(spa, newconfig); + spa_unload(spa); + spa_deactivate(spa); + spa_activate(spa); + + return (spa_load(spa, newconfig, state, B_TRUE)); + } + + if (zap_lookup(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, + sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj) != 0) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + /* + * Load the bit that tells us to use the new accounting function + * (raid-z deflation). If we have an older pool, this will not + * be present. + */ + error = zap_lookup(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, + sizeof (uint64_t), 1, &spa->spa_deflate); + if (error != 0 && error != ENOENT) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + /* + * Load the persistent error log. If we have an older pool, this will + * not be present. 
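+ * As with the other optional MOS directory entries, ENOENT is tolerated
+ * here; any other lookup failure is treated as pool corruption.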
+ */ + error = zap_lookup(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST, + sizeof (uint64_t), 1, &spa->spa_errlog_last); + if (error != 0 && error != ENOENT) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + error = zap_lookup(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB, + sizeof (uint64_t), 1, &spa->spa_errlog_scrub); + if (error != 0 && error != ENOENT) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + /* + * Load the history object. If we have an older pool, this + * will not be present. + */ + error = zap_lookup(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_HISTORY, + sizeof (uint64_t), 1, &spa->spa_history); + if (error != 0 && error != ENOENT) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + /* + * Load any hot spares for this pool. + */ + error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_SPARES, sizeof (uint64_t), 1, &spa->spa_spares.sav_object); + if (error != 0 && error != ENOENT) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + if (error == 0) { + ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); + if (load_nvlist(spa, spa->spa_spares.sav_object, + &spa->spa_spares.sav_config) != 0) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa_load_spares(spa); + spa_config_exit(spa, SCL_ALL, FTAG); + } + + /* + * Load any level 2 ARC devices for this pool. + */ + error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_L2CACHE, sizeof (uint64_t), 1, + &spa->spa_l2cache.sav_object); + if (error != 0 && error != ENOENT) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + if (error == 0) { + ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); + if (load_nvlist(spa, spa->spa_l2cache.sav_object, + &spa->spa_l2cache.sav_config) != 0) { + vdev_set_state(rvd, B_TRUE, + VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa_load_l2cache(spa); + spa_config_exit(spa, SCL_ALL, FTAG); + } + + if (spa_check_logs(spa)) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_BAD_LOG); + error = ENXIO; + ereport = FM_EREPORT_ZFS_LOG_REPLAY; + goto out; + } + + + spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); + + error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_PROPS, sizeof (uint64_t), 1, &spa->spa_pool_props_object); + + if (error && error != ENOENT) { + vdev_set_state(rvd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + error = EIO; + goto out; + } + + if (error == 0) { + (void) zap_lookup(spa->spa_meta_objset, + spa->spa_pool_props_object, + zpool_prop_to_name(ZPOOL_PROP_BOOTFS), + sizeof (uint64_t), 1, &spa->spa_bootfs); + (void) zap_lookup(spa->spa_meta_objset, + spa->spa_pool_props_object, + zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), + sizeof (uint64_t), 1, &autoreplace); + (void) zap_lookup(spa->spa_meta_objset, + spa->spa_pool_props_object, + zpool_prop_to_name(ZPOOL_PROP_DELEGATION), + sizeof (uint64_t), 1, &spa->spa_delegation); + (void) zap_lookup(spa->spa_meta_objset, + 
spa->spa_pool_props_object, + zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE), + sizeof (uint64_t), 1, &spa->spa_failmode); + } + + /* + * If the 'autoreplace' property is set, then post a resource notifying + * the ZFS DE that it should not issue any faults for unopenable + * devices. We also iterate over the vdevs, and post a sysevent for any + * unopenable vdevs so that the normal autoreplace handler can take + * over. + */ + if (autoreplace && state != SPA_LOAD_TRYIMPORT) + spa_check_removed(spa->spa_root_vdev); + + /* + * Load the vdev state for all toplevel vdevs. + */ + vdev_load(rvd); + + /* + * Propagate the leaf DTLs we just loaded all the way up the tree. + */ + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + vdev_dtl_reassess(rvd, 0, 0, B_FALSE); + spa_config_exit(spa, SCL_ALL, FTAG); + + /* + * Check the state of the root vdev. If it can't be opened, it + * indicates one or more toplevel vdevs are faulted. + */ + if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) { + error = ENXIO; + goto out; + } + + if ((spa_mode & FWRITE) && state != SPA_LOAD_TRYIMPORT) { + dmu_tx_t *tx; + int need_update = B_FALSE; + int c; + + /* + * Claim log blocks that haven't been committed yet. + * This must all happen in a single txg. + */ + tx = dmu_tx_create_assigned(spa_get_dsl(spa), + spa_first_txg(spa)); + (void) dmu_objset_find(spa_name(spa), + zil_claim, tx, DS_FIND_CHILDREN); + dmu_tx_commit(tx); + + spa->spa_sync_on = B_TRUE; + txg_sync_start(spa->spa_dsl_pool); + + /* + * Wait for all claims to sync. + */ + txg_wait_synced(spa->spa_dsl_pool, 0); + + /* + * If the config cache is stale, or we have uninitialized + * metaslabs (see spa_vdev_add()), then update the config. + */ + if (config_cache_txg != spa->spa_config_txg || + state == SPA_LOAD_IMPORT) + need_update = B_TRUE; + + for (c = 0; c < rvd->vdev_children; c++) + if (rvd->vdev_child[c]->vdev_ms_array == 0) + need_update = B_TRUE; + + /* + * Update the config cache asychronously in case we're the + * root pool, in which case the config cache isn't writable yet. + */ + if (need_update) + spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); + } + + error = 0; +out: + spa->spa_minref = refcount_count(&spa->spa_refcount); + if (error && error != EBADF) + zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); + spa->spa_load_state = SPA_LOAD_NONE; + spa->spa_ena = 0; + + return (error); +} + +/* + * Pool Open/Import + * + * The import case is identical to an open except that the configuration is sent + * down from userland, instead of grabbed from the configuration cache. For the + * case of an open, the pool configuration will exist in the + * POOL_STATE_UNINITIALIZED state. + * + * The stats information (gen/count/ustats) is used to gather vdev statistics at + * the same time open the pool, without having to keep around the spa_t in some + * ambiguous state. + */ +static int +spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t **config) +{ + spa_t *spa; + int error; + int locked = B_FALSE; + + *spapp = NULL; + + /* + * As disgusting as this is, we need to support recursive calls to this + * function because dsl_dir_open() is called during spa_load(), and ends + * up calling spa_open() again. The real fix is to figure out how to + * avoid dsl_dir_open() calling this in the first place. 
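+ * Hence the mutex_owner() check below: the namespace lock is taken (and
+ * later dropped) only when the current thread does not already hold it.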
+ */ + if (mutex_owner(&spa_namespace_lock) != curthread) { + mutex_enter(&spa_namespace_lock); + locked = B_TRUE; + } + + if ((spa = spa_lookup(pool)) == NULL) { + if (locked) + mutex_exit(&spa_namespace_lock); + return (ENOENT); + } + if (spa->spa_state == POOL_STATE_UNINITIALIZED) { + + spa_activate(spa); + + error = spa_load(spa, spa->spa_config, SPA_LOAD_OPEN, B_FALSE); + + if (error == EBADF) { + /* + * If vdev_validate() returns failure (indicated by + * EBADF), it indicates that one of the vdevs indicates + * that the pool has been exported or destroyed. If + * this is the case, the config cache is out of sync and + * we should remove the pool from the namespace. + */ + spa_unload(spa); + spa_deactivate(spa); + spa_config_sync(spa, B_TRUE, B_TRUE); + spa_remove(spa); + if (locked) + mutex_exit(&spa_namespace_lock); + return (ENOENT); + } + + if (error) { + /* + * We can't open the pool, but we still have useful + * information: the state of each vdev after the + * attempted vdev_open(). Return this to the user. + */ + if (config != NULL && spa->spa_root_vdev != NULL) + *config = spa_config_generate(spa, NULL, -1ULL, + B_TRUE); + spa_unload(spa); + spa_deactivate(spa); + spa->spa_last_open_failed = B_TRUE; + if (locked) + mutex_exit(&spa_namespace_lock); + *spapp = NULL; + return (error); + } else { + spa->spa_last_open_failed = B_FALSE; + } + } + + spa_open_ref(spa, tag); + + if (locked) + mutex_exit(&spa_namespace_lock); + + *spapp = spa; + + if (config != NULL) + *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); + + return (0); +} + +int +spa_open(const char *name, spa_t **spapp, void *tag) +{ + return (spa_open_common(name, spapp, tag, NULL)); +} + +/* + * Lookup the given spa_t, incrementing the inject count in the process, + * preventing it from being exported or destroyed. + */ +spa_t * +spa_inject_addref(char *name) +{ + spa_t *spa; + + mutex_enter(&spa_namespace_lock); + if ((spa = spa_lookup(name)) == NULL) { + mutex_exit(&spa_namespace_lock); + return (NULL); + } + spa->spa_inject_ref++; + mutex_exit(&spa_namespace_lock); + + return (spa); +} + +void +spa_inject_delref(spa_t *spa) +{ + mutex_enter(&spa_namespace_lock); + spa->spa_inject_ref--; + mutex_exit(&spa_namespace_lock); +} + +/* + * Add spares device information to the nvlist. + */ +static void +spa_add_spares(spa_t *spa, nvlist_t *config) +{ + nvlist_t **spares; + uint_t i, nspares; + nvlist_t *nvroot; + uint64_t guid; + vdev_stat_t *vs; + uint_t vsc; + uint64_t pool; + + if (spa->spa_spares.sav_count == 0) + return; + + VERIFY(nvlist_lookup_nvlist(config, + ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); + VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, + ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); + if (nspares != 0) { + VERIFY(nvlist_add_nvlist_array(nvroot, + ZPOOL_CONFIG_SPARES, spares, nspares) == 0); + VERIFY(nvlist_lookup_nvlist_array(nvroot, + ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); + + /* + * Go through and find any spares which have since been + * repurposed as an active spare. If this is the case, update + * their status appropriately. + */ + for (i = 0; i < nspares; i++) { + VERIFY(nvlist_lookup_uint64(spares[i], + ZPOOL_CONFIG_GUID, &guid) == 0); + if (spa_spare_exists(guid, &pool, NULL) && + pool != 0ULL) { + VERIFY(nvlist_lookup_uint64_array( + spares[i], ZPOOL_CONFIG_STATS, + (uint64_t **)&vs, &vsc) == 0); + vs->vs_state = VDEV_STATE_CANT_OPEN; + vs->vs_aux = VDEV_AUX_SPARED; + } + } + } +} + +/* + * Add l2cache device information to the nvlist, including vdev stats. 
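+ * Unlike spa_add_spares(), this takes SCL_CONFIG as reader, presumably
+ * so that vdev_get_stats() runs against a stable set of l2cache vdevs.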
+ */ +static void +spa_add_l2cache(spa_t *spa, nvlist_t *config) +{ + nvlist_t **l2cache; + uint_t i, j, nl2cache; + nvlist_t *nvroot; + uint64_t guid; + vdev_t *vd; + vdev_stat_t *vs; + uint_t vsc; + + if (spa->spa_l2cache.sav_count == 0) + return; + + spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); + + VERIFY(nvlist_lookup_nvlist(config, + ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); + VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, + ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); + if (nl2cache != 0) { + VERIFY(nvlist_add_nvlist_array(nvroot, + ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); + VERIFY(nvlist_lookup_nvlist_array(nvroot, + ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); + + /* + * Update level 2 cache device stats. + */ + + for (i = 0; i < nl2cache; i++) { + VERIFY(nvlist_lookup_uint64(l2cache[i], + ZPOOL_CONFIG_GUID, &guid) == 0); + + vd = NULL; + for (j = 0; j < spa->spa_l2cache.sav_count; j++) { + if (guid == + spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { + vd = spa->spa_l2cache.sav_vdevs[j]; + break; + } + } + ASSERT(vd != NULL); + + VERIFY(nvlist_lookup_uint64_array(l2cache[i], + ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0); + vdev_get_stats(vd, vs); + } + } + + spa_config_exit(spa, SCL_CONFIG, FTAG); +} + +int +spa_get_stats(const char *name, nvlist_t **config, char *altroot, size_t buflen) +{ + int error; + spa_t *spa; + + *config = NULL; + error = spa_open_common(name, &spa, FTAG, config); + + if (spa && *config != NULL) { + VERIFY(nvlist_add_uint64(*config, ZPOOL_CONFIG_ERRCOUNT, + spa_get_errlog_size(spa)) == 0); + + if (spa_suspended(spa)) + VERIFY(nvlist_add_uint64(*config, + ZPOOL_CONFIG_SUSPENDED, spa->spa_failmode) == 0); + + spa_add_spares(spa, *config); + spa_add_l2cache(spa, *config); + } + + /* + * We want to get the alternate root even for faulted pools, so we cheat + * and call spa_lookup() directly. + */ + if (altroot) { + if (spa == NULL) { + mutex_enter(&spa_namespace_lock); + spa = spa_lookup(name); + if (spa) + spa_altroot(spa, altroot, buflen); + else + altroot[0] = '\0'; + spa = NULL; + mutex_exit(&spa_namespace_lock); + } else { + spa_altroot(spa, altroot, buflen); + } + } + + if (spa != NULL) + spa_close(spa, FTAG); + + return (error); +} + +/* + * Validate that the auxiliary device array is well formed. We must have an + * array of nvlists, each which describes a valid leaf vdev. If this is an + * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be + * specified, as long as they are well-formed. + */ +static int +spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, + spa_aux_vdev_t *sav, const char *config, uint64_t version, + vdev_labeltype_t label) +{ + nvlist_t **dev; + uint_t i, ndev; + vdev_t *vd; + int error; + + ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); + + /* + * It's acceptable to have no devs specified. + */ + if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) + return (0); + + if (ndev == 0) + return (EINVAL); + + /* + * Make sure the pool is formatted with a version that supports this + * device type. + */ + if (spa_version(spa) < version) + return (ENOTSUP); + + /* + * Set the pending device list so we correctly handle device in-use + * checking. 
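+ * sav_pending and sav_npending are cleared again at the 'out' label
+ * whether or not validation succeeds.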
+ */ + sav->sav_pending = dev; + sav->sav_npending = ndev; + + for (i = 0; i < ndev; i++) { + if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, + mode)) != 0) + goto out; + + if (!vd->vdev_ops->vdev_op_leaf) { + vdev_free(vd); + error = EINVAL; + goto out; + } + + /* + * The L2ARC currently only supports disk devices in + * kernel context. For user-level testing, we allow it. + */ +#ifdef _KERNEL + if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && + strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { + error = ENOTBLK; + goto out; + } +#endif + vd->vdev_top = vd; + + if ((error = vdev_open(vd)) == 0 && + (error = vdev_label_init(vd, crtxg, label)) == 0) { + VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, + vd->vdev_guid) == 0); + } + + vdev_free(vd); + + if (error && + (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) + goto out; + else + error = 0; + } + +out: + sav->sav_pending = NULL; + sav->sav_npending = 0; + return (error); +} + +static int +spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) +{ + int error; + + ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); + + if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, + &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, + VDEV_LABEL_SPARE)) != 0) { + return (error); + } + + return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, + &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, + VDEV_LABEL_L2CACHE)); +} + +static void +spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, + const char *config) +{ + int i; + + if (sav->sav_config != NULL) { + nvlist_t **olddevs; + uint_t oldndevs; + nvlist_t **newdevs; + + /* + * Generate new dev list by concatentating with the + * current dev list. + */ + VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, + &olddevs, &oldndevs) == 0); + + newdevs = kmem_alloc(sizeof (void *) * + (ndevs + oldndevs), KM_SLEEP); + for (i = 0; i < oldndevs; i++) + VERIFY(nvlist_dup(olddevs[i], &newdevs[i], + KM_SLEEP) == 0); + for (i = 0; i < ndevs; i++) + VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], + KM_SLEEP) == 0); + + VERIFY(nvlist_remove(sav->sav_config, config, + DATA_TYPE_NVLIST_ARRAY) == 0); + + VERIFY(nvlist_add_nvlist_array(sav->sav_config, + config, newdevs, ndevs + oldndevs) == 0); + for (i = 0; i < oldndevs + ndevs; i++) + nvlist_free(newdevs[i]); + kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); + } else { + /* + * Generate a new dev list. 
+ */ + VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, + KM_SLEEP) == 0); + VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, + devs, ndevs) == 0); + } +} + +/* + * Stop and drop level 2 ARC devices + */ +void +spa_l2cache_drop(spa_t *spa) +{ + vdev_t *vd; + int i; + spa_aux_vdev_t *sav = &spa->spa_l2cache; + + for (i = 0; i < sav->sav_count; i++) { + uint64_t pool; + + vd = sav->sav_vdevs[i]; + ASSERT(vd != NULL); + + if ((spa_mode & FWRITE) && + spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0ULL && + l2arc_vdev_present(vd)) { + l2arc_remove_vdev(vd); + } + if (vd->vdev_isl2cache) + spa_l2cache_remove(vd); + vdev_clear_stats(vd); + (void) vdev_close(vd); + } +} + +/* + * Pool Creation + */ +int +spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, + const char *history_str, nvlist_t *zplprops) +{ + spa_t *spa; + char *altroot = NULL; + vdev_t *rvd; + dsl_pool_t *dp; + dmu_tx_t *tx; + int c, error = 0; + uint64_t txg = TXG_INITIAL; + nvlist_t **spares, **l2cache; + uint_t nspares, nl2cache; + uint64_t version; + + /* + * If this pool already exists, return failure. + */ + mutex_enter(&spa_namespace_lock); + if (spa_lookup(pool) != NULL) { + mutex_exit(&spa_namespace_lock); + return (EEXIST); + } + + /* + * Allocate a new spa_t structure. + */ + (void) nvlist_lookup_string(props, + zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); + spa = spa_add(pool, altroot); + spa_activate(spa); + + spa->spa_uberblock.ub_txg = txg - 1; + + if (props && (error = spa_prop_validate(spa, props))) { + spa_unload(spa); + spa_deactivate(spa); + spa_remove(spa); + mutex_exit(&spa_namespace_lock); + return (error); + } + + if (nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION), + &version) != 0) + version = SPA_VERSION; + ASSERT(version <= SPA_VERSION); + spa->spa_uberblock.ub_version = version; + spa->spa_ubsync = spa->spa_uberblock; + + /* + * Create the root vdev. + */ + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + + error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); + + ASSERT(error != 0 || rvd != NULL); + ASSERT(error != 0 || spa->spa_root_vdev == rvd); + + if (error == 0 && !zfs_allocatable_devs(nvroot)) + error = EINVAL; + + if (error == 0 && + (error = vdev_create(rvd, txg, B_FALSE)) == 0 && + (error = spa_validate_aux(spa, nvroot, txg, + VDEV_ALLOC_ADD)) == 0) { + for (c = 0; c < rvd->vdev_children; c++) + vdev_init(rvd->vdev_child[c], txg); + vdev_config_dirty(rvd); + } + + spa_config_exit(spa, SCL_ALL, FTAG); + + if (error != 0) { + spa_unload(spa); + spa_deactivate(spa); + spa_remove(spa); + mutex_exit(&spa_namespace_lock); + return (error); + } + + /* + * Get the list of spares, if specified. + */ + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, + &spares, &nspares) == 0) { + VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, + KM_SLEEP) == 0); + VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, + ZPOOL_CONFIG_SPARES, spares, nspares) == 0); + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa_load_spares(spa); + spa_config_exit(spa, SCL_ALL, FTAG); + spa->spa_spares.sav_sync = B_TRUE; + } + + /* + * Get the list of level 2 cache devices, if specified. 
+ */ + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, + &l2cache, &nl2cache) == 0) { + VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, + NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, + ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa_load_l2cache(spa); + spa_config_exit(spa, SCL_ALL, FTAG); + spa->spa_l2cache.sav_sync = B_TRUE; + } + + spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); + spa->spa_meta_objset = dp->dp_meta_objset; + + tx = dmu_tx_create_assigned(dp, txg); + + /* + * Create the pool config object. + */ + spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, + DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, + DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); + + if (zap_add(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, + sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { + cmn_err(CE_PANIC, "failed to add pool config"); + } + + /* Newly created pools with the right version are always deflated. */ + if (version >= SPA_VERSION_RAIDZ_DEFLATE) { + spa->spa_deflate = TRUE; + if (zap_add(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, + sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { + cmn_err(CE_PANIC, "failed to add deflate"); + } + } + + /* + * Create the deferred-free bplist object. Turn off compression + * because sync-to-convergence takes longer if the blocksize + * keeps changing. + */ + spa->spa_sync_bplist_obj = bplist_create(spa->spa_meta_objset, + 1 << 14, tx); + dmu_object_set_compress(spa->spa_meta_objset, spa->spa_sync_bplist_obj, + ZIO_COMPRESS_OFF, tx); + + if (zap_add(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPLIST, + sizeof (uint64_t), 1, &spa->spa_sync_bplist_obj, tx) != 0) { + cmn_err(CE_PANIC, "failed to add bplist"); + } + + /* + * Create the pool's history object. + */ + if (version >= SPA_VERSION_ZPOOL_HISTORY) + spa_history_create_obj(spa, tx); + + /* + * Set pool properties. + */ + spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); + spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); + spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); + if (props) + spa_sync_props(spa, props, CRED(), tx); + + dmu_tx_commit(tx); + + spa->spa_sync_on = B_TRUE; + txg_sync_start(spa->spa_dsl_pool); + + /* + * We explicitly wait for the first transaction to complete so that our + * bean counters are appropriately updated. + */ + txg_wait_synced(spa->spa_dsl_pool, txg); + + spa_config_sync(spa, B_FALSE, B_TRUE); + + if (version >= SPA_VERSION_ZPOOL_HISTORY && history_str != NULL) + (void) spa_history_log(spa, history_str, LOG_CMD_POOL_CREATE); + + mutex_exit(&spa_namespace_lock); + + spa->spa_minref = refcount_count(&spa->spa_refcount); + + return (0); +} + +/* + * Import the given pool into the system. We set up the necessary spa_t and + * then call spa_load() to do the dirty work. + */ +static int +spa_import_common(const char *pool, nvlist_t *config, nvlist_t *props, + boolean_t isroot, boolean_t allowfaulted) +{ + spa_t *spa; + char *altroot = NULL; + int error, loaderr; + nvlist_t *nvroot; + nvlist_t **spares, **l2cache; + uint_t nspares, nl2cache; + + /* + * If a pool with this name exists, return failure. 
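+ * The exception is a root pool import: a stale, uninitialized entry is
+ * removed from the namespace so it can be replaced by the config just
+ * read in; all other callers get EEXIST.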
+ */ + mutex_enter(&spa_namespace_lock); + if ((spa = spa_lookup(pool)) != NULL) { + if (isroot) { + /* + * Remove the existing root pool from the + * namespace so that we can replace it with + * the correct config we just read in. + */ + ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); + spa_remove(spa); + } else { + mutex_exit(&spa_namespace_lock); + return (EEXIST); + } + } + + /* + * Create and initialize the spa structure. + */ + (void) nvlist_lookup_string(props, + zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); + spa = spa_add(pool, altroot); + spa_activate(spa); + + if (allowfaulted) + spa->spa_import_faulted = B_TRUE; + spa->spa_is_root = isroot; + + /* + * Pass off the heavy lifting to spa_load(). + * Pass TRUE for mosconfig (unless this is a root pool) because + * the user-supplied config is actually the one to trust when + * doing an import. + */ + loaderr = error = spa_load(spa, config, SPA_LOAD_IMPORT, !isroot); + + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + /* + * Toss any existing sparelist, as it doesn't have any validity anymore, + * and conflicts with spa_has_spare(). + */ + if (!isroot && spa->spa_spares.sav_config) { + nvlist_free(spa->spa_spares.sav_config); + spa->spa_spares.sav_config = NULL; + spa_load_spares(spa); + } + if (!isroot && spa->spa_l2cache.sav_config) { + nvlist_free(spa->spa_l2cache.sav_config); + spa->spa_l2cache.sav_config = NULL; + spa_load_l2cache(spa); + } + + VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, + &nvroot) == 0); + if (error == 0) + error = spa_validate_aux(spa, nvroot, -1ULL, VDEV_ALLOC_SPARE); + if (error == 0) + error = spa_validate_aux(spa, nvroot, -1ULL, + VDEV_ALLOC_L2CACHE); + spa_config_exit(spa, SCL_ALL, FTAG); + + if (error != 0 || (props && (error = spa_prop_set(spa, props)))) { + if (loaderr != 0 && loaderr != EINVAL && allowfaulted) { + /* + * If we failed to load the pool, but 'allowfaulted' is + * set, then manually set the config as if the config + * passed in was specified in the cache file. + */ + error = 0; + spa->spa_import_faulted = B_FALSE; + if (spa->spa_config == NULL) + spa->spa_config = spa_config_generate(spa, + NULL, -1ULL, B_TRUE); + spa_unload(spa); + spa_deactivate(spa); + spa_config_sync(spa, B_FALSE, B_TRUE); + } else { + spa_unload(spa); + spa_deactivate(spa); + spa_remove(spa); + } + mutex_exit(&spa_namespace_lock); + return (error); + } + + /* + * Override any spares and level 2 cache devices as specified by + * the user, as these may have correct device names/devids, etc. 
+ */ + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, + &spares, &nspares) == 0) { + if (spa->spa_spares.sav_config) + VERIFY(nvlist_remove(spa->spa_spares.sav_config, + ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); + else + VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, + NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, + ZPOOL_CONFIG_SPARES, spares, nspares) == 0); + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa_load_spares(spa); + spa_config_exit(spa, SCL_ALL, FTAG); + spa->spa_spares.sav_sync = B_TRUE; + } + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, + &l2cache, &nl2cache) == 0) { + if (spa->spa_l2cache.sav_config) + VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, + ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); + else + VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, + NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, + ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa_load_l2cache(spa); + spa_config_exit(spa, SCL_ALL, FTAG); + spa->spa_l2cache.sav_sync = B_TRUE; + } + + if (spa_mode & FWRITE) { + /* + * Update the config cache to include the newly-imported pool. + */ + spa_config_update_common(spa, SPA_CONFIG_UPDATE_POOL, isroot); + } + + spa->spa_import_faulted = B_FALSE; + mutex_exit(&spa_namespace_lock); + + return (0); +} + +#ifdef _KERNEL +/* + * Build a "root" vdev for a top level vdev read in from a rootpool + * device label. + */ +static void +spa_build_rootpool_config(nvlist_t *config) +{ + nvlist_t *nvtop, *nvroot; + uint64_t pgid; + + /* + * Add this top-level vdev to the child array. + */ + VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtop) + == 0); + VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pgid) + == 0); + + /* + * Put this pool's top-level vdevs into a root vdev. + */ + VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) + == 0); + VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); + VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); + VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, + &nvtop, 1) == 0); + + /* + * Replace the existing vdev_tree with the new root vdev in + * this pool's configuration (remove the old, add the new). + */ + VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); + nvlist_free(nvroot); +} + +/* + * Get the root pool information from the root disk, then import the root pool + * during the system boot up time. 
+ */ +extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); + +int +spa_check_rootconf(char *devpath, char *devid, nvlist_t **bestconf, + uint64_t *besttxg) +{ + nvlist_t *config; + uint64_t txg; + int error; + + if (error = vdev_disk_read_rootlabel(devpath, devid, &config)) + return (error); + + VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); + + if (bestconf != NULL) + *bestconf = config; + else + nvlist_free(config); + *besttxg = txg; + return (0); +} + +boolean_t +spa_rootdev_validate(nvlist_t *nv) +{ + uint64_t ival; + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || + nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || + nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) + return (B_FALSE); + + return (B_TRUE); +} + + +/* + * Given the boot device's physical path or devid, check if the device + * is in a valid state. If so, return the configuration from the vdev + * label. + */ +int +spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf) +{ + nvlist_t *conf = NULL; + uint64_t txg = 0; + nvlist_t *nvtop, **child; + char *type; + char *bootpath = NULL; + uint_t children, c; + char *tmp; + int error; + + if (devpath && ((tmp = strchr(devpath, ' ')) != NULL)) + *tmp = '\0'; + if (error = spa_check_rootconf(devpath, devid, &conf, &txg)) { + cmn_err(CE_NOTE, "error reading device label"); + return (error); + } + if (txg == 0) { + cmn_err(CE_NOTE, "this device is detached"); + nvlist_free(conf); + return (EINVAL); + } + + VERIFY(nvlist_lookup_nvlist(conf, ZPOOL_CONFIG_VDEV_TREE, + &nvtop) == 0); + VERIFY(nvlist_lookup_string(nvtop, ZPOOL_CONFIG_TYPE, &type) == 0); + + if (strcmp(type, VDEV_TYPE_DISK) == 0) { + if (spa_rootdev_validate(nvtop)) { + goto out; + } else { + nvlist_free(conf); + return (EINVAL); + } + } + + ASSERT(strcmp(type, VDEV_TYPE_MIRROR) == 0); + + VERIFY(nvlist_lookup_nvlist_array(nvtop, ZPOOL_CONFIG_CHILDREN, + &child, &children) == 0); + + /* + * Go thru vdevs in the mirror to see if the given device + * has the most recent txg. Only the device with the most + * recent txg has valid information and should be booted. + */ + for (c = 0; c < children; c++) { + char *cdevid, *cpath; + uint64_t tmptxg; + + if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_PHYS_PATH, + &cpath) != 0) + return (EINVAL); + if (nvlist_lookup_string(child[c], ZPOOL_CONFIG_DEVID, + &cdevid) != 0) + return (EINVAL); + if ((spa_check_rootconf(cpath, cdevid, NULL, + &tmptxg) == 0) && (tmptxg > txg)) { + txg = tmptxg; + VERIFY(nvlist_lookup_string(child[c], + ZPOOL_CONFIG_PATH, &bootpath) == 0); + } + } + + /* Does the best device match the one we've booted from? */ + if (bootpath) { + cmn_err(CE_NOTE, "try booting from '%s'", bootpath); + return (EINVAL); + } +out: + *bestconf = conf; + return (0); +} + +/* + * Import a root pool. + * + * For x86. devpath_list will consist of devid and/or physpath name of + * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). + * The GRUB "findroot" command will return the vdev we should boot. + * + * For Sparc, devpath_list consists the physpath name of the booting device + * no matter the rootpool is a single device pool or a mirrored pool. + * e.g. + * "/pci@1f,0/ide@d/disk@0,0:a" + */ +int +spa_import_rootpool(char *devpath, char *devid) +{ + nvlist_t *conf = NULL; + char *pname; + int error; + + /* + * Get the vdev pathname and configuation from the most + * recently updated vdev (highest txg). 
+ */ + if (error = spa_get_rootconf(devpath, devid, &conf)) + goto msg_out; + + /* + * Add type "root" vdev to the config. + */ + spa_build_rootpool_config(conf); + + VERIFY(nvlist_lookup_string(conf, ZPOOL_CONFIG_POOL_NAME, &pname) == 0); + + /* + * We specify 'allowfaulted' for this to be treated like spa_open() + * instead of spa_import(). This prevents us from marking vdevs as + * persistently unavailable, and generates FMA ereports as if it were a + * pool open, not import. + */ + error = spa_import_common(pname, conf, NULL, B_TRUE, B_TRUE); + ASSERT(error != EEXIST); + + nvlist_free(conf); + return (error); + +msg_out: + cmn_err(CE_NOTE, "\n" + " *************************************************** \n" + " * This device is not bootable! * \n" + " * It is either offlined or detached or faulted. * \n" + " * Please try to boot from a different device. * \n" + " *************************************************** "); + + return (error); +} +#endif + +/* + * Import a non-root pool into the system. + */ +int +spa_import(const char *pool, nvlist_t *config, nvlist_t *props) +{ + return (spa_import_common(pool, config, props, B_FALSE, B_FALSE)); +} + +int +spa_import_faulted(const char *pool, nvlist_t *config, nvlist_t *props) +{ + return (spa_import_common(pool, config, props, B_FALSE, B_TRUE)); +} + + +/* + * This (illegal) pool name is used when temporarily importing a spa_t in order + * to get the vdev stats associated with the imported devices. + */ +#define TRYIMPORT_NAME "$import" + +nvlist_t * +spa_tryimport(nvlist_t *tryconfig) +{ + nvlist_t *config = NULL; + char *poolname; + spa_t *spa; + uint64_t state; + + if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) + return (NULL); + + if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) + return (NULL); + + /* + * Create and initialize the spa structure. + */ + mutex_enter(&spa_namespace_lock); + spa = spa_add(TRYIMPORT_NAME, NULL); + spa_activate(spa); + + /* + * Pass off the heavy lifting to spa_load(). + * Pass TRUE for mosconfig because the user-supplied config + * is actually the one to trust when doing an import. + */ + (void) spa_load(spa, tryconfig, SPA_LOAD_TRYIMPORT, B_TRUE); + + /* + * If 'tryconfig' was at least parsable, return the current config. + */ + if (spa->spa_root_vdev != NULL) { + config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); + VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, + poolname) == 0); + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, + state) == 0); + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, + spa->spa_uberblock.ub_timestamp) == 0); + + /* + * If the bootfs property exists on this pool then we + * copy it out so that external consumers can tell which + * pools are bootable. + */ + if (spa->spa_bootfs) { + char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); + + /* + * We have to play games with the name since the + * pool was opened as TRYIMPORT_NAME. + */ + if (dsl_dsobj_to_dsname(spa_name(spa), + spa->spa_bootfs, tmpname) == 0) { + char *cp; + char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); + + cp = strchr(tmpname, '/'); + if (cp == NULL) { + (void) strlcpy(dsname, tmpname, + MAXPATHLEN); + } else { + (void) snprintf(dsname, MAXPATHLEN, + "%s/%s", poolname, ++cp); + } + VERIFY(nvlist_add_string(config, + ZPOOL_CONFIG_BOOTFS, dsname) == 0); + kmem_free(dsname, MAXPATHLEN); + } + kmem_free(tmpname, MAXPATHLEN); + } + + /* + * Add the list of hot spares and level 2 cache devices. 
+ */ + spa_add_spares(spa, config); + spa_add_l2cache(spa, config); + } + + spa_unload(spa); + spa_deactivate(spa); + spa_remove(spa); + mutex_exit(&spa_namespace_lock); + + return (config); +} + +/* + * Pool export/destroy + * + * The act of destroying or exporting a pool is very simple. We make sure there + * is no more pending I/O and any references to the pool are gone. Then, we + * update the pool state and sync all the labels to disk, removing the + * configuration from the cache afterwards. + */ +static int +spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, + boolean_t force) +{ + spa_t *spa; + + if (oldconfig) + *oldconfig = NULL; + + if (!(spa_mode & FWRITE)) + return (EROFS); + + mutex_enter(&spa_namespace_lock); + if ((spa = spa_lookup(pool)) == NULL) { + mutex_exit(&spa_namespace_lock); + return (ENOENT); + } + + /* + * Put a hold on the pool, drop the namespace lock, stop async tasks, + * reacquire the namespace lock, and see if we can export. + */ + spa_open_ref(spa, FTAG); + mutex_exit(&spa_namespace_lock); + spa_async_suspend(spa); + mutex_enter(&spa_namespace_lock); + spa_close(spa, FTAG); + + /* + * The pool will be in core if it's openable, + * in which case we can modify its state. + */ + if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { + /* + * Objsets may be open only because they're dirty, so we + * have to force it to sync before checking spa_refcnt. + */ + txg_wait_synced(spa->spa_dsl_pool, 0); + + /* + * A pool cannot be exported or destroyed if there are active + * references. If we are resetting a pool, allow references by + * fault injection handlers. + */ + if (!spa_refcount_zero(spa) || + (spa->spa_inject_ref != 0 && + new_state != POOL_STATE_UNINITIALIZED)) { + spa_async_resume(spa); + mutex_exit(&spa_namespace_lock); + return (EBUSY); + } + + /* + * A pool cannot be exported if it has an active shared spare. + * This is to prevent other pools stealing the active spare + * from an exported pool. At user's own will, such pool can + * be forcedly exported. + */ + if (!force && new_state == POOL_STATE_EXPORTED && + spa_has_active_shared_spare(spa)) { + spa_async_resume(spa); + mutex_exit(&spa_namespace_lock); + return (EXDEV); + } + + /* + * We want this to be reflected on every label, + * so mark them all dirty. spa_unload() will do the + * final sync that pushes these changes out. + */ + if (new_state != POOL_STATE_UNINITIALIZED) { + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + spa->spa_state = new_state; + spa->spa_final_txg = spa_last_synced_txg(spa) + 1; + vdev_config_dirty(spa->spa_root_vdev); + spa_config_exit(spa, SCL_ALL, FTAG); + } + } + + spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); + + if (spa->spa_state != POOL_STATE_UNINITIALIZED) { + spa_unload(spa); + spa_deactivate(spa); + } + + if (oldconfig && spa->spa_config) + VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); + + if (new_state != POOL_STATE_UNINITIALIZED) { + spa_config_sync(spa, B_TRUE, B_TRUE); + spa_remove(spa); + } + mutex_exit(&spa_namespace_lock); + + return (0); +} + +/* + * Destroy a storage pool. + */ +int +spa_destroy(char *pool) +{ + return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, B_FALSE)); +} + +/* + * Export a storage pool. + */ +int +spa_export(char *pool, nvlist_t **oldconfig, boolean_t force) +{ + return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, force)); +} + +/* + * Similar to spa_export(), this unloads the spa_t without actually removing it + * from the namespace in any way. 
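+ *
+ * For reference, the export-family entry points here all funnel into
+ * spa_export_common() and differ only in the pool state they request
+ * (a sketch; 'pool', 'oldconfig' and 'force' stand for a caller's own
+ * arguments):
+ *
+ *	spa_destroy(pool);			requests POOL_STATE_DESTROYED
+ *	spa_export(pool, &oldconfig, force);	requests POOL_STATE_EXPORTED
+ *	spa_reset(pool);			requests POOL_STATE_UNINITIALIZED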
+ */ +int +spa_reset(char *pool) +{ + return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, + B_FALSE)); +} + +/* + * ========================================================================== + * Device manipulation + * ========================================================================== + */ + +/* + * Add a device to a storage pool. + */ +int +spa_vdev_add(spa_t *spa, nvlist_t *nvroot) +{ + uint64_t txg; + int c, error; + vdev_t *rvd = spa->spa_root_vdev; + vdev_t *vd, *tvd; + nvlist_t **spares, **l2cache; + uint_t nspares, nl2cache; + + txg = spa_vdev_enter(spa); + + if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, + VDEV_ALLOC_ADD)) != 0) + return (spa_vdev_exit(spa, NULL, txg, error)); + + spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ + + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, + &nspares) != 0) + nspares = 0; + + if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, + &nl2cache) != 0) + nl2cache = 0; + + if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) + return (spa_vdev_exit(spa, vd, txg, EINVAL)); + + if (vd->vdev_children != 0 && + (error = vdev_create(vd, txg, B_FALSE)) != 0) + return (spa_vdev_exit(spa, vd, txg, error)); + + /* + * We must validate the spares and l2cache devices after checking the + * children. Otherwise, vdev_inuse() will blindly overwrite the spare. + */ + if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) + return (spa_vdev_exit(spa, vd, txg, error)); + + /* + * Transfer each new top-level vdev from vd to rvd. + */ + for (c = 0; c < vd->vdev_children; c++) { + tvd = vd->vdev_child[c]; + vdev_remove_child(vd, tvd); + tvd->vdev_id = rvd->vdev_children; + vdev_add_child(rvd, tvd); + vdev_config_dirty(tvd); + } + + if (nspares != 0) { + spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, + ZPOOL_CONFIG_SPARES); + spa_load_spares(spa); + spa->spa_spares.sav_sync = B_TRUE; + } + + if (nl2cache != 0) { + spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, + ZPOOL_CONFIG_L2CACHE); + spa_load_l2cache(spa); + spa->spa_l2cache.sav_sync = B_TRUE; + } + + /* + * We have to be careful when adding new vdevs to an existing pool. + * If other threads start allocating from these vdevs before we + * sync the config cache, and we lose power, then upon reboot we may + * fail to open the pool because there are DVAs that the config cache + * can't translate. Therefore, we first add the vdevs without + * initializing metaslabs; sync the config cache (via spa_vdev_exit()); + * and then let spa_config_update() initialize the new metaslabs. + * + * spa_load() checks for added-but-not-initialized vdevs, so that + * if we lose power at any point in this sequence, the remaining + * steps will be completed the next time we load the pool. + */ + (void) spa_vdev_exit(spa, vd, txg, 0); + + mutex_enter(&spa_namespace_lock); + spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); + mutex_exit(&spa_namespace_lock); + + return (0); +} + +/* + * Attach a device to a mirror. The arguments are the path to any device + * in the mirror, and the nvroot for the new device. If the path specifies + * a device that is not mirrored, we automatically insert the mirror vdev. 
+ * + * If 'replacing' is specified, the new device is intended to replace the + * existing device; in this case the two devices are made into their own + * mirror using the 'replacing' vdev, which is functionally identical to + * the mirror vdev (it actually reuses all the same ops) but has a few + * extra rules: you can't attach to it after it's been created, and upon + * completion of resilvering, the first disk (the one being replaced) + * is automatically detached. + */ +int +spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) +{ + uint64_t txg, open_txg; + vdev_t *rvd = spa->spa_root_vdev; + vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; + vdev_ops_t *pvops; + dmu_tx_t *tx; + char *oldvdpath, *newvdpath; + int newvd_isspare; + int error; + + txg = spa_vdev_enter(spa); + + oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); + + if (oldvd == NULL) + return (spa_vdev_exit(spa, NULL, txg, ENODEV)); + + if (!oldvd->vdev_ops->vdev_op_leaf) + return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); + + pvd = oldvd->vdev_parent; + + if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, + VDEV_ALLOC_ADD)) != 0) + return (spa_vdev_exit(spa, NULL, txg, EINVAL)); + + if (newrootvd->vdev_children != 1) + return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); + + newvd = newrootvd->vdev_child[0]; + + if (!newvd->vdev_ops->vdev_op_leaf) + return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); + + if ((error = vdev_create(newrootvd, txg, replacing)) != 0) + return (spa_vdev_exit(spa, newrootvd, txg, error)); + + /* + * Spares can't replace logs + */ + if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) + return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); + + if (!replacing) { + /* + * For attach, the only allowable parent is a mirror or the root + * vdev. + */ + if (pvd->vdev_ops != &vdev_mirror_ops && + pvd->vdev_ops != &vdev_root_ops) + return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); + + pvops = &vdev_mirror_ops; + } else { + /* + * Active hot spares can only be replaced by inactive hot + * spares. + */ + if (pvd->vdev_ops == &vdev_spare_ops && + pvd->vdev_child[1] == oldvd && + !spa_has_spare(spa, newvd->vdev_guid)) + return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); + + /* + * If the source is a hot spare, and the parent isn't already a + * spare, then we want to create a new hot spare. Otherwise, we + * want to create a replacing vdev. The user is not allowed to + * attach to a spared vdev child unless the 'isspare' state is + * the same (spare replaces spare, non-spare replaces + * non-spare). + */ + if (pvd->vdev_ops == &vdev_replacing_ops) + return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); + else if (pvd->vdev_ops == &vdev_spare_ops && + newvd->vdev_isspare != oldvd->vdev_isspare) + return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); + else if (pvd->vdev_ops != &vdev_spare_ops && + newvd->vdev_isspare) + pvops = &vdev_spare_ops; + else + pvops = &vdev_replacing_ops; + } + + /* + * Compare the new device size with the replaceable/attachable + * device size. + */ + if (newvd->vdev_psize < vdev_get_rsize(oldvd)) + return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); + + /* + * The new device cannot have a higher alignment requirement + * than the top-level vdev. + */ + if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) + return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); + + /* + * If this is an in-place replacement, update oldvd's path and devid + * to make it distinguishable from newvd, and unopenable from now on. 
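+ *
+ * For example, if both vdevs were opened through the hypothetical path
+ * /dev/dsk/c0t0d0s0, the outgoing disk is renamed below to
+ * /dev/dsk/c0t0d0s0/old and its devid is cleared, so only newvd can be
+ * opened by the original name from now on.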
+ */ + if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { + spa_strfree(oldvd->vdev_path); + oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, + KM_SLEEP); + (void) sprintf(oldvd->vdev_path, "%s/%s", + newvd->vdev_path, "old"); + if (oldvd->vdev_devid != NULL) { + spa_strfree(oldvd->vdev_devid); + oldvd->vdev_devid = NULL; + } + } + + /* + * If the parent is not a mirror, or if we're replacing, insert the new + * mirror/replacing/spare vdev above oldvd. + */ + if (pvd->vdev_ops != pvops) + pvd = vdev_add_parent(oldvd, pvops); + + ASSERT(pvd->vdev_top->vdev_parent == rvd); + ASSERT(pvd->vdev_ops == pvops); + ASSERT(oldvd->vdev_parent == pvd); + + /* + * Extract the new device from its root and add it to pvd. + */ + vdev_remove_child(newrootvd, newvd); + newvd->vdev_id = pvd->vdev_children; + vdev_add_child(pvd, newvd); + + /* + * If newvd is smaller than oldvd, but larger than its rsize, + * the addition of newvd may have decreased our parent's asize. + */ + pvd->vdev_asize = MIN(pvd->vdev_asize, newvd->vdev_asize); + + tvd = newvd->vdev_top; + ASSERT(pvd->vdev_top == tvd); + ASSERT(tvd->vdev_parent == rvd); + + vdev_config_dirty(tvd); + + /* + * Set newvd's DTL to [TXG_INITIAL, open_txg]. It will propagate + * upward when spa_vdev_exit() calls vdev_dtl_reassess(). + */ + open_txg = txg + TXG_CONCURRENT_STATES - 1; + + mutex_enter(&newvd->vdev_dtl_lock); + space_map_add(&newvd->vdev_dtl_map, TXG_INITIAL, + open_txg - TXG_INITIAL + 1); + mutex_exit(&newvd->vdev_dtl_lock); + + if (newvd->vdev_isspare) + spa_spare_activate(newvd); + oldvdpath = spa_strdup(oldvd->vdev_path); + newvdpath = spa_strdup(newvd->vdev_path); + newvd_isspare = newvd->vdev_isspare; + + /* + * Mark newvd's DTL dirty in this txg. + */ + vdev_dirty(tvd, VDD_DTL, newvd, txg); + + (void) spa_vdev_exit(spa, newrootvd, open_txg, 0); + + tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); + if (dmu_tx_assign(tx, TXG_WAIT) == 0) { + spa_history_internal_log(LOG_POOL_VDEV_ATTACH, spa, tx, + CRED(), "%s vdev=%s %s vdev=%s", + replacing && newvd_isspare ? "spare in" : + replacing ? "replace" : "attach", newvdpath, + replacing ? "for" : "to", oldvdpath); + dmu_tx_commit(tx); + } else { + dmu_tx_abort(tx); + } + + spa_strfree(oldvdpath); + spa_strfree(newvdpath); + + /* + * Kick off a resilver to update newvd. + */ + VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0); + + return (0); +} + +/* + * Detach a device from a mirror or replacing vdev. + * If 'replace_done' is specified, only detach if the parent + * is a replacing vdev. + */ +int +spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done) +{ + uint64_t txg; + int c, t, error; + vdev_t *rvd = spa->spa_root_vdev; + vdev_t *vd, *pvd, *cvd, *tvd; + boolean_t unspare = B_FALSE; + uint64_t unspare_guid; + size_t len; + + txg = spa_vdev_enter(spa); + + vd = spa_lookup_by_guid(spa, guid, B_FALSE); + + if (vd == NULL) + return (spa_vdev_exit(spa, NULL, txg, ENODEV)); + + if (!vd->vdev_ops->vdev_op_leaf) + return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); + + pvd = vd->vdev_parent; + + /* + * If replace_done is specified, only remove this device if it's + * the first child of a replacing vdev. For the 'spare' vdev, either + * disk can be removed. 
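+ *
+ * As a sketch of the layouts this relies on: a 'replacing' interior vdev
+ * has child[0] = the disk being replaced and child[1] = the new disk,
+ * while a 'spare' interior vdev has child[0] = the original disk and
+ * child[1] = the active hot spare; hence the replace_done check below
+ * that only child 0 of a 'replacing' vdev may be detached.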
+ */ + if (replace_done) { + if (pvd->vdev_ops == &vdev_replacing_ops) { + if (vd->vdev_id != 0) + return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); + } else if (pvd->vdev_ops != &vdev_spare_ops) { + return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); + } + } + + ASSERT(pvd->vdev_ops != &vdev_spare_ops || + spa_version(spa) >= SPA_VERSION_SPARES); + + /* + * Only mirror, replacing, and spare vdevs support detach. + */ + if (pvd->vdev_ops != &vdev_replacing_ops && + pvd->vdev_ops != &vdev_mirror_ops && + pvd->vdev_ops != &vdev_spare_ops) + return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); + + /* + * If there's only one replica, you can't detach it. + */ + if (pvd->vdev_children <= 1) + return (spa_vdev_exit(spa, NULL, txg, EBUSY)); + + /* + * If all siblings have non-empty DTLs, this device may have the only + * valid copy of the data, which means we cannot safely detach it. + * + * XXX -- as in the vdev_offline() case, we really want a more + * precise DTL check. + */ + for (c = 0; c < pvd->vdev_children; c++) { + uint64_t dirty; + + cvd = pvd->vdev_child[c]; + if (cvd == vd) + continue; + if (vdev_is_dead(cvd)) + continue; + mutex_enter(&cvd->vdev_dtl_lock); + dirty = cvd->vdev_dtl_map.sm_space | + cvd->vdev_dtl_scrub.sm_space; + mutex_exit(&cvd->vdev_dtl_lock); + if (!dirty) + break; + } + + if (c == pvd->vdev_children) + return (spa_vdev_exit(spa, NULL, txg, EBUSY)); + + /* + * If we are detaching the second disk from a replacing vdev, then + * check to see if we changed the original vdev's path to have "/old" + * at the end in spa_vdev_attach(). If so, undo that change now. + */ + if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id == 1 && + pvd->vdev_child[0]->vdev_path != NULL && + pvd->vdev_child[1]->vdev_path != NULL) { + ASSERT(pvd->vdev_child[1] == vd); + cvd = pvd->vdev_child[0]; + len = strlen(vd->vdev_path); + if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && + strcmp(cvd->vdev_path + len, "/old") == 0) { + spa_strfree(cvd->vdev_path); + cvd->vdev_path = spa_strdup(vd->vdev_path); + } + } + + /* + * If we are detaching the original disk from a spare, then it implies + * that the spare should become a real disk, and be removed from the + * active spare list for the pool. + */ + if (pvd->vdev_ops == &vdev_spare_ops && + vd->vdev_id == 0) + unspare = B_TRUE; + + /* + * Erase the disk labels so the disk can be used for other things. + * This must be done after all other error cases are handled, + * but before we disembowel vd (so we can still do I/O to it). + * But if we can't do it, don't treat the error as fatal -- + * it may be that the unwritability of the disk is the reason + * it's being detached! + */ + error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); + + /* + * Remove vd from its parent and compact the parent's children. + */ + vdev_remove_child(pvd, vd); + vdev_compact_children(pvd); + + /* + * Remember one of the remaining children so we can get tvd below. + */ + cvd = pvd->vdev_child[0]; + + /* + * If we need to remove the remaining child from the list of hot spares, + * do it now, marking the vdev as no longer a spare in the process. We + * must do this before vdev_remove_parent(), because that can change the + * GUID if it creates a new toplevel GUID. + */ + if (unspare) { + ASSERT(cvd->vdev_isspare); + spa_spare_remove(cvd); + unspare_guid = cvd->vdev_guid; + } + + /* + * If the parent mirror/replacing vdev only has one child, + * the parent is no longer needed. Remove it from the tree. 
+ */ + if (pvd->vdev_children == 1) + vdev_remove_parent(cvd); + + /* + * We don't set tvd until now because the parent we just removed + * may have been the previous top-level vdev. + */ + tvd = cvd->vdev_top; + ASSERT(tvd->vdev_parent == rvd); + + /* + * Reevaluate the parent vdev state. + */ + vdev_propagate_state(cvd); + + /* + * If the device we just detached was smaller than the others, it may be + * possible to add metaslabs (i.e. grow the pool). vdev_metaslab_init() + * can't fail because the existing metaslabs are already in core, so + * there's nothing to read from disk. + */ + VERIFY(vdev_metaslab_init(tvd, txg) == 0); + + vdev_config_dirty(tvd); + + /* + * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that + * vd->vdev_detached is set and free vd's DTL object in syncing context. + * But first make sure we're not on any *other* txg's DTL list, to + * prevent vd from being accessed after it's freed. + */ + for (t = 0; t < TXG_SIZE; t++) + (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); + vd->vdev_detached = B_TRUE; + vdev_dirty(tvd, VDD_DTL, vd, txg); + + spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); + + error = spa_vdev_exit(spa, vd, txg, 0); + + /* + * If this was the removal of the original device in a hot spare vdev, + * then we want to go through and remove the device from the hot spare + * list of every other pool. + */ + if (unspare) { + spa = NULL; + mutex_enter(&spa_namespace_lock); + while ((spa = spa_next(spa)) != NULL) { + if (spa->spa_state != POOL_STATE_ACTIVE) + continue; + spa_open_ref(spa, FTAG); + mutex_exit(&spa_namespace_lock); + (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); + mutex_enter(&spa_namespace_lock); + spa_close(spa, FTAG); + } + mutex_exit(&spa_namespace_lock); + } + + return (error); +} + +static nvlist_t * +spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) +{ + for (int i = 0; i < count; i++) { + uint64_t guid; + + VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, + &guid) == 0); + + if (guid == target_guid) + return (nvpp[i]); + } + + return (NULL); +} + +static void +spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, + nvlist_t *dev_to_remove) +{ + nvlist_t **newdev = NULL; + + if (count > 1) + newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); + + for (int i = 0, j = 0; i < count; i++) { + if (dev[i] == dev_to_remove) + continue; + VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); + } + + VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); + VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); + + for (int i = 0; i < count - 1; i++) + nvlist_free(newdev[i]); + + if (count > 1) + kmem_free(newdev, (count - 1) * sizeof (void *)); +} + +/* + * Remove a device from the pool. Currently, this supports removing only hot + * spares and level 2 ARC devices. + */ +int +spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) +{ + vdev_t *vd; + nvlist_t **spares, **l2cache, *nv; + uint_t nspares, nl2cache; + uint64_t txg; + int error = 0; + + txg = spa_vdev_enter(spa); + + vd = spa_lookup_by_guid(spa, guid, B_FALSE); + + if (spa->spa_spares.sav_vdevs != NULL && + nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, + ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && + (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { + /* + * Only remove the hot spare if it's not currently in use + * in this pool. 
+ */ + if (vd == NULL || unspare) { + spa_vdev_remove_aux(spa->spa_spares.sav_config, + ZPOOL_CONFIG_SPARES, spares, nspares, nv); + spa_load_spares(spa); + spa->spa_spares.sav_sync = B_TRUE; + } else { + error = EBUSY; + } + } else if (spa->spa_l2cache.sav_vdevs != NULL && + nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, + ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && + (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { + /* + * Cache devices can always be removed. + */ + spa_vdev_remove_aux(spa->spa_l2cache.sav_config, + ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); + spa_load_l2cache(spa); + spa->spa_l2cache.sav_sync = B_TRUE; + } else if (vd != NULL) { + /* + * Normal vdevs cannot be removed (yet). + */ + error = ENOTSUP; + } else { + /* + * There is no vdev of any kind with the specified guid. + */ + error = ENOENT; + } + + return (spa_vdev_exit(spa, NULL, txg, error)); +} + +/* + * Find any device that's done replacing, or a vdev marked 'unspare' that's + * current spared, so we can detach it. + */ +static vdev_t * +spa_vdev_resilver_done_hunt(vdev_t *vd) +{ + vdev_t *newvd, *oldvd; + int c; + + for (c = 0; c < vd->vdev_children; c++) { + oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); + if (oldvd != NULL) + return (oldvd); + } + + /* + * Check for a completed replacement. + */ + if (vd->vdev_ops == &vdev_replacing_ops && vd->vdev_children == 2) { + oldvd = vd->vdev_child[0]; + newvd = vd->vdev_child[1]; + + mutex_enter(&newvd->vdev_dtl_lock); + if (newvd->vdev_dtl_map.sm_space == 0 && + newvd->vdev_dtl_scrub.sm_space == 0) { + mutex_exit(&newvd->vdev_dtl_lock); + return (oldvd); + } + mutex_exit(&newvd->vdev_dtl_lock); + } + + /* + * Check for a completed resilver with the 'unspare' flag set. + */ + if (vd->vdev_ops == &vdev_spare_ops && vd->vdev_children == 2) { + newvd = vd->vdev_child[0]; + oldvd = vd->vdev_child[1]; + + mutex_enter(&newvd->vdev_dtl_lock); + if (newvd->vdev_unspare && + newvd->vdev_dtl_map.sm_space == 0 && + newvd->vdev_dtl_scrub.sm_space == 0) { + newvd->vdev_unspare = 0; + mutex_exit(&newvd->vdev_dtl_lock); + return (oldvd); + } + mutex_exit(&newvd->vdev_dtl_lock); + } + + return (NULL); +} + +static void +spa_vdev_resilver_done(spa_t *spa) +{ + vdev_t *vd; + vdev_t *pvd; + uint64_t guid; + uint64_t pguid = 0; + + spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); + + while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { + guid = vd->vdev_guid; + /* + * If we have just finished replacing a hot spared device, then + * we need to detach the parent's first child (the original hot + * spare) as well. + */ + pvd = vd->vdev_parent; + if (pvd->vdev_parent->vdev_ops == &vdev_spare_ops && + pvd->vdev_id == 0) { + ASSERT(pvd->vdev_ops == &vdev_replacing_ops); + ASSERT(pvd->vdev_parent->vdev_children == 2); + pguid = pvd->vdev_parent->vdev_child[1]->vdev_guid; + } + spa_config_exit(spa, SCL_CONFIG, FTAG); + if (spa_vdev_detach(spa, guid, B_TRUE) != 0) + return; + if (pguid != 0 && spa_vdev_detach(spa, pguid, B_TRUE) != 0) + return; + spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); + } + + spa_config_exit(spa, SCL_CONFIG, FTAG); +} + +/* + * Update the stored path for this vdev. Dirty the vdev configuration, relying + * on spa_vdev_enter/exit() to synchronize the labels and cache. 
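+ *
+ * A minimal sketch of a caller (the guid and device path shown are
+ * hypothetical):
+ *
+ *	(void) spa_vdev_setpath(spa, guid, "/dev/dsk/c1t2d0s0");
+ *
+ * The updated path then goes out with the label sync performed by
+ * spa_vdev_exit().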
+ */ +int +spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) +{ + vdev_t *vd; + uint64_t txg; + + txg = spa_vdev_enter(spa); + + if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) { + /* + * Determine if this is a reference to a hot spare device. If + * it is, update the path manually as there is no associated + * vdev_t that can be synced to disk. + */ + nvlist_t **spares; + uint_t i, nspares; + + if (spa->spa_spares.sav_config != NULL) { + VERIFY(nvlist_lookup_nvlist_array( + spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES, + &spares, &nspares) == 0); + for (i = 0; i < nspares; i++) { + uint64_t theguid; + VERIFY(nvlist_lookup_uint64(spares[i], + ZPOOL_CONFIG_GUID, &theguid) == 0); + if (theguid == guid) { + VERIFY(nvlist_add_string(spares[i], + ZPOOL_CONFIG_PATH, newpath) == 0); + spa_load_spares(spa); + spa->spa_spares.sav_sync = B_TRUE; + return (spa_vdev_exit(spa, NULL, txg, + 0)); + } + } + } + + return (spa_vdev_exit(spa, NULL, txg, ENOENT)); + } + + if (!vd->vdev_ops->vdev_op_leaf) + return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); + + spa_strfree(vd->vdev_path); + vd->vdev_path = spa_strdup(newpath); + + vdev_config_dirty(vd->vdev_top); + + return (spa_vdev_exit(spa, NULL, txg, 0)); +} + +/* + * ========================================================================== + * SPA Scrubbing + * ========================================================================== + */ + +int +spa_scrub(spa_t *spa, pool_scrub_type_t type) +{ + ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); + + if ((uint_t)type >= POOL_SCRUB_TYPES) + return (ENOTSUP); + + /* + * If a resilver was requested, but there is no DTL on a + * writeable leaf device, we have nothing to do. + */ + if (type == POOL_SCRUB_RESILVER && + !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { + spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); + return (0); + } + + if (type == POOL_SCRUB_EVERYTHING && + spa->spa_dsl_pool->dp_scrub_func != SCRUB_FUNC_NONE && + spa->spa_dsl_pool->dp_scrub_isresilver) + return (EBUSY); + + if (type == POOL_SCRUB_EVERYTHING || type == POOL_SCRUB_RESILVER) { + return (dsl_pool_scrub_clean(spa->spa_dsl_pool)); + } else if (type == POOL_SCRUB_NONE) { + return (dsl_pool_scrub_cancel(spa->spa_dsl_pool)); + } else { + return (EINVAL); + } +} + +/* + * ========================================================================== + * SPA async task processing + * ========================================================================== + */ + +static void +spa_async_remove(spa_t *spa, vdev_t *vd) +{ + if (vd->vdev_remove_wanted) { + vd->vdev_remove_wanted = 0; + vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); + vdev_clear(spa, vd); + vdev_state_dirty(vd->vdev_top); + } + + for (int c = 0; c < vd->vdev_children; c++) + spa_async_remove(spa, vd->vdev_child[c]); +} + +static void +spa_async_probe(spa_t *spa, vdev_t *vd) +{ + if (vd->vdev_probe_wanted) { + vd->vdev_probe_wanted = 0; + vdev_reopen(vd); /* vdev_open() does the actual probe */ + } + + for (int c = 0; c < vd->vdev_children; c++) + spa_async_probe(spa, vd->vdev_child[c]); +} + +static void +spa_async_thread(spa_t *spa) +{ + int tasks; + + ASSERT(spa->spa_sync_on); + + mutex_enter(&spa->spa_async_lock); + tasks = spa->spa_async_tasks; + spa->spa_async_tasks = 0; + mutex_exit(&spa->spa_async_lock); + + /* + * See if the config needs to be updated. 
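+ *
+ * (Requests arrive as bits set by spa_async_request(); this thread is
+ * spawned by spa_async_dispatch(), e.g. from the end of spa_sync().
+ * A rough sketch of a producer is simply
+ *
+ *	spa_async_request(spa, SPA_ASYNC_RESILVER);
+ *
+ * and the request is serviced here on the next dispatch.)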
+ */ + if (tasks & SPA_ASYNC_CONFIG_UPDATE) { + mutex_enter(&spa_namespace_lock); + spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); + mutex_exit(&spa_namespace_lock); + } + + /* + * See if any devices need to be marked REMOVED. + */ + if (tasks & SPA_ASYNC_REMOVE) { + spa_vdev_state_enter(spa); + spa_async_remove(spa, spa->spa_root_vdev); + for (int i = 0; i < spa->spa_l2cache.sav_count; i++) + spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); + for (int i = 0; i < spa->spa_spares.sav_count; i++) + spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); + (void) spa_vdev_state_exit(spa, NULL, 0); + } + + /* + * See if any devices need to be probed. + */ + if (tasks & SPA_ASYNC_PROBE) { + spa_vdev_state_enter(spa); + spa_async_probe(spa, spa->spa_root_vdev); + (void) spa_vdev_state_exit(spa, NULL, 0); + } + + /* + * If any devices are done replacing, detach them. + */ + if (tasks & SPA_ASYNC_RESILVER_DONE) + spa_vdev_resilver_done(spa); + + /* + * Kick off a resilver. + */ + if (tasks & SPA_ASYNC_RESILVER) + VERIFY(spa_scrub(spa, POOL_SCRUB_RESILVER) == 0); + + /* + * Let the world know that we're done. + */ + mutex_enter(&spa->spa_async_lock); + spa->spa_async_thread = NULL; + cv_broadcast(&spa->spa_async_cv); + mutex_exit(&spa->spa_async_lock); + thread_exit(); +} + +void +spa_async_suspend(spa_t *spa) +{ + mutex_enter(&spa->spa_async_lock); + spa->spa_async_suspended++; + while (spa->spa_async_thread != NULL) + cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); + mutex_exit(&spa->spa_async_lock); +} + +void +spa_async_resume(spa_t *spa) +{ + mutex_enter(&spa->spa_async_lock); + ASSERT(spa->spa_async_suspended != 0); + spa->spa_async_suspended--; + mutex_exit(&spa->spa_async_lock); +} + +static void +spa_async_dispatch(spa_t *spa) +{ + mutex_enter(&spa->spa_async_lock); + if (spa->spa_async_tasks && !spa->spa_async_suspended && + spa->spa_async_thread == NULL && + rootdir != NULL && !vn_is_readonly(rootdir)) + spa->spa_async_thread = thread_create(NULL, 0, + spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); + mutex_exit(&spa->spa_async_lock); +} + +void +spa_async_request(spa_t *spa, int task) +{ + mutex_enter(&spa->spa_async_lock); + spa->spa_async_tasks |= task; + mutex_exit(&spa->spa_async_lock); +} + +/* + * ========================================================================== + * SPA syncing routines + * ========================================================================== + */ + +static void +spa_sync_deferred_frees(spa_t *spa, uint64_t txg) +{ + bplist_t *bpl = &spa->spa_sync_bplist; + dmu_tx_t *tx; + blkptr_t blk; + uint64_t itor = 0; + zio_t *zio; + int error; + uint8_t c = 1; + + zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); + + while (bplist_iterate(bpl, &itor, &blk) == 0) { + ASSERT(blk.blk_birth < txg); + zio_nowait(zio_free(zio, spa, txg, &blk, NULL, NULL, + ZIO_FLAG_MUSTSUCCEED)); + } + + error = zio_wait(zio); + ASSERT3U(error, ==, 0); + + tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); + bplist_vacate(bpl, tx); + + /* + * Pre-dirty the first block so we sync to convergence faster. + * (Usually only the first block is needed.) + */ + dmu_write(spa->spa_meta_objset, spa->spa_sync_bplist_obj, 0, 1, &c, tx); + dmu_tx_commit(tx); +} + +static void +spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) +{ + char *packed = NULL; + size_t bufsize; + size_t nvsize = 0; + dmu_buf_t *db; + + VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); + + /* + * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration + * information. 
This avoids the dbuf_will_dirty() path and + * saves us a pre-read to get data we don't actually care about. + */ + bufsize = P2ROUNDUP(nvsize, SPA_CONFIG_BLOCKSIZE); + packed = kmem_alloc(bufsize, KM_SLEEP); + + VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, + KM_SLEEP) == 0); + bzero(packed + nvsize, bufsize - nvsize); + + dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); + + kmem_free(packed, bufsize); + + VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); + dmu_buf_will_dirty(db, tx); + *(uint64_t *)db->db_data = nvsize; + dmu_buf_rele(db, FTAG); +} + +static void +spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, + const char *config, const char *entry) +{ + nvlist_t *nvroot; + nvlist_t **list; + int i; + + if (!sav->sav_sync) + return; + + /* + * Update the MOS nvlist describing the list of available devices. + * spa_validate_aux() will have already made sure this nvlist is + * valid and the vdevs are labeled appropriately. + */ + if (sav->sav_object == 0) { + sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, + DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, + sizeof (uint64_t), tx); + VERIFY(zap_update(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, + &sav->sav_object, tx) == 0); + } + + VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); + if (sav->sav_count == 0) { + VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); + } else { + list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); + for (i = 0; i < sav->sav_count; i++) + list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], + B_FALSE, B_FALSE, B_TRUE); + VERIFY(nvlist_add_nvlist_array(nvroot, config, list, + sav->sav_count) == 0); + for (i = 0; i < sav->sav_count; i++) + nvlist_free(list[i]); + kmem_free(list, sav->sav_count * sizeof (void *)); + } + + spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); + nvlist_free(nvroot); + + sav->sav_sync = B_FALSE; +} + +static void +spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) +{ + nvlist_t *config; + + if (list_is_empty(&spa->spa_config_dirty_list)) + return; + + spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); + + config = spa_config_generate(spa, spa->spa_root_vdev, + dmu_tx_get_txg(tx), B_FALSE); + + spa_config_exit(spa, SCL_STATE, FTAG); + + if (spa->spa_config_syncing) + nvlist_free(spa->spa_config_syncing); + spa->spa_config_syncing = config; + + spa_sync_nvlist(spa, spa->spa_config_object, config, tx); +} + +/* + * Set zpool properties. + */ +static void +spa_sync_props(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + spa_t *spa = arg1; + objset_t *mos = spa->spa_meta_objset; + nvlist_t *nvp = arg2; + nvpair_t *elem; + uint64_t intval; + char *strval; + zpool_prop_t prop; + const char *propname; + zprop_type_t proptype; + spa_config_dirent_t *dp; + + mutex_enter(&spa->spa_props_lock); + + elem = NULL; + while ((elem = nvlist_next_nvpair(nvp, elem))) { + switch (prop = zpool_name_to_prop(nvpair_name(elem))) { + case ZPOOL_PROP_VERSION: + /* + * Only set version for non-zpool-creation cases + * (set/import). spa_create() needs special care + * for version setting. + */ + if (tx->tx_txg != TXG_INITIAL) { + VERIFY(nvpair_value_uint64(elem, + &intval) == 0); + ASSERT(intval <= SPA_VERSION); + ASSERT(intval >= spa_version(spa)); + spa->spa_uberblock.ub_version = intval; + vdev_config_dirty(spa->spa_root_vdev); + } + break; + + case ZPOOL_PROP_ALTROOT: + /* + * 'altroot' is a non-persistent property. 
It should + * have been set temporarily at creation or import time. + */ + ASSERT(spa->spa_root != NULL); + break; + + case ZPOOL_PROP_CACHEFILE: + /* + * 'cachefile' is a non-persistent property, but note + * an async request that the config cache needs to be + * udpated. + */ + VERIFY(nvpair_value_string(elem, &strval) == 0); + + dp = kmem_alloc(sizeof (spa_config_dirent_t), KM_SLEEP); + + if (strval[0] == '\0') + dp->scd_path = spa_strdup(spa_config_path); + else if (strcmp(strval, "none") == 0) + dp->scd_path = NULL; + else + dp->scd_path = spa_strdup(strval); + + list_insert_head(&spa->spa_config_list, dp); + spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); + break; + default: + /* + * Set pool property values in the poolprops mos object. + */ + if (spa->spa_pool_props_object == 0) { + objset_t *mos = spa->spa_meta_objset; + + VERIFY((spa->spa_pool_props_object = + zap_create(mos, DMU_OT_POOL_PROPS, + DMU_OT_NONE, 0, tx)) > 0); + + VERIFY(zap_update(mos, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, + 8, 1, &spa->spa_pool_props_object, tx) + == 0); + } + + /* normalize the property name */ + propname = zpool_prop_to_name(prop); + proptype = zpool_prop_get_type(prop); + + if (nvpair_type(elem) == DATA_TYPE_STRING) { + ASSERT(proptype == PROP_TYPE_STRING); + VERIFY(nvpair_value_string(elem, &strval) == 0); + VERIFY(zap_update(mos, + spa->spa_pool_props_object, propname, + 1, strlen(strval) + 1, strval, tx) == 0); + + } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { + VERIFY(nvpair_value_uint64(elem, &intval) == 0); + + if (proptype == PROP_TYPE_INDEX) { + const char *unused; + VERIFY(zpool_prop_index_to_string( + prop, intval, &unused) == 0); + } + VERIFY(zap_update(mos, + spa->spa_pool_props_object, propname, + 8, 1, &intval, tx) == 0); + } else { + ASSERT(0); /* not allowed */ + } + + switch (prop) { + case ZPOOL_PROP_DELEGATION: + spa->spa_delegation = intval; + break; + case ZPOOL_PROP_BOOTFS: + spa->spa_bootfs = intval; + break; + case ZPOOL_PROP_FAILUREMODE: + spa->spa_failmode = intval; + break; + default: + break; + } + } + + /* log internal history if this is not a zpool create */ + if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY && + tx->tx_txg != TXG_INITIAL) { + spa_history_internal_log(LOG_POOL_PROPSET, + spa, tx, cr, "%s %lld %s", + nvpair_name(elem), intval, spa_name(spa)); + } + } + + mutex_exit(&spa->spa_props_lock); +} + +/* + * Sync the specified transaction group. New blocks may be dirtied as + * part of the process, so we iterate until it converges. + */ +void +spa_sync(spa_t *spa, uint64_t txg) +{ + dsl_pool_t *dp = spa->spa_dsl_pool; + objset_t *mos = spa->spa_meta_objset; + bplist_t *bpl = &spa->spa_sync_bplist; + vdev_t *rvd = spa->spa_root_vdev; + vdev_t *vd; + dmu_tx_t *tx; + int dirty_vdevs; + int error; + + /* + * Lock out configuration changes. + */ + spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); + + spa->spa_syncing_txg = txg; + spa->spa_sync_pass = 0; + + /* + * If there are any pending vdev state changes, convert them + * into config changes that go out with this transaction group. + */ + spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); + while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { + vdev_state_clean(vd); + vdev_config_dirty(vd); + } + spa_config_exit(spa, SCL_STATE, FTAG); + + VERIFY(0 == bplist_open(bpl, mos, spa->spa_sync_bplist_obj)); + + tx = dmu_tx_create_assigned(dp, txg); + + /* + * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, + * set spa_deflate if we have no raid-z vdevs. 
+ */ + if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && + spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { + int i; + + for (i = 0; i < rvd->vdev_children; i++) { + vd = rvd->vdev_child[i]; + if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) + break; + } + if (i == rvd->vdev_children) { + spa->spa_deflate = TRUE; + VERIFY(0 == zap_add(spa->spa_meta_objset, + DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, + sizeof (uint64_t), 1, &spa->spa_deflate, tx)); + } + } + + if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && + spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { + dsl_pool_create_origin(dp, tx); + + /* Keeping the origin open increases spa_minref */ + spa->spa_minref += 3; + } + + if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && + spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { + dsl_pool_upgrade_clones(dp, tx); + } + + /* + * If anything has changed in this txg, push the deferred frees + * from the previous txg. If not, leave them alone so that we + * don't generate work on an otherwise idle system. + */ + if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || + !txg_list_empty(&dp->dp_dirty_dirs, txg) || + !txg_list_empty(&dp->dp_sync_tasks, txg)) + spa_sync_deferred_frees(spa, txg); + + /* + * Iterate to convergence. + */ + do { + spa->spa_sync_pass++; + + spa_sync_config_object(spa, tx); + spa_sync_aux_dev(spa, &spa->spa_spares, tx, + ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); + spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, + ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); + spa_errlog_sync(spa, txg); + dsl_pool_sync(dp, txg); + + dirty_vdevs = 0; + while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) { + vdev_sync(vd, txg); + dirty_vdevs++; + } + + bplist_sync(bpl, tx); + } while (dirty_vdevs); + + bplist_close(bpl); + + dprintf("txg %llu passes %d\n", txg, spa->spa_sync_pass); + + /* + * Rewrite the vdev configuration (which includes the uberblock) + * to commit the transaction group. + * + * If there are no dirty vdevs, we sync the uberblock to a few + * random top-level vdevs that are known to be visible in the + * config cache (see spa_vdev_add() for a complete description). + * If there *are* dirty vdevs, sync the uberblock to all vdevs. + */ + for (;;) { + /* + * We hold SCL_STATE to prevent vdev open/close/etc. + * while we're attempting to write the vdev labels. + */ + spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); + + if (list_is_empty(&spa->spa_config_dirty_list)) { + vdev_t *svd[SPA_DVAS_PER_BP]; + int svdcount = 0; + int children = rvd->vdev_children; + int c0 = spa_get_random(children); + int c; + + for (c = 0; c < children; c++) { + vd = rvd->vdev_child[(c0 + c) % children]; + if (vd->vdev_ms_array == 0 || vd->vdev_islog) + continue; + svd[svdcount++] = vd; + if (svdcount == SPA_DVAS_PER_BP) + break; + } + error = vdev_config_sync(svd, svdcount, txg); + } else { + error = vdev_config_sync(rvd->vdev_child, + rvd->vdev_children, txg); + } + + spa_config_exit(spa, SCL_STATE, FTAG); + + if (error == 0) + break; + zio_suspend(spa, NULL); + zio_resume_wait(spa); + } + dmu_tx_commit(tx); + + /* + * Clear the dirty config list. + */ + while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) + vdev_config_clean(vd); + + /* + * Now that the new config has synced transactionally, + * let it become visible to the config cache. 
+ */ + if (spa->spa_config_syncing != NULL) { + spa_config_set(spa, spa->spa_config_syncing); + spa->spa_config_txg = txg; + spa->spa_config_syncing = NULL; + } + + spa->spa_ubsync = spa->spa_uberblock; + + /* + * Clean up the ZIL records for the synced txg. + */ + dsl_pool_zil_clean(dp); + + /* + * Update usable space statistics. + */ + while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) + vdev_sync_done(vd, txg); + + /* + * It had better be the case that we didn't dirty anything + * since vdev_config_sync(). + */ + ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); + ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); + ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); + ASSERT(bpl->bpl_queue == NULL); + + spa_config_exit(spa, SCL_CONFIG, FTAG); + + /* + * If any async tasks have been requested, kick them off. + */ + spa_async_dispatch(spa); +} + +/* + * Sync all pools. We don't want to hold the namespace lock across these + * operations, so we take a reference on the spa_t and drop the lock during the + * sync. + */ +void +spa_sync_allpools(void) +{ + spa_t *spa = NULL; + mutex_enter(&spa_namespace_lock); + while ((spa = spa_next(spa)) != NULL) { + if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa)) + continue; + spa_open_ref(spa, FTAG); + mutex_exit(&spa_namespace_lock); + txg_wait_synced(spa_get_dsl(spa), 0); + mutex_enter(&spa_namespace_lock); + spa_close(spa, FTAG); + } + mutex_exit(&spa_namespace_lock); +} + +/* + * ========================================================================== + * Miscellaneous routines + * ========================================================================== + */ + +/* + * Remove all pools in the system. + */ +void +spa_evict_all(void) +{ + spa_t *spa; + + /* + * Remove all cached state. All pools should be closed now, + * so every spa in the AVL tree should be unreferenced. + */ + mutex_enter(&spa_namespace_lock); + while ((spa = spa_next(NULL)) != NULL) { + /* + * Stop async tasks. The async thread may need to detach + * a device that's been replaced, which requires grabbing + * spa_namespace_lock, so we must drop it here. + */ + spa_open_ref(spa, FTAG); + mutex_exit(&spa_namespace_lock); + spa_async_suspend(spa); + mutex_enter(&spa_namespace_lock); + spa_close(spa, FTAG); + + if (spa->spa_state != POOL_STATE_UNINITIALIZED) { + spa_unload(spa); + spa_deactivate(spa); + } + spa_remove(spa); + } + mutex_exit(&spa_namespace_lock); +} + +vdev_t * +spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t l2cache) +{ + vdev_t *vd; + int i; + + if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) + return (vd); + + if (l2cache) { + for (i = 0; i < spa->spa_l2cache.sav_count; i++) { + vd = spa->spa_l2cache.sav_vdevs[i]; + if (vd->vdev_guid == guid) + return (vd); + } + } + + return (NULL); +} + +void +spa_upgrade(spa_t *spa, uint64_t version) +{ + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + + /* + * This should only be called for a non-faulted pool, and since a + * future version would result in an unopenable pool, this shouldn't be + * possible. 
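+ *
+ * A minimal usage sketch (the requested version may not be lower than
+ * the pool's current ub_version, per the ASSERT below):
+ *
+ *	spa_upgrade(spa, SPA_VERSION);
+ *
+ * The bumped version then goes out with the next synced config.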
+ */ + ASSERT(spa->spa_uberblock.ub_version <= SPA_VERSION); + ASSERT(version >= spa->spa_uberblock.ub_version); + + spa->spa_uberblock.ub_version = version; + vdev_config_dirty(spa->spa_root_vdev); + + spa_config_exit(spa, SCL_ALL, FTAG); + + txg_wait_synced(spa_get_dsl(spa), 0); +} + +boolean_t +spa_has_spare(spa_t *spa, uint64_t guid) +{ + int i; + uint64_t spareguid; + spa_aux_vdev_t *sav = &spa->spa_spares; + + for (i = 0; i < sav->sav_count; i++) + if (sav->sav_vdevs[i]->vdev_guid == guid) + return (B_TRUE); + + for (i = 0; i < sav->sav_npending; i++) { + if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, + &spareguid) == 0 && spareguid == guid) + return (B_TRUE); + } + + return (B_FALSE); +} + +/* + * Check if a pool has an active shared spare device. + * Note: reference count of an active spare is 2, as a spare and as a replace + */ +static boolean_t +spa_has_active_shared_spare(spa_t *spa) +{ + int i, refcnt; + uint64_t pool; + spa_aux_vdev_t *sav = &spa->spa_spares; + + for (i = 0; i < sav->sav_count; i++) { + if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, + &refcnt) && pool != 0ULL && pool == spa_guid(spa) && + refcnt > 2) + return (B_TRUE); + } + + return (B_FALSE); +} + +/* + * Post a sysevent corresponding to the given event. The 'name' must be one of + * the event definitions in sys/sysevent/eventdefs.h. The payload will be + * filled in from the spa and (optionally) the vdev. This doesn't do anything + * in the userland libzpool, as we don't want consumers to misinterpret ztest + * or zdb as real changes. + */ +void +spa_event_notify(spa_t *spa, vdev_t *vd, const char *name) +{ +#ifdef _KERNEL + sysevent_t *ev; + sysevent_attr_list_t *attr = NULL; + sysevent_value_t value; + sysevent_id_t eid; + + ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs", + SE_SLEEP); + + value.value_type = SE_DATA_TYPE_STRING; + value.value.sv_string = spa_name(spa); + if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0) + goto done; + + value.value_type = SE_DATA_TYPE_UINT64; + value.value.sv_uint64 = spa_guid(spa); + if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0) + goto done; + + if (vd) { + value.value_type = SE_DATA_TYPE_UINT64; + value.value.sv_uint64 = vd->vdev_guid; + if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value, + SE_SLEEP) != 0) + goto done; + + if (vd->vdev_path) { + value.value_type = SE_DATA_TYPE_STRING; + value.value.sv_string = vd->vdev_path; + if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH, + &value, SE_SLEEP) != 0) + goto done; + } + } + + if (sysevent_attach_attributes(ev, attr) != 0) + goto done; + attr = NULL; + + (void) log_sysevent(ev, SE_SLEEP, &eid); + +done: + if (attr) + sysevent_free_attr(attr); + sysevent_free(ev); +#endif +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/spa_config.c b/external/cddl/osnet/dist/uts/common/fs/zfs/spa_config.c new file mode 100644 index 000000000000..ee425a91694f --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/spa_config.c @@ -0,0 +1,444 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef _KERNEL +#include +#endif + +/* + * Pool configuration repository. + * + * Pool configuration is stored as a packed nvlist on the filesystem. By + * default, all pools are stored in /etc/zfs/zpool.cache and loaded on boot + * (when the ZFS module is loaded). Pools can also have the 'cachefile' + * property set that allows them to be stored in an alternate location until + * the control of external software. + * + * For each cache file, we have a single nvlist which holds all the + * configuration information. When the module loads, we read this information + * from /etc/zfs/zpool.cache and populate the SPA namespace. This namespace is + * maintained independently in spa.c. Whenever the namespace is modified, or + * the configuration of a pool is changed, we call spa_config_sync(), which + * walks through all the active pools and writes the configuration to disk. + */ + +static uint64_t spa_config_generation = 1; + +/* + * This can be overridden in userland to preserve an alternate namespace for + * userland pools when doing testing. + */ +const char *spa_config_path = ZPOOL_CACHE; + +/* + * Called when the module is first loaded, this routine loads the configuration + * file into the SPA namespace. It does not actually open or load the pools; it + * only populates the namespace. + */ +void +spa_config_load(void) +{ + void *buf = NULL; + nvlist_t *nvlist, *child; + nvpair_t *nvpair; + spa_t *spa; + char *pathname; + struct _buf *file; + uint64_t fsize; + + /* + * Open the configuration file. + */ + pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP); + + (void) snprintf(pathname, MAXPATHLEN, "%s%s", + (rootdir != NULL) ? "./" : "", spa_config_path); + + file = kobj_open_file(pathname); + + kmem_free(pathname, MAXPATHLEN); + + if (file == (struct _buf *)-1) + return; + + if (kobj_get_filesize(file, &fsize) != 0) + goto out; + + buf = kmem_alloc(fsize, KM_SLEEP); + + /* + * Read the nvlist from the file. + */ + if (kobj_read_file(file, buf, fsize, 0) < 0) + goto out; + + /* + * Unpack the nvlist. + */ + if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0) + goto out; + + /* + * Iterate over all elements in the nvlist, creating a new spa_t for + * each one with the specified configuration. + */ + mutex_enter(&spa_namespace_lock); + nvpair = NULL; + while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) { + + if (nvpair_type(nvpair) != DATA_TYPE_NVLIST) + continue; + + VERIFY(nvpair_value_nvlist(nvpair, &child) == 0); + + if (spa_lookup(nvpair_name(nvpair)) != NULL) + continue; + spa = spa_add(nvpair_name(nvpair), NULL); + + /* + * We blindly duplicate the configuration here. If it's + * invalid, we will catch it when the pool is first opened. 
+ */ + VERIFY(nvlist_dup(child, &spa->spa_config, 0) == 0); + } + mutex_exit(&spa_namespace_lock); + + nvlist_free(nvlist); + +out: + if (buf != NULL) + kmem_free(buf, fsize); + + kobj_close_file(file); +} + +static void +spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl) +{ + size_t buflen; + char *buf; + vnode_t *vp; + int oflags = FWRITE | FTRUNC | FCREAT | FOFFMAX; + char *temp; + + /* + * If the nvlist is empty (NULL), then remove the old cachefile. + */ + if (nvl == NULL) { + (void) vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE); + return; + } + + /* + * Pack the configuration into a buffer. + */ + VERIFY(nvlist_size(nvl, &buflen, NV_ENCODE_XDR) == 0); + + buf = kmem_alloc(buflen, KM_SLEEP); + temp = kmem_zalloc(MAXPATHLEN, KM_SLEEP); + + VERIFY(nvlist_pack(nvl, &buf, &buflen, NV_ENCODE_XDR, + KM_SLEEP) == 0); + + /* + * Write the configuration to disk. We need to do the traditional + * 'write to temporary file, sync, move over original' to make sure we + * always have a consistent view of the data. + */ + (void) snprintf(temp, MAXPATHLEN, "%s.tmp", dp->scd_path); + + if (vn_open(temp, UIO_SYSSPACE, oflags, 0644, &vp, CRCREAT, 0) == 0) { + if (vn_rdwr(UIO_WRITE, vp, buf, buflen, 0, UIO_SYSSPACE, + 0, RLIM64_INFINITY, kcred, NULL) == 0 && + VOP_FSYNC(vp, FSYNC, kcred, NULL) == 0) { + (void) vn_rename(temp, dp->scd_path, UIO_SYSSPACE); + } + (void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL); + VN_RELE(vp); + } + + (void) vn_remove(temp, UIO_SYSSPACE, RMFILE); + + kmem_free(buf, buflen); + kmem_free(temp, MAXPATHLEN); +} + +/* + * Synchronize pool configuration to disk. This must be called with the + * namespace lock held. + */ +void +spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent) +{ + spa_config_dirent_t *dp, *tdp; + nvlist_t *nvl; + + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + /* + * Iterate over all cachefiles for the pool, past or present. When the + * cachefile is changed, the new one is pushed onto this list, allowing + * us to update previous cachefiles that no longer contain this pool. + */ + for (dp = list_head(&target->spa_config_list); dp != NULL; + dp = list_next(&target->spa_config_list, dp)) { + spa_t *spa = NULL; + if (dp->scd_path == NULL) + continue; + + /* + * Iterate over all pools, adding any matching pools to 'nvl'. + */ + nvl = NULL; + while ((spa = spa_next(spa)) != NULL) { + if (spa == target && removing) + continue; + + mutex_enter(&spa->spa_props_lock); + tdp = list_head(&spa->spa_config_list); + if (spa->spa_config == NULL || + tdp->scd_path == NULL || + strcmp(tdp->scd_path, dp->scd_path) != 0) { + mutex_exit(&spa->spa_props_lock); + continue; + } + + if (nvl == NULL) + VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, + KM_SLEEP) == 0); + + VERIFY(nvlist_add_nvlist(nvl, spa->spa_name, + spa->spa_config) == 0); + mutex_exit(&spa->spa_props_lock); + } + + spa_config_write(dp, nvl); + nvlist_free(nvl); + } + + /* + * Remove any config entries older than the current one. + */ + dp = list_head(&target->spa_config_list); + while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) { + list_remove(&target->spa_config_list, tdp); + if (tdp->scd_path != NULL) + spa_strfree(tdp->scd_path); + kmem_free(tdp, sizeof (spa_config_dirent_t)); + } + + spa_config_generation++; + + if (postsysevent) + spa_event_notify(target, NULL, ESC_ZFS_CONFIG_SYNC); +} + +/* + * Sigh. Inside a local zone, we don't have access to /etc/zfs/zpool.cache, + * and we don't want to allow the local zone to see all the pools anyway. 
+ * So we have to invent the ZFS_IOC_CONFIG ioctl to grab the configuration + * information for all pool visible within the zone. + */ +nvlist_t * +spa_all_configs(uint64_t *generation) +{ + nvlist_t *pools; + spa_t *spa = NULL; + + if (*generation == spa_config_generation) + return (NULL); + + VERIFY(nvlist_alloc(&pools, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + mutex_enter(&spa_namespace_lock); + while ((spa = spa_next(spa)) != NULL) { + if (INGLOBALZONE(curproc) || + zone_dataset_visible(spa_name(spa), NULL)) { + mutex_enter(&spa->spa_props_lock); + VERIFY(nvlist_add_nvlist(pools, spa_name(spa), + spa->spa_config) == 0); + mutex_exit(&spa->spa_props_lock); + } + } + *generation = spa_config_generation; + mutex_exit(&spa_namespace_lock); + + return (pools); +} + +void +spa_config_set(spa_t *spa, nvlist_t *config) +{ + mutex_enter(&spa->spa_props_lock); + if (spa->spa_config != NULL) + nvlist_free(spa->spa_config); + spa->spa_config = config; + mutex_exit(&spa->spa_props_lock); +} + +/* + * Generate the pool's configuration based on the current in-core state. + * We infer whether to generate a complete config or just one top-level config + * based on whether vd is the root vdev. + */ +nvlist_t * +spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats) +{ + nvlist_t *config, *nvroot; + vdev_t *rvd = spa->spa_root_vdev; + unsigned long hostid = 0; + boolean_t locked = B_FALSE; + + if (vd == NULL) { + vd = rvd; + locked = B_TRUE; + spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); + } + + ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) == + (SCL_CONFIG | SCL_STATE)); + + /* + * If txg is -1, report the current value of spa->spa_config_txg. + */ + if (txg == -1ULL) + txg = spa->spa_config_txg; + + VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, + spa_version(spa)) == 0); + VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, + spa_name(spa)) == 0); + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, + spa_state(spa)) == 0); + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, + txg) == 0); + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, + spa_guid(spa)) == 0); + (void) ddi_strtoul(hw_serial, NULL, 10, &hostid); + if (hostid != 0) { + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, + hostid) == 0); + } + VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME, + utsname.nodename) == 0); + + if (vd != rvd) { + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID, + vd->vdev_top->vdev_guid) == 0); + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_GUID, + vd->vdev_guid) == 0); + if (vd->vdev_isspare) + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_IS_SPARE, + 1ULL) == 0); + if (vd->vdev_islog) + VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_IS_LOG, + 1ULL) == 0); + vd = vd->vdev_top; /* label contains top config */ + } + + nvroot = vdev_config_generate(spa, vd, getstats, B_FALSE, B_FALSE); + VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); + nvlist_free(nvroot); + + if (locked) + spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); + + return (config); +} + +/* + * For a pool that's not currently a booting rootpool, update all disk labels, + * generate a fresh config based on the current in-core state, and sync the + * global config cache. 
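+ *
+ * A sketch of typical use, mirroring the callers in spa.c (the namespace
+ * lock must already be held, as spa_config_update_common() asserts):
+ *
+ *	mutex_enter(&spa_namespace_lock);
+ *	spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
+ *	mutex_exit(&spa_namespace_lock);
+ *
+ * The POOL pass dirties the root vdev and then recurses once with
+ * SPA_CONFIG_UPDATE_VDEVS to initialize any newly added top-level vdevs.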
+ */ +void +spa_config_update(spa_t *spa, int what) +{ + spa_config_update_common(spa, what, FALSE); +} + +/* + * Update all disk labels, generate a fresh config based on the current + * in-core state, and sync the global config cache (do not sync the config + * cache if this is a booting rootpool). + */ +void +spa_config_update_common(spa_t *spa, int what, boolean_t isroot) +{ + vdev_t *rvd = spa->spa_root_vdev; + uint64_t txg; + int c; + + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + txg = spa_last_synced_txg(spa) + 1; + if (what == SPA_CONFIG_UPDATE_POOL) { + vdev_config_dirty(rvd); + } else { + /* + * If we have top-level vdevs that were added but have + * not yet been prepared for allocation, do that now. + * (It's safe now because the config cache is up to date, + * so it will be able to translate the new DVAs.) + * See comments in spa_vdev_add() for full details. + */ + for (c = 0; c < rvd->vdev_children; c++) { + vdev_t *tvd = rvd->vdev_child[c]; + if (tvd->vdev_ms_array == 0) { + vdev_init(tvd, txg); + vdev_config_dirty(tvd); + } + } + } + spa_config_exit(spa, SCL_ALL, FTAG); + + /* + * Wait for the mosconfig to be regenerated and synced. + */ + txg_wait_synced(spa->spa_dsl_pool, txg); + + /* + * Update the global config cache to reflect the new mosconfig. + */ + if (!isroot) + spa_config_sync(spa, B_FALSE, what != SPA_CONFIG_UPDATE_POOL); + + if (what == SPA_CONFIG_UPDATE_POOL) + spa_config_update_common(spa, SPA_CONFIG_UPDATE_VDEVS, isroot); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/spa_errlog.c b/external/cddl/osnet/dist/uts/common/fs/zfs/spa_errlog.c new file mode 100644 index 000000000000..c642bd768b49 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/spa_errlog.c @@ -0,0 +1,437 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * Routines to manage the on-disk persistent error log. + * + * Each pool stores a log of all logical data errors seen during normal + * operation. This is actually the union of two distinct logs: the last log, + * and the current log. All errors seen are logged to the current log. When a + * scrub completes, the current log becomes the last log, the last log is thrown + * out, and the current log is reinitialized. This way, if an error is somehow + * corrected, a new scrub will show that that it no longer exists, and will be + * deleted from the log when the scrub completes. 
+ * + * The log is stored using a ZAP object whose key is a string form of the + * zbookmark tuple (objset, object, level, blkid), and whose contents is an + * optional 'objset:object' human-readable string describing the data. When an + * error is first logged, this string will be empty, indicating that no name is + * known. This prevents us from having to issue a potentially large amount of + * I/O to discover the object name during an error path. Instead, we do the + * calculation when the data is requested, storing the result so future queries + * will be faster. + * + * This log is then shipped into an nvlist where the key is the dataset name and + * the value is the object name. Userland is then responsible for uniquifying + * this list and displaying it to the user. + */ + +#include +#include +#include +#include +#include + +/* + * This is a stripped-down version of strtoull, suitable only for converting + * lowercase hexidecimal numbers that don't overflow. + */ +#ifdef _KERNEL +static uint64_t +strtonum(char *str, char **nptr) +{ + uint64_t val = 0; + char c; + int digit; + + while ((c = *str) != '\0') { + if (c >= '0' && c <= '9') + digit = c - '0'; + else if (c >= 'a' && c <= 'f') + digit = 10 + c - 'a'; + else + break; + + val *= 16; + val += digit; + + str++; + } + + *nptr = str; + + return (val); +} +#endif + +/* + * Convert a bookmark to a string. + */ +static void +bookmark_to_name(zbookmark_t *zb, char *buf, size_t len) +{ + (void) snprintf(buf, len, "%llx:%llx:%llx:%llx", + (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object, + (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid); +} + +/* + * Convert a string to a bookmark + */ +#ifdef _KERNEL +static void +name_to_bookmark(char *buf, zbookmark_t *zb) +{ + zb->zb_objset = strtonum(buf, &buf); + ASSERT(*buf == ':'); + zb->zb_object = strtonum(buf + 1, &buf); + ASSERT(*buf == ':'); + zb->zb_level = (int)strtonum(buf + 1, &buf); + ASSERT(*buf == ':'); + zb->zb_blkid = strtonum(buf + 1, &buf); + ASSERT(*buf == '\0'); +} +#endif + +/* + * Log an uncorrectable error to the persistent error log. We add it to the + * spa's list of pending errors. The changes are actually synced out to disk + * during spa_errlog_sync(). + */ +void +spa_log_error(spa_t *spa, zio_t *zio) +{ + zbookmark_t *zb = &zio->io_logical->io_bookmark; + spa_error_entry_t search; + spa_error_entry_t *new; + avl_tree_t *tree; + avl_index_t where; + + /* + * If we are trying to import a pool, ignore any errors, as we won't be + * writing to the pool any time soon. + */ + if (spa->spa_load_state == SPA_LOAD_TRYIMPORT) + return; + + mutex_enter(&spa->spa_errlist_lock); + + /* + * If we have had a request to rotate the log, log it to the next list + * instead of the current one. + */ + if (spa->spa_scrub_active || spa->spa_scrub_finished) + tree = &spa->spa_errlist_scrub; + else + tree = &spa->spa_errlist_last; + + search.se_bookmark = *zb; + if (avl_find(tree, &search, &where) != NULL) { + mutex_exit(&spa->spa_errlist_lock); + return; + } + + new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP); + new->se_bookmark = *zb; + avl_insert(tree, new, where); + + mutex_exit(&spa->spa_errlist_lock); +} + +/* + * Return the number of errors currently in the error log. This is actually the + * sum of both the last log and the current log, since we don't know the union + * of these logs until we reach userland. 
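A short userland round trip may help illustrate the ZAP key format used here ("objset:object:level:blkid" in lowercase hex); the struct below is only a stand-in for zbookmark_t, and strtoull() plays the role of the kernel strtonum() helper:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct bookmark {			/* illustrative stand-in for zbookmark_t */
	uint64_t objset, object, level, blkid;
};

static void
bookmark_round_trip(void)
{
	struct bookmark zb = { 0x15, 0x3a1, 0, 0x7fc4 };
	struct bookmark out;
	char name[64], *p;

	/* same key format that bookmark_to_name() produces */
	(void) snprintf(name, sizeof (name), "%llx:%llx:%llx:%llx",
	    (unsigned long long)zb.objset, (unsigned long long)zb.object,
	    (unsigned long long)zb.level, (unsigned long long)zb.blkid);

	/* and the inverse parse, as name_to_bookmark() does */
	out.objset = strtoull(name, &p, 16);
	out.object = strtoull(p + 1, &p, 16);
	out.level = strtoull(p + 1, &p, 16);
	out.blkid = strtoull(p + 1, &p, 16);

	(void) printf("%s -> objset %llx blkid %llx\n", name,
	    (unsigned long long)out.objset, (unsigned long long)out.blkid);
}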
+ */ +uint64_t +spa_get_errlog_size(spa_t *spa) +{ + uint64_t total = 0, count; + + mutex_enter(&spa->spa_errlog_lock); + if (spa->spa_errlog_scrub != 0 && + zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub, + &count) == 0) + total += count; + + if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished && + zap_count(spa->spa_meta_objset, spa->spa_errlog_last, + &count) == 0) + total += count; + mutex_exit(&spa->spa_errlog_lock); + + mutex_enter(&spa->spa_errlist_lock); + total += avl_numnodes(&spa->spa_errlist_last); + total += avl_numnodes(&spa->spa_errlist_scrub); + mutex_exit(&spa->spa_errlist_lock); + + return (total); +} + +#ifdef _KERNEL +static int +process_error_log(spa_t *spa, uint64_t obj, void *addr, size_t *count) +{ + zap_cursor_t zc; + zap_attribute_t za; + zbookmark_t zb; + + if (obj == 0) + return (0); + + for (zap_cursor_init(&zc, spa->spa_meta_objset, obj); + zap_cursor_retrieve(&zc, &za) == 0; + zap_cursor_advance(&zc)) { + + if (*count == 0) { + zap_cursor_fini(&zc); + return (ENOMEM); + } + + name_to_bookmark(za.za_name, &zb); + + if (copyout(&zb, (char *)addr + + (*count - 1) * sizeof (zbookmark_t), + sizeof (zbookmark_t)) != 0) + return (EFAULT); + + *count -= 1; + } + + zap_cursor_fini(&zc); + + return (0); +} + +static int +process_error_list(avl_tree_t *list, void *addr, size_t *count) +{ + spa_error_entry_t *se; + + for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) { + + if (*count == 0) + return (ENOMEM); + + if (copyout(&se->se_bookmark, (char *)addr + + (*count - 1) * sizeof (zbookmark_t), + sizeof (zbookmark_t)) != 0) + return (EFAULT); + + *count -= 1; + } + + return (0); +} +#endif + +/* + * Copy all known errors to userland as an array of bookmarks. This is + * actually a union of the on-disk last log and current log, as well as any + * pending error requests. + * + * Because the act of reading the on-disk log could cause errors to be + * generated, we have two separate locks: one for the error log and one for the + * in-core error lists. We only need the error list lock to log and error, so + * we grab the error log lock while we read the on-disk logs, and only pick up + * the error list lock when we are finished. + */ +int +spa_get_errlog(spa_t *spa, void *uaddr, size_t *count) +{ + int ret = 0; + +#ifdef _KERNEL + mutex_enter(&spa->spa_errlog_lock); + + ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count); + + if (!ret && !spa->spa_scrub_finished) + ret = process_error_log(spa, spa->spa_errlog_last, uaddr, + count); + + mutex_enter(&spa->spa_errlist_lock); + if (!ret) + ret = process_error_list(&spa->spa_errlist_scrub, uaddr, + count); + if (!ret) + ret = process_error_list(&spa->spa_errlist_last, uaddr, + count); + mutex_exit(&spa->spa_errlist_lock); + + mutex_exit(&spa->spa_errlog_lock); +#endif + + return (ret); +} + +/* + * Called when a scrub completes. This simply set a bit which tells which AVL + * tree to add new errors. spa_errlog_sync() is responsible for actually + * syncing the changes to the underlying objects. + */ +void +spa_errlog_rotate(spa_t *spa) +{ + mutex_enter(&spa->spa_errlist_lock); + spa->spa_scrub_finished = B_TRUE; + mutex_exit(&spa->spa_errlist_lock); +} + +/* + * Discard any pending errors from the spa_t. Called when unloading a faulted + * pool, as the errors encountered during the open cannot be synced to disk. 
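The copyout loops in process_error_log() and process_error_list() fill the caller's buffer from the tail toward the head, using *count both as the remaining capacity and as the next slot index. A simplified in-memory analogue, with a hypothetical bookmark struct, shows why *count is decremented and why ENOMEM means the caller's array was too small:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct bookmark {			/* illustrative stand-in for zbookmark_t */
	uint64_t objset, object, level, blkid;
};

static int
fill_from_tail(const struct bookmark *src, size_t nsrc,
    struct bookmark *dst, size_t *count)
{
	for (size_t i = 0; i < nsrc; i++) {
		if (*count == 0)
			return (ENOMEM);	/* out of caller-supplied slots */
		dst[*count - 1] = src[i];	/* fill from the end, as the copyout above does */
		*count -= 1;
	}
	return (0);
}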
+ */ +void +spa_errlog_drain(spa_t *spa) +{ + spa_error_entry_t *se; + void *cookie; + + mutex_enter(&spa->spa_errlist_lock); + + cookie = NULL; + while ((se = avl_destroy_nodes(&spa->spa_errlist_last, + &cookie)) != NULL) + kmem_free(se, sizeof (spa_error_entry_t)); + cookie = NULL; + while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub, + &cookie)) != NULL) + kmem_free(se, sizeof (spa_error_entry_t)); + + mutex_exit(&spa->spa_errlist_lock); +} + +/* + * Process a list of errors into the current on-disk log. + */ +static void +sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx) +{ + spa_error_entry_t *se; + char buf[64]; + void *cookie; + + if (avl_numnodes(t) != 0) { + /* create log if necessary */ + if (*obj == 0) + *obj = zap_create(spa->spa_meta_objset, + DMU_OT_ERROR_LOG, DMU_OT_NONE, + 0, tx); + + /* add errors to the current log */ + for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) { + char *name = se->se_name ? se->se_name : ""; + + bookmark_to_name(&se->se_bookmark, buf, sizeof (buf)); + + (void) zap_update(spa->spa_meta_objset, + *obj, buf, 1, strlen(name) + 1, name, tx); + } + + /* purge the error list */ + cookie = NULL; + while ((se = avl_destroy_nodes(t, &cookie)) != NULL) + kmem_free(se, sizeof (spa_error_entry_t)); + } +} + +/* + * Sync the error log out to disk. This is a little tricky because the act of + * writing the error log requires the spa_errlist_lock. So, we need to lock the + * error lists, take a copy of the lists, and then reinitialize them. Then, we + * drop the error list lock and take the error log lock, at which point we + * do the errlog processing. Then, if we encounter an I/O error during this + * process, we can successfully add the error to the list. Note that this will + * result in the perpetual recycling of errors, but it is an unlikely situation + * and not a performance critical operation. + */ +void +spa_errlog_sync(spa_t *spa, uint64_t txg) +{ + dmu_tx_t *tx; + avl_tree_t scrub, last; + int scrub_finished; + + mutex_enter(&spa->spa_errlist_lock); + + /* + * Bail out early under normal circumstances. + */ + if (avl_numnodes(&spa->spa_errlist_scrub) == 0 && + avl_numnodes(&spa->spa_errlist_last) == 0 && + !spa->spa_scrub_finished) { + mutex_exit(&spa->spa_errlist_lock); + return; + } + + spa_get_errlists(spa, &last, &scrub); + scrub_finished = spa->spa_scrub_finished; + spa->spa_scrub_finished = B_FALSE; + + mutex_exit(&spa->spa_errlist_lock); + mutex_enter(&spa->spa_errlog_lock); + + tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); + + /* + * Sync out the current list of errors. + */ + sync_error_list(spa, &last, &spa->spa_errlog_last, tx); + + /* + * Rotate the log if necessary. + */ + if (scrub_finished) { + if (spa->spa_errlog_last != 0) + VERIFY(dmu_object_free(spa->spa_meta_objset, + spa->spa_errlog_last, tx) == 0); + spa->spa_errlog_last = spa->spa_errlog_scrub; + spa->spa_errlog_scrub = 0; + + sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx); + } + + /* + * Sync out any pending scrub errors. + */ + sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx); + + /* + * Update the MOS to reflect the new values. 
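The rotation performed by spa_errlog_sync() when a scrub has finished can be pictured with two plain object handles: the old 'last' log is freed, the 'scrub' log becomes the new 'last' log, and a fresh 'scrub' log is created lazily the next time an error is recorded. A hedged sketch, with free_obj standing in for dmu_object_free():

#include <stdint.h>

struct errlog_objs {
	uint64_t errlog_last;
	uint64_t errlog_scrub;
};

static void
rotate_errlogs(struct errlog_objs *e, void (*free_obj)(uint64_t))
{
	if (e->errlog_last != 0)
		free_obj(e->errlog_last);	/* dmu_object_free() in the real code */
	e->errlog_last = e->errlog_scrub;	/* scrub log becomes the last log */
	e->errlog_scrub = 0;			/* recreated on the next error */
}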
+ */ + (void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1, + &spa->spa_errlog_last, tx); + (void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1, + &spa->spa_errlog_scrub, tx); + + dmu_tx_commit(tx); + + mutex_exit(&spa->spa_errlog_lock); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/spa_history.c b/external/cddl/osnet/dist/uts/common/fs/zfs/spa_history.c new file mode 100644 index 000000000000..c997240c148f --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/spa_history.c @@ -0,0 +1,428 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef _KERNEL +#include +#endif + +/* + * Routines to manage the on-disk history log. + * + * The history log is stored as a dmu object containing + * tuples. + * + * Where "record nvlist" is a nvlist containing uint64_ts and strings, and + * "packed record length" is the packed length of the "record nvlist" stored + * as a little endian uint64_t. + * + * The log is implemented as a ring buffer, though the original creation + * of the pool ('zpool create') is never overwritten. + * + * The history log is tracked as object 'spa_t::spa_history'. The bonus buffer + * of 'spa_history' stores the offsets for logging/retrieving history as + * 'spa_history_phys_t'. 'sh_pool_create_len' is the ending offset in bytes of + * where the 'zpool create' record is stored. This allows us to never + * overwrite the original creation of the pool. 'sh_phys_max_off' is the + * physical ending offset in bytes of the log. This tells you the length of + * the buffer. 'sh_eof' is the logical EOF (in bytes). Whenever a record + * is added, 'sh_eof' is incremented by the the size of the record. + * 'sh_eof' is never decremented. 'sh_bof' is the logical BOF (in bytes). + * This is where the consumer should start reading from after reading in + * the 'zpool create' portion of the log. + * + * 'sh_records_lost' keeps track of how many records have been overwritten + * and permanently lost. 
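The offset arithmetic described above is compact enough to restate as a standalone sketch: the first sh_pool_create_len bytes are excluded from the ring, so the 'zpool create' record is never overwritten, and every logical offset past that point wraps within the remaining physical space. This mirrors spa_history_log_to_phys() below, with the bonus-buffer fields passed as plain parameters:

#include <stdint.h>

static uint64_t
log_to_phys(uint64_t log_off, uint64_t pool_create_len, uint64_t phys_max_off)
{
	uint64_t phys_len = phys_max_off - pool_create_len;

	/* wrap within the ring, but never back into the create record */
	return ((log_off - pool_create_len) % phys_len + pool_create_len);
}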
+ */ + +/* convert a logical offset to physical */ +static uint64_t +spa_history_log_to_phys(uint64_t log_off, spa_history_phys_t *shpp) +{ + uint64_t phys_len; + + phys_len = shpp->sh_phys_max_off - shpp->sh_pool_create_len; + return ((log_off - shpp->sh_pool_create_len) % phys_len + + shpp->sh_pool_create_len); +} + +void +spa_history_create_obj(spa_t *spa, dmu_tx_t *tx) +{ + dmu_buf_t *dbp; + spa_history_phys_t *shpp; + objset_t *mos = spa->spa_meta_objset; + + ASSERT(spa->spa_history == 0); + spa->spa_history = dmu_object_alloc(mos, DMU_OT_SPA_HISTORY, + SPA_MAXBLOCKSIZE, DMU_OT_SPA_HISTORY_OFFSETS, + sizeof (spa_history_phys_t), tx); + + VERIFY(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, + DMU_POOL_HISTORY, sizeof (uint64_t), 1, + &spa->spa_history, tx) == 0); + + VERIFY(0 == dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)); + ASSERT(dbp->db_size >= sizeof (spa_history_phys_t)); + + shpp = dbp->db_data; + dmu_buf_will_dirty(dbp, tx); + + /* + * Figure out maximum size of history log. We set it at + * 1% of pool size, with a max of 32MB and min of 128KB. + */ + shpp->sh_phys_max_off = spa_get_dspace(spa) / 100; + shpp->sh_phys_max_off = MIN(shpp->sh_phys_max_off, 32<<20); + shpp->sh_phys_max_off = MAX(shpp->sh_phys_max_off, 128<<10); + + dmu_buf_rele(dbp, FTAG); +} + +/* + * Change 'sh_bof' to the beginning of the next record. + */ +static int +spa_history_advance_bof(spa_t *spa, spa_history_phys_t *shpp) +{ + objset_t *mos = spa->spa_meta_objset; + uint64_t firstread, reclen, phys_bof; + char buf[sizeof (reclen)]; + int err; + + phys_bof = spa_history_log_to_phys(shpp->sh_bof, shpp); + firstread = MIN(sizeof (reclen), shpp->sh_phys_max_off - phys_bof); + + if ((err = dmu_read(mos, spa->spa_history, phys_bof, firstread, + buf)) != 0) + return (err); + if (firstread != sizeof (reclen)) { + if ((err = dmu_read(mos, spa->spa_history, + shpp->sh_pool_create_len, sizeof (reclen) - firstread, + buf + firstread)) != 0) + return (err); + } + + reclen = LE_64(*((uint64_t *)buf)); + shpp->sh_bof += reclen + sizeof (reclen); + shpp->sh_records_lost++; + return (0); +} + +static int +spa_history_write(spa_t *spa, void *buf, uint64_t len, spa_history_phys_t *shpp, + dmu_tx_t *tx) +{ + uint64_t firstwrite, phys_eof; + objset_t *mos = spa->spa_meta_objset; + int err; + + ASSERT(MUTEX_HELD(&spa->spa_history_lock)); + + /* see if we need to reset logical BOF */ + while (shpp->sh_phys_max_off - shpp->sh_pool_create_len - + (shpp->sh_eof - shpp->sh_bof) <= len) { + if ((err = spa_history_advance_bof(spa, shpp)) != 0) { + return (err); + } + } + + phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp); + firstwrite = MIN(len, shpp->sh_phys_max_off - phys_eof); + shpp->sh_eof += len; + dmu_write(mos, spa->spa_history, phys_eof, firstwrite, buf, tx); + + len -= firstwrite; + if (len > 0) { + /* write out the rest at the beginning of physical file */ + dmu_write(mos, spa->spa_history, shpp->sh_pool_create_len, + len, (char *)buf + firstwrite, tx); + } + + return (0); +} + +static char * +spa_history_zone() +{ +#ifdef _KERNEL + return (curproc->p_zone->zone_name); +#else + return ("global"); +#endif +} + +/* + * Write out a history event. 
+ */ +static void +spa_history_log_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) +{ + spa_t *spa = arg1; + history_arg_t *hap = arg2; + const char *history_str = hap->ha_history_str; + objset_t *mos = spa->spa_meta_objset; + dmu_buf_t *dbp; + spa_history_phys_t *shpp; + size_t reclen; + uint64_t le_len; + nvlist_t *nvrecord; + char *record_packed = NULL; + int ret; + + /* + * If we have an older pool that doesn't have a command + * history object, create it now. + */ + mutex_enter(&spa->spa_history_lock); + if (!spa->spa_history) + spa_history_create_obj(spa, tx); + mutex_exit(&spa->spa_history_lock); + + /* + * Get the offset of where we need to write via the bonus buffer. + * Update the offset when the write completes. + */ + VERIFY(0 == dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)); + shpp = dbp->db_data; + + dmu_buf_will_dirty(dbp, tx); + +#ifdef ZFS_DEBUG + { + dmu_object_info_t doi; + dmu_object_info_from_db(dbp, &doi); + ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS); + } +#endif + + VERIFY(nvlist_alloc(&nvrecord, NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_TIME, + gethrestime_sec()) == 0); + VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_WHO, + (uint64_t)crgetuid(cr)) == 0); + if (hap->ha_zone[0] != '\0') + VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_ZONE, + hap->ha_zone) == 0); +#ifdef _KERNEL + VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_HOST, + utsname.nodename) == 0); +#endif + if (hap->ha_log_type == LOG_CMD_POOL_CREATE || + hap->ha_log_type == LOG_CMD_NORMAL) { + VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_CMD, + history_str) == 0); + } else { + VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_INT_EVENT, + hap->ha_event) == 0); + VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_TXG, + tx->tx_txg) == 0); + VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_INT_STR, + history_str) == 0); + } + + VERIFY(nvlist_size(nvrecord, &reclen, NV_ENCODE_XDR) == 0); + record_packed = kmem_alloc(reclen, KM_SLEEP); + + VERIFY(nvlist_pack(nvrecord, &record_packed, &reclen, + NV_ENCODE_XDR, KM_SLEEP) == 0); + + mutex_enter(&spa->spa_history_lock); + if (hap->ha_log_type == LOG_CMD_POOL_CREATE) + VERIFY(shpp->sh_eof == shpp->sh_pool_create_len); + + /* write out the packed length as little endian */ + le_len = LE_64((uint64_t)reclen); + ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx); + if (!ret) + ret = spa_history_write(spa, record_packed, reclen, shpp, tx); + + if (!ret && hap->ha_log_type == LOG_CMD_POOL_CREATE) { + shpp->sh_pool_create_len += sizeof (le_len) + reclen; + shpp->sh_bof = shpp->sh_pool_create_len; + } + + mutex_exit(&spa->spa_history_lock); + nvlist_free(nvrecord); + kmem_free(record_packed, reclen); + dmu_buf_rele(dbp, FTAG); + + if (hap->ha_log_type == LOG_INTERNAL) { + kmem_free((void*)hap->ha_history_str, HIS_MAX_RECORD_LEN); + kmem_free(hap, sizeof (history_arg_t)); + } +} + +/* + * Write out a history event. + */ +int +spa_history_log(spa_t *spa, const char *history_str, history_log_type_t what) +{ + history_arg_t ha; + + ASSERT(what != LOG_INTERNAL); + + ha.ha_history_str = history_str; + ha.ha_log_type = what; + (void) strlcpy(ha.ha_zone, spa_history_zone(), sizeof (ha.ha_zone)); + return (dsl_sync_task_do(spa_get_dsl(spa), NULL, spa_history_log_sync, + spa, &ha, 0)); +} + +/* + * Read out the command history. 
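The on-disk record format used by spa_history_log_sync() is an XDR-packed nvlist preceded by its packed length stored as a little-endian uint64_t. Below is a hedged userland sketch of building such a record with libnvpair; the "time" and "cmd" keys are simplified stand-ins for the ZPOOL_HIST_* names, and the assumption that the packed buffer is released with free() applies only to the userland library:

#include <libnvpair.h>
#include <stdlib.h>
#include <time.h>

static int
pack_history_record(const char *cmd, char **bufp, size_t *lenp)
{
	nvlist_t *nvl;
	char *packed = NULL;
	size_t packed_len = 0;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (-1);

	/* simplified record: the real code also logs uid, zone and host */
	if (nvlist_add_uint64(nvl, "time", (uint64_t)time(NULL)) != 0 ||
	    nvlist_add_string(nvl, "cmd", cmd) != 0 ||
	    nvlist_pack(nvl, &packed, &packed_len, NV_ENCODE_XDR, 0) != 0) {
		nvlist_free(nvl);
		return (-1);
	}
	nvlist_free(nvl);

	*bufp = packed;		/* caller frees with free() in userland */
	*lenp = packed_len;
	return (0);
}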
+ */ +int +spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf) +{ + objset_t *mos = spa->spa_meta_objset; + dmu_buf_t *dbp; + uint64_t read_len, phys_read_off, phys_eof; + uint64_t leftover = 0; + spa_history_phys_t *shpp; + int err; + + /* + * If the command history doesn't exist (older pool), + * that's ok, just return ENOENT. + */ + if (!spa->spa_history) + return (ENOENT); + + if ((err = dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)) != 0) + return (err); + shpp = dbp->db_data; + +#ifdef ZFS_DEBUG + { + dmu_object_info_t doi; + dmu_object_info_from_db(dbp, &doi); + ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS); + } +#endif + + mutex_enter(&spa->spa_history_lock); + phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp); + + if (*offp < shpp->sh_pool_create_len) { + /* read in just the zpool create history */ + phys_read_off = *offp; + read_len = MIN(*len, shpp->sh_pool_create_len - + phys_read_off); + } else { + /* + * Need to reset passed in offset to BOF if the passed in + * offset has since been overwritten. + */ + *offp = MAX(*offp, shpp->sh_bof); + phys_read_off = spa_history_log_to_phys(*offp, shpp); + + /* + * Read up to the minimum of what the user passed down or + * the EOF (physical or logical). If we hit physical EOF, + * use 'leftover' to read from the physical BOF. + */ + if (phys_read_off <= phys_eof) { + read_len = MIN(*len, phys_eof - phys_read_off); + } else { + read_len = MIN(*len, + shpp->sh_phys_max_off - phys_read_off); + if (phys_read_off + *len > shpp->sh_phys_max_off) { + leftover = MIN(*len - read_len, + phys_eof - shpp->sh_pool_create_len); + } + } + } + + /* offset for consumer to use next */ + *offp += read_len + leftover; + + /* tell the consumer how much you actually read */ + *len = read_len + leftover; + + if (read_len == 0) { + mutex_exit(&spa->spa_history_lock); + dmu_buf_rele(dbp, FTAG); + return (0); + } + + err = dmu_read(mos, spa->spa_history, phys_read_off, read_len, buf); + if (leftover && err == 0) { + err = dmu_read(mos, spa->spa_history, shpp->sh_pool_create_len, + leftover, buf + read_len); + } + mutex_exit(&spa->spa_history_lock); + + dmu_buf_rele(dbp, FTAG); + return (err); +} + +void +spa_history_internal_log(history_internal_events_t event, spa_t *spa, + dmu_tx_t *tx, cred_t *cr, const char *fmt, ...) +{ + history_arg_t *hap; + char *str; + va_list adx; + + /* + * If this is part of creating a pool, not everything is + * initialized yet, so don't bother logging the internal events. + */ + if (tx->tx_txg == TXG_INITIAL) + return; + + hap = kmem_alloc(sizeof (history_arg_t), KM_SLEEP); + str = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP); + + va_start(adx, fmt); + (void) vsnprintf(str, HIS_MAX_RECORD_LEN, fmt, adx); + va_end(adx); + + hap->ha_log_type = LOG_INTERNAL; + hap->ha_history_str = str; + hap->ha_event = event; + hap->ha_zone[0] = '\0'; + + if (dmu_tx_is_syncing(tx)) { + spa_history_log_sync(spa, hap, cr, tx); + } else { + dsl_sync_task_do_nowait(spa_get_dsl(spa), NULL, + spa_history_log_sync, spa, hap, 0, tx); + } + /* spa_history_log_sync() will free hap and str */ +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/spa_misc.c b/external/cddl/osnet/dist/uts/common/fs/zfs/spa_misc.c new file mode 100644 index 000000000000..36046e6df1c0 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/spa_misc.c @@ -0,0 +1,1410 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). 
+ * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "zfs_prop.h" + +/* + * SPA locking + * + * There are four basic locks for managing spa_t structures: + * + * spa_namespace_lock (global mutex) + * + * This lock must be acquired to do any of the following: + * + * - Lookup a spa_t by name + * - Add or remove a spa_t from the namespace + * - Increase spa_refcount from non-zero + * - Check if spa_refcount is zero + * - Rename a spa_t + * - add/remove/attach/detach devices + * - Held for the duration of create/destroy/import/export + * + * It does not need to handle recursion. A create or destroy may + * reference objects (files or zvols) in other pools, but by + * definition they must have an existing reference, and will never need + * to lookup a spa_t by name. + * + * spa_refcount (per-spa refcount_t protected by mutex) + * + * This reference count keep track of any active users of the spa_t. The + * spa_t cannot be destroyed or freed while this is non-zero. Internally, + * the refcount is never really 'zero' - opening a pool implicitly keeps + * some references in the DMU. Internally we check against spa_minref, but + * present the image of a zero/non-zero value to consumers. + * + * spa_config_lock[] (per-spa array of rwlocks) + * + * This protects the spa_t from config changes, and must be held in + * the following circumstances: + * + * - RW_READER to perform I/O to the spa + * - RW_WRITER to change the vdev config + * + * The locking order is fairly straightforward: + * + * spa_namespace_lock -> spa_refcount + * + * The namespace lock must be acquired to increase the refcount from 0 + * or to check if it is zero. + * + * spa_refcount -> spa_config_lock[] + * + * There must be at least one valid reference on the spa_t to acquire + * the config lock. + * + * spa_namespace_lock -> spa_config_lock[] + * + * The namespace lock must always be taken before the config lock. + * + * + * The spa_namespace_lock can be acquired directly and is globally visible. + * + * The namespace is manipulated using the following functions, all of which + * require the spa_namespace_lock to be held. + * + * spa_lookup() Lookup a spa_t by name. + * + * spa_add() Create a new spa_t in the namespace. + * + * spa_remove() Remove a spa_t from the namespace. This also + * frees up any memory associated with the spa_t. + * + * spa_next() Returns the next spa_t in the system, or the + * first if NULL is passed. + * + * spa_evict_all() Shutdown and remove all spa_t structures in + * the system. + * + * spa_guid_exists() Determine whether a pool/device guid exists. 
+ * + * The spa_refcount is manipulated using the following functions: + * + * spa_open_ref() Adds a reference to the given spa_t. Must be + * called with spa_namespace_lock held if the + * refcount is currently zero. + * + * spa_close() Remove a reference from the spa_t. This will + * not free the spa_t or remove it from the + * namespace. No locking is required. + * + * spa_refcount_zero() Returns true if the refcount is currently + * zero. Must be called with spa_namespace_lock + * held. + * + * The spa_config_lock[] is an array of rwlocks, ordered as follows: + * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. + * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}(). + * + * To read the configuration, it suffices to hold one of these locks as reader. + * To modify the configuration, you must hold all locks as writer. To modify + * vdev state without altering the vdev tree's topology (e.g. online/offline), + * you must hold SCL_STATE and SCL_ZIO as writer. + * + * We use these distinct config locks to avoid recursive lock entry. + * For example, spa_sync() (which holds SCL_CONFIG as reader) induces + * block allocations (SCL_ALLOC), which may require reading space maps + * from disk (dmu_read() -> zio_read() -> SCL_ZIO). + * + * The spa config locks cannot be normal rwlocks because we need the + * ability to hand off ownership. For example, SCL_ZIO is acquired + * by the issuing thread and later released by an interrupt thread. + * They do, however, obey the usual write-wanted semantics to prevent + * writer (i.e. system administrator) starvation. + * + * The lock acquisition rules are as follows: + * + * SCL_CONFIG + * Protects changes to the vdev tree topology, such as vdev + * add/remove/attach/detach. Protects the dirty config list + * (spa_config_dirty_list) and the set of spares and l2arc devices. + * + * SCL_STATE + * Protects changes to pool state and vdev state, such as vdev + * online/offline/fault/degrade/clear. Protects the dirty state list + * (spa_state_dirty_list) and global pool state (spa_state). + * + * SCL_ALLOC + * Protects changes to metaslab groups and classes. + * Held as reader by metaslab_alloc() and metaslab_claim(). + * + * SCL_ZIO + * Held by bp-level zios (those which have no io_vd upon entry) + * to prevent changes to the vdev tree. The bp-level zio implicitly + * protects all of its vdev child zios, which do not hold SCL_ZIO. + * + * SCL_FREE + * Protects changes to metaslab groups and classes. + * Held as reader by metaslab_free(). SCL_FREE is distinct from + * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free + * blocks in zio_done() while another i/o that holds either + * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete. + * + * SCL_VDEV + * Held as reader to prevent changes to the vdev tree during trivial + * inquiries such as bp_get_dasize(). SCL_VDEV is distinct from the + * other locks, and lower than all of them, to ensure that it's safe + * to acquire regardless of caller context. + * + * In addition, the following rules apply: + * + * (a) spa_props_lock protects pool properties, spa_config and spa_config_list. + * The lock ordering is SCL_CONFIG > spa_props_lock. + * + * (b) I/O operations on leaf vdevs. For any zio operation that takes + * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(), + * or zio_write_phys() -- the caller must ensure that the config cannot + * cannot change in the interim, and that the vdev cannot be reopened. 
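As a reader-side illustration of the rules above, the usual pattern is to take the narrowest lock that covers the structure being inspected, do the work, and drop it; example_count_top_vdevs is a hypothetical helper shown only to make the enter/exit pairing concrete:

static uint64_t
example_count_top_vdevs(spa_t *spa)
{
	uint64_t children;

	/* SCL_VDEV as reader is enough for a trivial vdev-tree inquiry */
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	children = spa->spa_root_vdev->vdev_children;
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (children);
}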
+ * SCL_STATE as reader suffices for both. + * + * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit(). + * + * spa_vdev_enter() Acquire the namespace lock and the config lock + * for writing. + * + * spa_vdev_exit() Release the config lock, wait for all I/O + * to complete, sync the updated configs to the + * cache, and release the namespace lock. + * + * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit(). + * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual + * locking is, always, based on spa_namespace_lock and spa_config_lock[]. + * + * spa_rename() is also implemented within this file since is requires + * manipulation of the namespace. + */ + +static avl_tree_t spa_namespace_avl; +kmutex_t spa_namespace_lock; +static kcondvar_t spa_namespace_cv; +static int spa_active_count; +int spa_max_replication_override = SPA_DVAS_PER_BP; + +static kmutex_t spa_spare_lock; +static avl_tree_t spa_spare_avl; +static kmutex_t spa_l2cache_lock; +static avl_tree_t spa_l2cache_avl; + +kmem_cache_t *spa_buffer_pool; +int spa_mode; + +#ifdef ZFS_DEBUG +/* Everything except dprintf is on by default in debug builds */ +int zfs_flags = ~ZFS_DEBUG_DPRINTF; +#else +int zfs_flags = 0; +#endif + +/* + * zfs_recover can be set to nonzero to attempt to recover from + * otherwise-fatal errors, typically caused by on-disk corruption. When + * set, calls to zfs_panic_recover() will turn into warning messages. + */ +int zfs_recover = 0; + + +/* + * ========================================================================== + * SPA config locking + * ========================================================================== + */ +static void +spa_config_lock_init(spa_t *spa) +{ + for (int i = 0; i < SCL_LOCKS; i++) { + spa_config_lock_t *scl = &spa->spa_config_lock[i]; + mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); + cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); + refcount_create(&scl->scl_count); + scl->scl_writer = NULL; + scl->scl_write_wanted = 0; + } +} + +static void +spa_config_lock_destroy(spa_t *spa) +{ + for (int i = 0; i < SCL_LOCKS; i++) { + spa_config_lock_t *scl = &spa->spa_config_lock[i]; + mutex_destroy(&scl->scl_lock); + cv_destroy(&scl->scl_cv); + refcount_destroy(&scl->scl_count); + ASSERT(scl->scl_writer == NULL); + ASSERT(scl->scl_write_wanted == 0); + } +} + +int +spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) +{ + for (int i = 0; i < SCL_LOCKS; i++) { + spa_config_lock_t *scl = &spa->spa_config_lock[i]; + if (!(locks & (1 << i))) + continue; + mutex_enter(&scl->scl_lock); + if (rw == RW_READER) { + if (scl->scl_writer || scl->scl_write_wanted) { + mutex_exit(&scl->scl_lock); + spa_config_exit(spa, locks ^ (1 << i), tag); + return (0); + } + } else { + ASSERT(scl->scl_writer != curthread); + if (!refcount_is_zero(&scl->scl_count)) { + mutex_exit(&scl->scl_lock); + spa_config_exit(spa, locks ^ (1 << i), tag); + return (0); + } + scl->scl_writer = curthread; + } + (void) refcount_add(&scl->scl_count, tag); + mutex_exit(&scl->scl_lock); + } + return (1); +} + +void +spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw) +{ + for (int i = 0; i < SCL_LOCKS; i++) { + spa_config_lock_t *scl = &spa->spa_config_lock[i]; + if (!(locks & (1 << i))) + continue; + mutex_enter(&scl->scl_lock); + if (rw == RW_READER) { + while (scl->scl_writer || scl->scl_write_wanted) { + cv_wait(&scl->scl_cv, &scl->scl_lock); + } + } else { + ASSERT(scl->scl_writer != curthread); + while (!refcount_is_zero(&scl->scl_count)) { 
+ scl->scl_write_wanted++; + cv_wait(&scl->scl_cv, &scl->scl_lock); + scl->scl_write_wanted--; + } + scl->scl_writer = curthread; + } + (void) refcount_add(&scl->scl_count, tag); + mutex_exit(&scl->scl_lock); + } +} + +void +spa_config_exit(spa_t *spa, int locks, void *tag) +{ + for (int i = SCL_LOCKS - 1; i >= 0; i--) { + spa_config_lock_t *scl = &spa->spa_config_lock[i]; + if (!(locks & (1 << i))) + continue; + mutex_enter(&scl->scl_lock); + ASSERT(!refcount_is_zero(&scl->scl_count)); + if (refcount_remove(&scl->scl_count, tag) == 0) { + ASSERT(scl->scl_writer == NULL || + scl->scl_writer == curthread); + scl->scl_writer = NULL; /* OK in either case */ + cv_broadcast(&scl->scl_cv); + } + mutex_exit(&scl->scl_lock); + } +} + +int +spa_config_held(spa_t *spa, int locks, krw_t rw) +{ + int locks_held = 0; + + for (int i = 0; i < SCL_LOCKS; i++) { + spa_config_lock_t *scl = &spa->spa_config_lock[i]; + if (!(locks & (1 << i))) + continue; + if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) || + (rw == RW_WRITER && scl->scl_writer == curthread)) + locks_held |= 1 << i; + } + + return (locks_held); +} + +/* + * ========================================================================== + * SPA namespace functions + * ========================================================================== + */ + +/* + * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. + * Returns NULL if no matching spa_t is found. + */ +spa_t * +spa_lookup(const char *name) +{ + static spa_t search; /* spa_t is large; don't allocate on stack */ + spa_t *spa; + avl_index_t where; + char c; + char *cp; + + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + /* + * If it's a full dataset name, figure out the pool name and + * just use that. + */ + cp = strpbrk(name, "/@"); + if (cp) { + c = *cp; + *cp = '\0'; + } + + (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); + spa = avl_find(&spa_namespace_avl, &search, &where); + + if (cp) + *cp = c; + + return (spa); +} + +/* + * Create an uninitialized spa_t with the given name. Requires + * spa_namespace_lock. The caller must ensure that the spa_t doesn't already + * exist by calling spa_lookup() first. 
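A hedged usage sketch of the convention just described: hold spa_namespace_lock across both the lookup and any use of the returned spa_t (example_pool_exists is a hypothetical helper):

static boolean_t
example_pool_exists(const char *name)
{
	boolean_t exists;

	mutex_enter(&spa_namespace_lock);
	exists = (spa_lookup(name) != NULL);
	mutex_exit(&spa_namespace_lock);

	return (exists);
}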
+ */ +spa_t * +spa_add(const char *name, const char *altroot) +{ + spa_t *spa; + spa_config_dirent_t *dp; + + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); + + mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&spa->spa_async_root_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); + + cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); + cv_init(&spa->spa_async_root_cv, NULL, CV_DEFAULT, NULL); + cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); + cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); + + (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); + spa->spa_state = POOL_STATE_UNINITIALIZED; + spa->spa_freeze_txg = UINT64_MAX; + spa->spa_final_txg = UINT64_MAX; + + refcount_create(&spa->spa_refcount); + spa_config_lock_init(spa); + + avl_add(&spa_namespace_avl, spa); + + mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); + + /* + * Set the alternate root, if there is one. + */ + if (altroot) { + spa->spa_root = spa_strdup(altroot); + spa_active_count++; + } + + /* + * Every pool starts with the default cachefile + */ + list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), + offsetof(spa_config_dirent_t, scd_link)); + + dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); + dp->scd_path = spa_strdup(spa_config_path); + list_insert_head(&spa->spa_config_list, dp); + + return (spa); +} + +/* + * Removes a spa_t from the namespace, freeing up any memory used. Requires + * spa_namespace_lock. This is called only after the spa_t has been closed and + * deactivated. + */ +void +spa_remove(spa_t *spa) +{ + spa_config_dirent_t *dp; + + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); + + avl_remove(&spa_namespace_avl, spa); + cv_broadcast(&spa_namespace_cv); + + if (spa->spa_root) { + spa_strfree(spa->spa_root); + spa_active_count--; + } + + while ((dp = list_head(&spa->spa_config_list)) != NULL) { + list_remove(&spa->spa_config_list, dp); + if (dp->scd_path != NULL) + spa_strfree(dp->scd_path); + kmem_free(dp, sizeof (spa_config_dirent_t)); + } + + list_destroy(&spa->spa_config_list); + + spa_config_set(spa, NULL); + + refcount_destroy(&spa->spa_refcount); + + spa_config_lock_destroy(spa); + + cv_destroy(&spa->spa_async_cv); + cv_destroy(&spa->spa_async_root_cv); + cv_destroy(&spa->spa_scrub_io_cv); + cv_destroy(&spa->spa_suspend_cv); + + mutex_destroy(&spa->spa_async_lock); + mutex_destroy(&spa->spa_async_root_lock); + mutex_destroy(&spa->spa_scrub_lock); + mutex_destroy(&spa->spa_errlog_lock); + mutex_destroy(&spa->spa_errlist_lock); + mutex_destroy(&spa->spa_sync_bplist.bpl_lock); + mutex_destroy(&spa->spa_history_lock); + mutex_destroy(&spa->spa_props_lock); + mutex_destroy(&spa->spa_suspend_lock); + + kmem_free(spa, sizeof (spa_t)); +} + +/* + * Given a pool, return the next pool in the namespace, or NULL if there is + * none. If 'prev' is NULL, return the first pool. 
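Walking the namespace with spa_next() follows the same rule; the hypothetical helper below simply counts pools while holding spa_namespace_lock for the duration of the walk:

static int
example_count_pools(void)
{
	spa_t *spa = NULL;
	int n = 0;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL)
		n++;
	mutex_exit(&spa_namespace_lock);

	return (n);
}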
+ */ +spa_t * +spa_next(spa_t *prev) +{ + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + if (prev) + return (AVL_NEXT(&spa_namespace_avl, prev)); + else + return (avl_first(&spa_namespace_avl)); +} + +/* + * ========================================================================== + * SPA refcount functions + * ========================================================================== + */ + +/* + * Add a reference to the given spa_t. Must have at least one reference, or + * have the namespace lock held. + */ +void +spa_open_ref(spa_t *spa, void *tag) +{ + ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref || + MUTEX_HELD(&spa_namespace_lock)); + (void) refcount_add(&spa->spa_refcount, tag); +} + +/* + * Remove a reference to the given spa_t. Must have at least one reference, or + * have the namespace lock held. + */ +void +spa_close(spa_t *spa, void *tag) +{ + ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref || + MUTEX_HELD(&spa_namespace_lock)); + (void) refcount_remove(&spa->spa_refcount, tag); +} + +/* + * Check to see if the spa refcount is zero. Must be called with + * spa_namespace_lock held. We really compare against spa_minref, which is the + * number of references acquired when opening a pool + */ +boolean_t +spa_refcount_zero(spa_t *spa) +{ + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + return (refcount_count(&spa->spa_refcount) == spa->spa_minref); +} + +/* + * ========================================================================== + * SPA spare and l2cache tracking + * ========================================================================== + */ + +/* + * Hot spares and cache devices are tracked using the same code below, + * for 'auxiliary' devices. + */ + +typedef struct spa_aux { + uint64_t aux_guid; + uint64_t aux_pool; + avl_node_t aux_avl; + int aux_count; +} spa_aux_t; + +static int +spa_aux_compare(const void *a, const void *b) +{ + const spa_aux_t *sa = a; + const spa_aux_t *sb = b; + + if (sa->aux_guid < sb->aux_guid) + return (-1); + else if (sa->aux_guid > sb->aux_guid) + return (1); + else + return (0); +} + +void +spa_aux_add(vdev_t *vd, avl_tree_t *avl) +{ + avl_index_t where; + spa_aux_t search; + spa_aux_t *aux; + + search.aux_guid = vd->vdev_guid; + if ((aux = avl_find(avl, &search, &where)) != NULL) { + aux->aux_count++; + } else { + aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); + aux->aux_guid = vd->vdev_guid; + aux->aux_count = 1; + avl_insert(avl, aux, where); + } +} + +void +spa_aux_remove(vdev_t *vd, avl_tree_t *avl) +{ + spa_aux_t search; + spa_aux_t *aux; + avl_index_t where; + + search.aux_guid = vd->vdev_guid; + aux = avl_find(avl, &search, &where); + + ASSERT(aux != NULL); + + if (--aux->aux_count == 0) { + avl_remove(avl, aux); + kmem_free(aux, sizeof (spa_aux_t)); + } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { + aux->aux_pool = 0ULL; + } +} + +boolean_t +spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) +{ + spa_aux_t search, *found; + + search.aux_guid = guid; + found = avl_find(avl, &search, NULL); + + if (pool) { + if (found) + *pool = found->aux_pool; + else + *pool = 0ULL; + } + + if (refcnt) { + if (found) + *refcnt = found->aux_count; + else + *refcnt = 0; + } + + return (found != NULL); +} + +void +spa_aux_activate(vdev_t *vd, avl_tree_t *avl) +{ + spa_aux_t search, *found; + avl_index_t where; + + search.aux_guid = vd->vdev_guid; + found = avl_find(avl, &search, &where); + ASSERT(found != NULL); + ASSERT(found->aux_pool == 0ULL); + + found->aux_pool = spa_guid(vd->vdev_spa); 
+} + +/* + * Spares are tracked globally due to the following constraints: + * + * - A spare may be part of multiple pools. + * - A spare may be added to a pool even if it's actively in use within + * another pool. + * - A spare in use in any pool can only be the source of a replacement if + * the target is a spare in the same pool. + * + * We keep track of all spares on the system through the use of a reference + * counted AVL tree. When a vdev is added as a spare, or used as a replacement + * spare, then we bump the reference count in the AVL tree. In addition, we set + * the 'vdev_isspare' member to indicate that the device is a spare (active or + * inactive). When a spare is made active (used to replace a device in the + * pool), we also keep track of which pool its been made a part of. + * + * The 'spa_spare_lock' protects the AVL tree. These functions are normally + * called under the spa_namespace lock as part of vdev reconfiguration. The + * separate spare lock exists for the status query path, which does not need to + * be completely consistent with respect to other vdev configuration changes. + */ + +static int +spa_spare_compare(const void *a, const void *b) +{ + return (spa_aux_compare(a, b)); +} + +void +spa_spare_add(vdev_t *vd) +{ + mutex_enter(&spa_spare_lock); + ASSERT(!vd->vdev_isspare); + spa_aux_add(vd, &spa_spare_avl); + vd->vdev_isspare = B_TRUE; + mutex_exit(&spa_spare_lock); +} + +void +spa_spare_remove(vdev_t *vd) +{ + mutex_enter(&spa_spare_lock); + ASSERT(vd->vdev_isspare); + spa_aux_remove(vd, &spa_spare_avl); + vd->vdev_isspare = B_FALSE; + mutex_exit(&spa_spare_lock); +} + +boolean_t +spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) +{ + boolean_t found; + + mutex_enter(&spa_spare_lock); + found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); + mutex_exit(&spa_spare_lock); + + return (found); +} + +void +spa_spare_activate(vdev_t *vd) +{ + mutex_enter(&spa_spare_lock); + ASSERT(vd->vdev_isspare); + spa_aux_activate(vd, &spa_spare_avl); + mutex_exit(&spa_spare_lock); +} + +/* + * Level 2 ARC devices are tracked globally for the same reasons as spares. + * Cache devices currently only support one pool per cache device, and so + * for these devices the aux reference count is currently unused beyond 1. 
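Because the spare AVL tree has its own lock, status queries can ask about a spare without taking the namespace lock. A hypothetical helper illustrating that query path (a non-zero aux_pool means the spare is currently active in some pool):

static boolean_t
example_spare_in_use(uint64_t guid)
{
	uint64_t pool = 0;

	return (spa_spare_exists(guid, &pool, NULL) && pool != 0);
}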
+ */ + +static int +spa_l2cache_compare(const void *a, const void *b) +{ + return (spa_aux_compare(a, b)); +} + +void +spa_l2cache_add(vdev_t *vd) +{ + mutex_enter(&spa_l2cache_lock); + ASSERT(!vd->vdev_isl2cache); + spa_aux_add(vd, &spa_l2cache_avl); + vd->vdev_isl2cache = B_TRUE; + mutex_exit(&spa_l2cache_lock); +} + +void +spa_l2cache_remove(vdev_t *vd) +{ + mutex_enter(&spa_l2cache_lock); + ASSERT(vd->vdev_isl2cache); + spa_aux_remove(vd, &spa_l2cache_avl); + vd->vdev_isl2cache = B_FALSE; + mutex_exit(&spa_l2cache_lock); +} + +boolean_t +spa_l2cache_exists(uint64_t guid, uint64_t *pool) +{ + boolean_t found; + + mutex_enter(&spa_l2cache_lock); + found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); + mutex_exit(&spa_l2cache_lock); + + return (found); +} + +void +spa_l2cache_activate(vdev_t *vd) +{ + mutex_enter(&spa_l2cache_lock); + ASSERT(vd->vdev_isl2cache); + spa_aux_activate(vd, &spa_l2cache_avl); + mutex_exit(&spa_l2cache_lock); +} + +void +spa_l2cache_space_update(vdev_t *vd, int64_t space, int64_t alloc) +{ + vdev_space_update(vd, space, alloc, B_FALSE); +} + +/* + * ========================================================================== + * SPA vdev locking + * ========================================================================== + */ + +/* + * Lock the given spa_t for the purpose of adding or removing a vdev. + * Grabs the global spa_namespace_lock plus the spa config lock for writing. + * It returns the next transaction group for the spa_t. + */ +uint64_t +spa_vdev_enter(spa_t *spa) +{ + mutex_enter(&spa_namespace_lock); + + spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); + + return (spa_last_synced_txg(spa) + 1); +} + +/* + * Unlock the spa_t after adding or removing a vdev. Besides undoing the + * locking of spa_vdev_enter(), we also want make sure the transactions have + * synced to disk, and then update the global configuration cache with the new + * information. + */ +int +spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) +{ + int config_changed = B_FALSE; + + ASSERT(txg > spa_last_synced_txg(spa)); + + spa->spa_pending_vdev = NULL; + + /* + * Reassess the DTLs. + */ + vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE); + + /* + * If the config changed, notify the scrub thread that it must restart. + */ + if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { + dsl_pool_scrub_restart(spa->spa_dsl_pool); + config_changed = B_TRUE; + } + + spa_config_exit(spa, SCL_ALL, spa); + + /* + * Note: this txg_wait_synced() is important because it ensures + * that there won't be more than one config change per txg. + * This allows us to use the txg as the generation number. + */ + if (error == 0) + txg_wait_synced(spa->spa_dsl_pool, txg); + + if (vd != NULL) { + ASSERT(!vd->vdev_detached || vd->vdev_dtl.smo_object == 0); + vdev_free(vd); + } + + /* + * If the config changed, update the config cache. + */ + if (config_changed) + spa_config_sync(spa, B_FALSE, B_TRUE); + + mutex_exit(&spa_namespace_lock); + + return (error); +} + +/* + * Lock the given spa_t for the purpose of changing vdev state. 
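The enter/exit pair above is normally used as a bracket around a vdev configuration change; the hypothetical skeleton below performs no real change, but shows how the txg returned by spa_vdev_enter() is handed back to spa_vdev_exit() so it can wait for that txg to sync and update the config cache:

static int
example_vdev_noop_change(spa_t *spa)
{
	uint64_t txg;

	txg = spa_vdev_enter(spa);

	/* ... modify the vdev tree here ... */

	return (spa_vdev_exit(spa, NULL, txg, 0));
}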
+ */ +void +spa_vdev_state_enter(spa_t *spa) +{ + spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER); +} + +int +spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) +{ + if (vd != NULL) + vdev_state_dirty(vd->vdev_top); + + spa_config_exit(spa, SCL_STATE_ALL, spa); + + return (error); +} + +/* + * ========================================================================== + * Miscellaneous functions + * ========================================================================== + */ + +/* + * Rename a spa_t. + */ +int +spa_rename(const char *name, const char *newname) +{ + spa_t *spa; + int err; + + /* + * Lookup the spa_t and grab the config lock for writing. We need to + * actually open the pool so that we can sync out the necessary labels. + * It's OK to call spa_open() with the namespace lock held because we + * allow recursive calls for other reasons. + */ + mutex_enter(&spa_namespace_lock); + if ((err = spa_open(name, &spa, FTAG)) != 0) { + mutex_exit(&spa_namespace_lock); + return (err); + } + + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + + avl_remove(&spa_namespace_avl, spa); + (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name)); + avl_add(&spa_namespace_avl, spa); + + /* + * Sync all labels to disk with the new names by marking the root vdev + * dirty and waiting for it to sync. It will pick up the new pool name + * during the sync. + */ + vdev_config_dirty(spa->spa_root_vdev); + + spa_config_exit(spa, SCL_ALL, FTAG); + + txg_wait_synced(spa->spa_dsl_pool, 0); + + /* + * Sync the updated config cache. + */ + spa_config_sync(spa, B_FALSE, B_TRUE); + + spa_close(spa, FTAG); + + mutex_exit(&spa_namespace_lock); + + return (0); +} + + +/* + * Determine whether a pool with given pool_guid exists. If device_guid is + * non-zero, determine whether the pool exists *and* contains a device with the + * specified device_guid. + */ +boolean_t +spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) +{ + spa_t *spa; + avl_tree_t *t = &spa_namespace_avl; + + ASSERT(MUTEX_HELD(&spa_namespace_lock)); + + for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { + if (spa->spa_state == POOL_STATE_UNINITIALIZED) + continue; + if (spa->spa_root_vdev == NULL) + continue; + if (spa_guid(spa) == pool_guid) { + if (device_guid == 0) + break; + + if (vdev_lookup_by_guid(spa->spa_root_vdev, + device_guid) != NULL) + break; + + /* + * Check any devices we may be in the process of adding. 
+ */ + if (spa->spa_pending_vdev) { + if (vdev_lookup_by_guid(spa->spa_pending_vdev, + device_guid) != NULL) + break; + } + } + } + + return (spa != NULL); +} + +char * +spa_strdup(const char *s) +{ + size_t len; + char *new; + + len = strlen(s); + new = kmem_alloc(len + 1, KM_SLEEP); + bcopy(s, new, len); + new[len] = '\0'; + + return (new); +} + +void +spa_strfree(char *s) +{ + kmem_free(s, strlen(s) + 1); +} + +uint64_t +spa_get_random(uint64_t range) +{ + uint64_t r; + + ASSERT(range != 0); + + (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t)); + + return (r % range); +} + +void +sprintf_blkptr(char *buf, int len, const blkptr_t *bp) +{ + int d; + + if (bp == NULL) { + (void) snprintf(buf, len, ""); + return; + } + + if (BP_IS_HOLE(bp)) { + (void) snprintf(buf, len, ""); + return; + } + + (void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ", + (u_longlong_t)BP_GET_LEVEL(bp), + dmu_ot[BP_GET_TYPE(bp)].ot_name, + (u_longlong_t)BP_GET_LSIZE(bp), + (u_longlong_t)BP_GET_PSIZE(bp)); + + for (d = 0; d < BP_GET_NDVAS(bp); d++) { + const dva_t *dva = &bp->blk_dva[d]; + (void) snprintf(buf + strlen(buf), len - strlen(buf), + "DVA[%d]=<%llu:%llx:%llx> ", d, + (u_longlong_t)DVA_GET_VDEV(dva), + (u_longlong_t)DVA_GET_OFFSET(dva), + (u_longlong_t)DVA_GET_ASIZE(dva)); + } + + (void) snprintf(buf + strlen(buf), len - strlen(buf), + "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx", + zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name, + zio_compress_table[BP_GET_COMPRESS(bp)].ci_name, + BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE", + BP_IS_GANG(bp) ? "gang" : "contiguous", + (u_longlong_t)bp->blk_birth, + (u_longlong_t)bp->blk_fill, + (u_longlong_t)bp->blk_cksum.zc_word[0], + (u_longlong_t)bp->blk_cksum.zc_word[1], + (u_longlong_t)bp->blk_cksum.zc_word[2], + (u_longlong_t)bp->blk_cksum.zc_word[3]); +} + +void +spa_freeze(spa_t *spa) +{ + uint64_t freeze_txg = 0; + + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + if (spa->spa_freeze_txg == UINT64_MAX) { + freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; + spa->spa_freeze_txg = freeze_txg; + } + spa_config_exit(spa, SCL_ALL, FTAG); + if (freeze_txg != 0) + txg_wait_synced(spa_get_dsl(spa), freeze_txg); +} + +void +zfs_panic_recover(const char *fmt, ...) +{ + va_list adx; + + va_start(adx, fmt); + vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); + va_end(adx); +} + +/* + * ========================================================================== + * Accessor functions + * ========================================================================== + */ + +boolean_t +spa_shutting_down(spa_t *spa) +{ + return (spa->spa_async_suspended); +} + +dsl_pool_t * +spa_get_dsl(spa_t *spa) +{ + return (spa->spa_dsl_pool); +} + +blkptr_t * +spa_get_rootblkptr(spa_t *spa) +{ + return (&spa->spa_ubsync.ub_rootbp); +} + +void +spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) +{ + spa->spa_uberblock.ub_rootbp = *bp; +} + +void +spa_altroot(spa_t *spa, char *buf, size_t buflen) +{ + if (spa->spa_root == NULL) + buf[0] = '\0'; + else + (void) strncpy(buf, spa->spa_root, buflen); +} + +int +spa_sync_pass(spa_t *spa) +{ + return (spa->spa_sync_pass); +} + +char * +spa_name(spa_t *spa) +{ + return (spa->spa_name); +} + +uint64_t +spa_guid(spa_t *spa) +{ + /* + * If we fail to parse the config during spa_load(), we can go through + * the error path (which posts an ereport) and end up here with no root + * vdev. We stash the original pool guid in 'spa_load_guid' to handle + * this case. 
+ */ + if (spa->spa_root_vdev != NULL) + return (spa->spa_root_vdev->vdev_guid); + else + return (spa->spa_load_guid); +} + +uint64_t +spa_last_synced_txg(spa_t *spa) +{ + return (spa->spa_ubsync.ub_txg); +} + +uint64_t +spa_first_txg(spa_t *spa) +{ + return (spa->spa_first_txg); +} + +pool_state_t +spa_state(spa_t *spa) +{ + return (spa->spa_state); +} + +uint64_t +spa_freeze_txg(spa_t *spa) +{ + return (spa->spa_freeze_txg); +} + +/* + * Return how much space is allocated in the pool (ie. sum of all asize) + */ +uint64_t +spa_get_alloc(spa_t *spa) +{ + return (spa->spa_root_vdev->vdev_stat.vs_alloc); +} + +/* + * Return how much (raid-z inflated) space there is in the pool. + */ +uint64_t +spa_get_space(spa_t *spa) +{ + return (spa->spa_root_vdev->vdev_stat.vs_space); +} + +/* + * Return the amount of raid-z-deflated space in the pool. + */ +uint64_t +spa_get_dspace(spa_t *spa) +{ + if (spa->spa_deflate) + return (spa->spa_root_vdev->vdev_stat.vs_dspace); + else + return (spa->spa_root_vdev->vdev_stat.vs_space); +} + +/* ARGSUSED */ +uint64_t +spa_get_asize(spa_t *spa, uint64_t lsize) +{ + /* + * For now, the worst case is 512-byte RAID-Z blocks, in which + * case the space requirement is exactly 2x; so just assume that. + * Add to this the fact that we can have up to 3 DVAs per bp, and + * we have to multiply by a total of 6x. + */ + return (lsize * 6); +} + +/* + * Return the failure mode that has been set to this pool. The default + * behavior will be to block all I/Os when a complete failure occurs. + */ +uint8_t +spa_get_failmode(spa_t *spa) +{ + return (spa->spa_failmode); +} + +boolean_t +spa_suspended(spa_t *spa) +{ + return (spa->spa_suspended); +} + +uint64_t +spa_version(spa_t *spa) +{ + return (spa->spa_ubsync.ub_version); +} + +int +spa_max_replication(spa_t *spa) +{ + /* + * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to + * handle BPs with more than one DVA allocated. Set our max + * replication level accordingly. 
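+ *
+ * In other words, a pool at or above SPA_VERSION_DITTO_BLOCKS gets
+ * MIN(SPA_DVAS_PER_BP, spa_max_replication_override) copies, so the
+ * result never exceeds SPA_DVAS_PER_BP (three) DVAs per block pointer;
+ * older pools are limited to a single copy.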
+ */ + if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) + return (1); + return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); +} + +uint64_t +bp_get_dasize(spa_t *spa, const blkptr_t *bp) +{ + int sz = 0, i; + + if (!spa->spa_deflate) + return (BP_GET_ASIZE(bp)); + + spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); + for (i = 0; i < SPA_DVAS_PER_BP; i++) { + vdev_t *vd = + vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i])); + if (vd) + sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >> + SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio; + } + spa_config_exit(spa, SCL_VDEV, FTAG); + return (sz); +} + +/* + * ========================================================================== + * Initialization and Termination + * ========================================================================== + */ + +static int +spa_name_compare(const void *a1, const void *a2) +{ + const spa_t *s1 = a1; + const spa_t *s2 = a2; + int s; + + s = strcmp(s1->spa_name, s2->spa_name); + if (s > 0) + return (1); + if (s < 0) + return (-1); + return (0); +} + +int +spa_busy(void) +{ + return (spa_active_count); +} + +void +spa_boot_init() +{ + spa_config_load(); +} + +void +spa_init(int mode) +{ + mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); + cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); + + avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), + offsetof(spa_t, spa_avl)); + + avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), + offsetof(spa_aux_t, aux_avl)); + + avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), + offsetof(spa_aux_t, aux_avl)); + + spa_mode = mode; + + refcount_init(); + unique_init(); + zio_init(); + dmu_init(); + zil_init(); + vdev_cache_stat_init(); + zfs_prop_init(); + zpool_prop_init(); + spa_config_load(); + l2arc_start(); +} + +void +spa_fini(void) +{ + l2arc_stop(); + + spa_evict_all(); + + vdev_cache_stat_fini(); + zil_fini(); + dmu_fini(); + zio_fini(); + unique_fini(); + refcount_fini(); + + avl_destroy(&spa_namespace_avl); + avl_destroy(&spa_spare_avl); + avl_destroy(&spa_l2cache_avl); + + cv_destroy(&spa_namespace_cv); + mutex_destroy(&spa_namespace_lock); + mutex_destroy(&spa_spare_lock); + mutex_destroy(&spa_l2cache_lock); +} + +/* + * Return whether this pool has slogs. No locking needed. + * It's not a problem if the wrong answer is returned as it's only for + * performance and not correctness + */ +boolean_t +spa_has_slogs(spa_t *spa) +{ + return (spa->spa_log_class->mc_rotor != NULL); +} + +/* + * Return whether this pool is the root pool. + */ +boolean_t +spa_is_root(spa_t *spa) +{ + return (spa->spa_is_root); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/space_map.c b/external/cddl/osnet/dist/uts/common/fs/zfs/space_map.c new file mode 100644 index 000000000000..0a1fd59eaba6 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/space_map.c @@ -0,0 +1,506 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include + +/* + * Space map routines. + * NOTE: caller is responsible for all locking. + */ +static int +space_map_seg_compare(const void *x1, const void *x2) +{ + const space_seg_t *s1 = x1; + const space_seg_t *s2 = x2; + + if (s1->ss_start < s2->ss_start) { + if (s1->ss_end > s2->ss_start) + return (0); + return (-1); + } + if (s1->ss_start > s2->ss_start) { + if (s1->ss_start < s2->ss_end) + return (0); + return (1); + } + return (0); +} + +void +space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift, + kmutex_t *lp) +{ + bzero(sm, sizeof (*sm)); + + avl_create(&sm->sm_root, space_map_seg_compare, + sizeof (space_seg_t), offsetof(struct space_seg, ss_node)); + + sm->sm_start = start; + sm->sm_size = size; + sm->sm_shift = shift; + sm->sm_lock = lp; +} + +void +space_map_destroy(space_map_t *sm) +{ + ASSERT(!sm->sm_loaded && !sm->sm_loading); + VERIFY3U(sm->sm_space, ==, 0); + avl_destroy(&sm->sm_root); +} + +void +space_map_add(space_map_t *sm, uint64_t start, uint64_t size) +{ + avl_index_t where; + space_seg_t ssearch, *ss_before, *ss_after, *ss; + uint64_t end = start + size; + int merge_before, merge_after; + + ASSERT(MUTEX_HELD(sm->sm_lock)); + VERIFY(size != 0); + VERIFY3U(start, >=, sm->sm_start); + VERIFY3U(end, <=, sm->sm_start + sm->sm_size); + VERIFY(sm->sm_space + size <= sm->sm_size); + VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0); + VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0); + + ssearch.ss_start = start; + ssearch.ss_end = end; + ss = avl_find(&sm->sm_root, &ssearch, &where); + + if (ss != NULL && ss->ss_start <= start && ss->ss_end >= end) { + zfs_panic_recover("zfs: allocating allocated segment" + "(offset=%llu size=%llu)\n", + (longlong_t)start, (longlong_t)size); + return; + } + + /* Make sure we don't overlap with either of our neighbors */ + VERIFY(ss == NULL); + + ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE); + ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER); + + merge_before = (ss_before != NULL && ss_before->ss_end == start); + merge_after = (ss_after != NULL && ss_after->ss_start == end); + + if (merge_before && merge_after) { + avl_remove(&sm->sm_root, ss_before); + ss_after->ss_start = ss_before->ss_start; + kmem_free(ss_before, sizeof (*ss_before)); + } else if (merge_before) { + ss_before->ss_end = end; + } else if (merge_after) { + ss_after->ss_start = start; + } else { + ss = kmem_alloc(sizeof (*ss), KM_SLEEP); + ss->ss_start = start; + ss->ss_end = end; + avl_insert(&sm->sm_root, ss, where); + } + + sm->sm_space += size; +} + +void +space_map_remove(space_map_t *sm, uint64_t start, uint64_t size) +{ + avl_index_t where; + space_seg_t ssearch, *ss, *newseg; + uint64_t end = start + size; + int left_over, right_over; + + ASSERT(MUTEX_HELD(sm->sm_lock)); + VERIFY(size != 0); + VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0); + VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0); + + ssearch.ss_start = start; + ssearch.ss_end = end; + ss = 
avl_find(&sm->sm_root, &ssearch, &where); + + /* Make sure we completely overlap with someone */ + if (ss == NULL) { + zfs_panic_recover("zfs: freeing free segment " + "(offset=%llu size=%llu)", + (longlong_t)start, (longlong_t)size); + return; + } + VERIFY3U(ss->ss_start, <=, start); + VERIFY3U(ss->ss_end, >=, end); + VERIFY(sm->sm_space - size <= sm->sm_size); + + left_over = (ss->ss_start != start); + right_over = (ss->ss_end != end); + + if (left_over && right_over) { + newseg = kmem_alloc(sizeof (*newseg), KM_SLEEP); + newseg->ss_start = end; + newseg->ss_end = ss->ss_end; + ss->ss_end = start; + avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER); + } else if (left_over) { + ss->ss_end = start; + } else if (right_over) { + ss->ss_start = end; + } else { + avl_remove(&sm->sm_root, ss); + kmem_free(ss, sizeof (*ss)); + } + + sm->sm_space -= size; +} + +int +space_map_contains(space_map_t *sm, uint64_t start, uint64_t size) +{ + avl_index_t where; + space_seg_t ssearch, *ss; + uint64_t end = start + size; + + ASSERT(MUTEX_HELD(sm->sm_lock)); + VERIFY(size != 0); + VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0); + VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0); + + ssearch.ss_start = start; + ssearch.ss_end = end; + ss = avl_find(&sm->sm_root, &ssearch, &where); + + return (ss != NULL && ss->ss_start <= start && ss->ss_end >= end); +} + +void +space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest) +{ + space_seg_t *ss; + void *cookie = NULL; + + ASSERT(MUTEX_HELD(sm->sm_lock)); + + while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) { + if (func != NULL) + func(mdest, ss->ss_start, ss->ss_end - ss->ss_start); + kmem_free(ss, sizeof (*ss)); + } + sm->sm_space = 0; +} + +void +space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest) +{ + space_seg_t *ss; + + for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss)) + func(mdest, ss->ss_start, ss->ss_end - ss->ss_start); +} + +void +space_map_excise(space_map_t *sm, uint64_t start, uint64_t size) +{ + avl_tree_t *t = &sm->sm_root; + avl_index_t where; + space_seg_t *ss, search; + uint64_t end = start + size; + uint64_t rm_start, rm_end; + + ASSERT(MUTEX_HELD(sm->sm_lock)); + + search.ss_start = start; + search.ss_end = start; + + for (;;) { + ss = avl_find(t, &search, &where); + + if (ss == NULL) + ss = avl_nearest(t, where, AVL_AFTER); + + if (ss == NULL || ss->ss_start >= end) + break; + + rm_start = MAX(ss->ss_start, start); + rm_end = MIN(ss->ss_end, end); + + space_map_remove(sm, rm_start, rm_end - rm_start); + } +} + +/* + * Replace smd with the union of smd and sms. + */ +void +space_map_union(space_map_t *smd, space_map_t *sms) +{ + avl_tree_t *t = &sms->sm_root; + space_seg_t *ss; + + ASSERT(MUTEX_HELD(smd->sm_lock)); + + /* + * For each source segment, remove any intersections with the + * destination, then add the source segment to the destination. + */ + for (ss = avl_first(t); ss != NULL; ss = AVL_NEXT(t, ss)) { + space_map_excise(smd, ss->ss_start, ss->ss_end - ss->ss_start); + space_map_add(smd, ss->ss_start, ss->ss_end - ss->ss_start); + } +} + +/* + * Wait for any in-progress space_map_load() to complete. + */ +void +space_map_load_wait(space_map_t *sm) +{ + ASSERT(MUTEX_HELD(sm->sm_lock)); + + while (sm->sm_loading) + cv_wait(&sm->sm_load_cv, sm->sm_lock); +} + +/* + * Note: space_map_load() will drop sm_lock across dmu_read() calls. + * The caller must be OK with this. 
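+ *
+ * A rough sketch of the expected calling convention (the names "sm",
+ * "ops", "smo" and "os" are placeholders for whatever the real caller,
+ * e.g. the metaslab code, uses):
+ *
+ *        mutex_enter(sm->sm_lock);
+ *        error = space_map_load(sm, ops, SM_FREE, smo, os);
+ *        mutex_exit(sm->sm_lock);
+ *
+ * sm_lock is held on entry and on return, but may be dropped and
+ * re-acquired internally while the on-disk map is read in.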
+ */ +int +space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype, + space_map_obj_t *smo, objset_t *os) +{ + uint64_t *entry, *entry_map, *entry_map_end; + uint64_t bufsize, size, offset, end, space; + uint64_t mapstart = sm->sm_start; + int error = 0; + + ASSERT(MUTEX_HELD(sm->sm_lock)); + + space_map_load_wait(sm); + + if (sm->sm_loaded) + return (0); + + sm->sm_loading = B_TRUE; + end = smo->smo_objsize; + space = smo->smo_alloc; + + ASSERT(sm->sm_ops == NULL); + VERIFY3U(sm->sm_space, ==, 0); + + if (maptype == SM_FREE) { + space_map_add(sm, sm->sm_start, sm->sm_size); + space = sm->sm_size - space; + } + + bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT; + entry_map = zio_buf_alloc(bufsize); + + mutex_exit(sm->sm_lock); + if (end > bufsize) + dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize); + mutex_enter(sm->sm_lock); + + for (offset = 0; offset < end; offset += bufsize) { + size = MIN(end - offset, bufsize); + VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0); + VERIFY(size != 0); + + dprintf("object=%llu offset=%llx size=%llx\n", + smo->smo_object, offset, size); + + mutex_exit(sm->sm_lock); + error = dmu_read(os, smo->smo_object, offset, size, entry_map); + mutex_enter(sm->sm_lock); + if (error != 0) + break; + + entry_map_end = entry_map + (size / sizeof (uint64_t)); + for (entry = entry_map; entry < entry_map_end; entry++) { + uint64_t e = *entry; + + if (SM_DEBUG_DECODE(e)) /* Skip debug entries */ + continue; + + (SM_TYPE_DECODE(e) == maptype ? + space_map_add : space_map_remove)(sm, + (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart, + SM_RUN_DECODE(e) << sm->sm_shift); + } + } + + if (error == 0) { + VERIFY3U(sm->sm_space, ==, space); + + sm->sm_loaded = B_TRUE; + sm->sm_ops = ops; + if (ops != NULL) + ops->smop_load(sm); + } else { + space_map_vacate(sm, NULL, NULL); + } + + zio_buf_free(entry_map, bufsize); + + sm->sm_loading = B_FALSE; + + cv_broadcast(&sm->sm_load_cv); + + return (error); +} + +void +space_map_unload(space_map_t *sm) +{ + ASSERT(MUTEX_HELD(sm->sm_lock)); + + if (sm->sm_loaded && sm->sm_ops != NULL) + sm->sm_ops->smop_unload(sm); + + sm->sm_loaded = B_FALSE; + sm->sm_ops = NULL; + + space_map_vacate(sm, NULL, NULL); +} + +uint64_t +space_map_alloc(space_map_t *sm, uint64_t size) +{ + uint64_t start; + + start = sm->sm_ops->smop_alloc(sm, size); + if (start != -1ULL) + space_map_remove(sm, start, size); + return (start); +} + +void +space_map_claim(space_map_t *sm, uint64_t start, uint64_t size) +{ + sm->sm_ops->smop_claim(sm, start, size); + space_map_remove(sm, start, size); +} + +void +space_map_free(space_map_t *sm, uint64_t start, uint64_t size) +{ + space_map_add(sm, start, size); + sm->sm_ops->smop_free(sm, start, size); +} + +/* + * Note: space_map_sync() will drop sm_lock across dmu_write() calls. + */ +void +space_map_sync(space_map_t *sm, uint8_t maptype, + space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx) +{ + spa_t *spa = dmu_objset_spa(os); + void *cookie = NULL; + space_seg_t *ss; + uint64_t bufsize, start, size, run_len; + uint64_t *entry, *entry_map, *entry_map_end; + + ASSERT(MUTEX_HELD(sm->sm_lock)); + + if (sm->sm_space == 0) + return; + + dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n", + smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa), + maptype == SM_ALLOC ? 
'A' : 'F', avl_numnodes(&sm->sm_root), + sm->sm_space); + + if (maptype == SM_ALLOC) + smo->smo_alloc += sm->sm_space; + else + smo->smo_alloc -= sm->sm_space; + + bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t); + bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT); + entry_map = zio_buf_alloc(bufsize); + entry_map_end = entry_map + (bufsize / sizeof (uint64_t)); + entry = entry_map; + + *entry++ = SM_DEBUG_ENCODE(1) | + SM_DEBUG_ACTION_ENCODE(maptype) | + SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) | + SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx)); + + while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) { + size = ss->ss_end - ss->ss_start; + start = (ss->ss_start - sm->sm_start) >> sm->sm_shift; + + sm->sm_space -= size; + size >>= sm->sm_shift; + + while (size) { + run_len = MIN(size, SM_RUN_MAX); + + if (entry == entry_map_end) { + mutex_exit(sm->sm_lock); + dmu_write(os, smo->smo_object, smo->smo_objsize, + bufsize, entry_map, tx); + mutex_enter(sm->sm_lock); + smo->smo_objsize += bufsize; + entry = entry_map; + } + + *entry++ = SM_OFFSET_ENCODE(start) | + SM_TYPE_ENCODE(maptype) | + SM_RUN_ENCODE(run_len); + + start += run_len; + size -= run_len; + } + kmem_free(ss, sizeof (*ss)); + } + + if (entry != entry_map) { + size = (entry - entry_map) * sizeof (uint64_t); + mutex_exit(sm->sm_lock); + dmu_write(os, smo->smo_object, smo->smo_objsize, + size, entry_map, tx); + mutex_enter(sm->sm_lock); + smo->smo_objsize += size; + } + + zio_buf_free(entry_map, bufsize); + + VERIFY3U(sm->sm_space, ==, 0); +} + +void +space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx) +{ + VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0); + + smo->smo_objsize = 0; + smo->smo_alloc = 0; +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/arc.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/arc.h new file mode 100644 index 000000000000..749bf53e5b5e --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/arc.h @@ -0,0 +1,138 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_ARC_H +#define _SYS_ARC_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +typedef struct arc_buf_hdr arc_buf_hdr_t; +typedef struct arc_buf arc_buf_t; +typedef void arc_done_func_t(zio_t *zio, arc_buf_t *buf, void *private); +typedef int arc_evict_func_t(void *private); + +/* generic arc_done_func_t's which you can use */ +arc_done_func_t arc_bcopy_func; +arc_done_func_t arc_getbuf_func; + +struct arc_buf { + arc_buf_hdr_t *b_hdr; + arc_buf_t *b_next; + krwlock_t b_lock; + void *b_data; + arc_evict_func_t *b_efunc; + void *b_private; +}; + +typedef enum arc_buf_contents { + ARC_BUFC_DATA, /* buffer contains data */ + ARC_BUFC_METADATA, /* buffer contains metadata */ + ARC_BUFC_NUMTYPES +} arc_buf_contents_t; +/* + * These are the flags we pass into calls to the arc + */ +#define ARC_WAIT (1 << 1) /* perform I/O synchronously */ +#define ARC_NOWAIT (1 << 2) /* perform I/O asynchronously */ +#define ARC_PREFETCH (1 << 3) /* I/O is a prefetch */ +#define ARC_CACHED (1 << 4) /* I/O was already in cache */ +#define ARC_L2CACHE (1 << 5) /* cache in L2ARC */ + +void arc_space_consume(uint64_t space); +void arc_space_return(uint64_t space); +void *arc_data_buf_alloc(uint64_t space); +void arc_data_buf_free(void *buf, uint64_t space); +arc_buf_t *arc_buf_alloc(spa_t *spa, int size, void *tag, + arc_buf_contents_t type); +void arc_buf_add_ref(arc_buf_t *buf, void *tag); +int arc_buf_remove_ref(arc_buf_t *buf, void *tag); +int arc_buf_size(arc_buf_t *buf); +void arc_release(arc_buf_t *buf, void *tag); +int arc_released(arc_buf_t *buf); +int arc_has_callback(arc_buf_t *buf); +void arc_buf_freeze(arc_buf_t *buf); +void arc_buf_thaw(arc_buf_t *buf); +#ifdef ZFS_DEBUG +int arc_referenced(arc_buf_t *buf); +#endif + +typedef struct writeprops { + dmu_object_type_t wp_type; + uint8_t wp_level; + uint8_t wp_copies; + uint8_t wp_dncompress, wp_oscompress; + uint8_t wp_dnchecksum, wp_oschecksum; +} writeprops_t; + +void write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp); +int arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf, + arc_done_func_t *done, void *private, int priority, int zio_flags, + uint32_t *arc_flags, const zbookmark_t *zb); +int arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp, + arc_done_func_t *done, void *private, int priority, int flags, + uint32_t *arc_flags, const zbookmark_t *zb); +zio_t *arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp, + boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, + arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, + int zio_flags, const zbookmark_t *zb); +int arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, + zio_done_func_t *done, void *private, uint32_t arc_flags); +int arc_tryread(spa_t *spa, blkptr_t *bp, void *data); + +void arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private); +int arc_buf_evict(arc_buf_t *buf); + +void arc_flush(spa_t *spa); +void arc_tempreserve_clear(uint64_t reserve); +int arc_tempreserve_space(uint64_t reserve, uint64_t txg); + +void arc_init(void); +void arc_fini(void); + +/* + * Level 2 ARC + */ + +void l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end); +void l2arc_remove_vdev(vdev_t *vd); +boolean_t l2arc_vdev_present(vdev_t *vd); +void l2arc_init(void); +void l2arc_fini(void); +void l2arc_start(void); +void l2arc_stop(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ARC_H */ diff --git 
a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/bplist.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/bplist.h new file mode 100644 index 000000000000..cdb93a6c35a3 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/bplist.h @@ -0,0 +1,89 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_BPLIST_H +#define _SYS_BPLIST_H + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct bplist_phys { + /* + * This is the bonus buffer for the dead lists. The object's + * contents is an array of bpl_entries blkptr_t's, representing + * a total of bpl_bytes physical space. + */ + uint64_t bpl_entries; + uint64_t bpl_bytes; + uint64_t bpl_comp; + uint64_t bpl_uncomp; +} bplist_phys_t; + +#define BPLIST_SIZE_V0 (2 * sizeof (uint64_t)) + +typedef struct bplist_q { + blkptr_t bpq_blk; + void *bpq_next; +} bplist_q_t; + +typedef struct bplist { + kmutex_t bpl_lock; + objset_t *bpl_mos; + uint64_t bpl_object; + uint8_t bpl_blockshift; + uint8_t bpl_bpshift; + uint8_t bpl_havecomp; + bplist_q_t *bpl_queue; + bplist_phys_t *bpl_phys; + dmu_buf_t *bpl_dbuf; + dmu_buf_t *bpl_cached_dbuf; +} bplist_t; + +extern uint64_t bplist_create(objset_t *mos, int blocksize, dmu_tx_t *tx); +extern void bplist_destroy(objset_t *mos, uint64_t object, dmu_tx_t *tx); +extern int bplist_open(bplist_t *bpl, objset_t *mos, uint64_t object); +extern void bplist_close(bplist_t *bpl); +extern boolean_t bplist_empty(bplist_t *bpl); +extern int bplist_iterate(bplist_t *bpl, uint64_t *itorp, blkptr_t *bp); +extern int bplist_enqueue(bplist_t *bpl, const blkptr_t *bp, dmu_tx_t *tx); +extern void bplist_enqueue_deferred(bplist_t *bpl, const blkptr_t *bp); +extern void bplist_sync(bplist_t *bpl, dmu_tx_t *tx); +extern void bplist_vacate(bplist_t *bpl, dmu_tx_t *tx); +extern int bplist_space(bplist_t *bpl, + uint64_t *usedp, uint64_t *compp, uint64_t *uncompp); +extern int bplist_space_birthrange(bplist_t *bpl, + uint64_t mintxg, uint64_t maxtxg, uint64_t *dasizep); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_BPLIST_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dbuf.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dbuf.h new file mode 100644 index 000000000000..75ce27264e3c --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dbuf.h @@ -0,0 +1,347 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DBUF_H +#define _SYS_DBUF_H + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define DB_BONUS_BLKID (-1ULL) +#define IN_DMU_SYNC 2 + +/* + * define flags for dbuf_read + */ + +#define DB_RF_MUST_SUCCEED (1 << 0) +#define DB_RF_CANFAIL (1 << 1) +#define DB_RF_HAVESTRUCT (1 << 2) +#define DB_RF_NOPREFETCH (1 << 3) +#define DB_RF_NEVERWAIT (1 << 4) +#define DB_RF_CACHED (1 << 5) + +/* + * The simplified state transition diagram for dbufs looks like: + * + * +----> READ ----+ + * | | + * | V + * (alloc)-->UNCACHED CACHED-->EVICTING-->(free) + * | ^ ^ + * | | | + * +----> FILL ----+ | + * | | + * | | + * +--------> NOFILL -------+ + */ +typedef enum dbuf_states { + DB_UNCACHED, + DB_FILL, + DB_NOFILL, + DB_READ, + DB_CACHED, + DB_EVICTING +} dbuf_states_t; + +struct objset_impl; +struct dnode; +struct dmu_tx; + +/* + * level = 0 means the user data + * level = 1 means the single indirect block + * etc. + */ + +#define LIST_LINK_INACTIVE(link) \ + ((link)->list_next == NULL && (link)->list_prev == NULL) + +struct dmu_buf_impl; + +typedef enum override_states { + DR_NOT_OVERRIDDEN, + DR_IN_DMU_SYNC, + DR_OVERRIDDEN +} override_states_t; + +typedef struct dbuf_dirty_record { + /* link on our parents dirty list */ + list_node_t dr_dirty_node; + + /* transaction group this data will sync in */ + uint64_t dr_txg; + + /* zio of outstanding write IO */ + zio_t *dr_zio; + + /* pointer back to our dbuf */ + struct dmu_buf_impl *dr_dbuf; + + /* pointer to next dirty record */ + struct dbuf_dirty_record *dr_next; + + /* pointer to parent dirty record */ + struct dbuf_dirty_record *dr_parent; + + union dirty_types { + struct dirty_indirect { + + /* protect access to list */ + kmutex_t dr_mtx; + + /* Our list of dirty children */ + list_t dr_children; + } di; + struct dirty_leaf { + + /* + * dr_data is set when we dirty the buffer + * so that we can retain the pointer even if it + * gets COW'd in a subsequent transaction group. + */ + arc_buf_t *dr_data; + blkptr_t dr_overridden_by; + override_states_t dr_override_state; + } dl; + } dt; +} dbuf_dirty_record_t; + +typedef struct dmu_buf_impl { + /* + * The following members are immutable, with the exception of + * db.db_data, which is protected by db_mtx. + */ + + /* the publicly visible structure */ + dmu_buf_t db; + + /* the objset we belong to */ + struct objset_impl *db_objset; + + /* + * the dnode we belong to (NULL when evicted) + */ + struct dnode *db_dnode; + + /* + * our parent buffer; if the dnode points to us directly, + * db_parent == db_dnode->dn_dbuf + * only accessed by sync thread ??? 
+ * (NULL when evicted) + */ + struct dmu_buf_impl *db_parent; + + /* + * link for hash table of all dmu_buf_impl_t's + */ + struct dmu_buf_impl *db_hash_next; + + /* our block number */ + uint64_t db_blkid; + + /* + * Pointer to the blkptr_t which points to us. May be NULL if we + * don't have one yet. (NULL when evicted) + */ + blkptr_t *db_blkptr; + + /* + * Our indirection level. Data buffers have db_level==0. + * Indirect buffers which point to data buffers have + * db_level==1. etc. Buffers which contain dnodes have + * db_level==0, since the dnodes are stored in a file. + */ + uint8_t db_level; + + /* db_mtx protects the members below */ + kmutex_t db_mtx; + + /* + * Current state of the buffer + */ + dbuf_states_t db_state; + + /* + * Refcount accessed by dmu_buf_{hold,rele}. + * If nonzero, the buffer can't be destroyed. + * Protected by db_mtx. + */ + refcount_t db_holds; + + /* buffer holding our data */ + arc_buf_t *db_buf; + + kcondvar_t db_changed; + dbuf_dirty_record_t *db_data_pending; + + /* pointer to most recent dirty record for this buffer */ + dbuf_dirty_record_t *db_last_dirty; + + /* + * Our link on the owner dnodes's dn_dbufs list. + * Protected by its dn_dbufs_mtx. + */ + list_node_t db_link; + + /* Data which is unique to data (leaf) blocks: */ + + /* stuff we store for the user (see dmu_buf_set_user) */ + void *db_user_ptr; + void **db_user_data_ptr_ptr; + dmu_buf_evict_func_t *db_evict_func; + + uint8_t db_immediate_evict; + uint8_t db_freed_in_flight; + + uint8_t db_dirtycnt; +} dmu_buf_impl_t; + +/* Note: the dbuf hash table is exposed only for the mdb module */ +#define DBUF_MUTEXES 256 +#define DBUF_HASH_MUTEX(h, idx) (&(h)->hash_mutexes[(idx) & (DBUF_MUTEXES-1)]) +typedef struct dbuf_hash_table { + uint64_t hash_table_mask; + dmu_buf_impl_t **hash_table; + kmutex_t hash_mutexes[DBUF_MUTEXES]; +} dbuf_hash_table_t; + + +uint64_t dbuf_whichblock(struct dnode *di, uint64_t offset); + +dmu_buf_impl_t *dbuf_create_tlib(struct dnode *dn, char *data); +void dbuf_create_bonus(struct dnode *dn); + +dmu_buf_impl_t *dbuf_hold(struct dnode *dn, uint64_t blkid, void *tag); +dmu_buf_impl_t *dbuf_hold_level(struct dnode *dn, int level, uint64_t blkid, + void *tag); +int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid, int create, + void *tag, dmu_buf_impl_t **dbp); + +void dbuf_prefetch(struct dnode *dn, uint64_t blkid); + +void dbuf_add_ref(dmu_buf_impl_t *db, void *tag); +uint64_t dbuf_refcount(dmu_buf_impl_t *db); + +void dbuf_rele(dmu_buf_impl_t *db, void *tag); + +dmu_buf_impl_t *dbuf_find(struct dnode *dn, uint8_t level, uint64_t blkid); + +int dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags); +void dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx); +void dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx); +void dmu_buf_will_not_fill(dmu_buf_t *db, dmu_tx_t *tx); +void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx); +void dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx); +dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx); + +void dbuf_clear(dmu_buf_impl_t *db); +void dbuf_evict(dmu_buf_impl_t *db); + +void dbuf_setdirty(dmu_buf_impl_t *db, dmu_tx_t *tx); +void dbuf_unoverride(dbuf_dirty_record_t *dr); +void dbuf_sync_list(list_t *list, dmu_tx_t *tx); + +void dbuf_free_range(struct dnode *dn, uint64_t start, uint64_t end, + struct dmu_tx *); + +void dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx); + +void dbuf_init(void); +void dbuf_fini(void); + +#define DBUF_IS_METADATA(db) \ + ((db)->db_level > 0 || 
dmu_ot[(db)->db_dnode->dn_type].ot_metadata) + +#define DBUF_GET_BUFC_TYPE(db) \ + (DBUF_IS_METADATA(db) ? ARC_BUFC_METADATA : ARC_BUFC_DATA) + +#define DBUF_IS_CACHEABLE(db) \ + ((db)->db_objset->os_primary_cache == ZFS_CACHE_ALL || \ + (DBUF_IS_METADATA(db) && \ + ((db)->db_objset->os_primary_cache == ZFS_CACHE_METADATA))) + +#define DBUF_IS_L2CACHEABLE(db) \ + ((db)->db_objset->os_secondary_cache == ZFS_CACHE_ALL || \ + (DBUF_IS_METADATA(db) && \ + ((db)->db_objset->os_secondary_cache == ZFS_CACHE_METADATA))) + +#ifdef ZFS_DEBUG + +/* + * There should be a ## between the string literal and fmt, to make it + * clear that we're joining two strings together, but gcc does not + * support that preprocessor token. + */ +#define dprintf_dbuf(dbuf, fmt, ...) do { \ + if (zfs_flags & ZFS_DEBUG_DPRINTF) { \ + char __db_buf[32]; \ + uint64_t __db_obj = (dbuf)->db.db_object; \ + if (__db_obj == DMU_META_DNODE_OBJECT) \ + (void) strcpy(__db_buf, "mdn"); \ + else \ + (void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \ + (u_longlong_t)__db_obj); \ + dprintf_ds((dbuf)->db_objset->os_dsl_dataset, \ + "obj=%s lvl=%u blkid=%lld " fmt, \ + __db_buf, (dbuf)->db_level, \ + (u_longlong_t)(dbuf)->db_blkid, __VA_ARGS__); \ + } \ +_NOTE(CONSTCOND) } while (0) + +#define dprintf_dbuf_bp(db, bp, fmt, ...) do { \ + if (zfs_flags & ZFS_DEBUG_DPRINTF) { \ + char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \ + sprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, bp); \ + dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf); \ + kmem_free(__blkbuf, BP_SPRINTF_LEN); \ + } \ +_NOTE(CONSTCOND) } while (0) + +#define DBUF_VERIFY(db) dbuf_verify(db) + +#else + +#define dprintf_dbuf(db, fmt, ...) +#define dprintf_dbuf_bp(db, bp, fmt, ...) +#define DBUF_VERIFY(db) + +#endif + + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DBUF_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu.h new file mode 100644 index 000000000000..3b1e5c8fbc1f --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu.h @@ -0,0 +1,638 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DMU_H +#define _SYS_DMU_H + +/* + * This file describes the interface that the DMU provides for its + * consumers. + * + * The DMU also interacts with the SPA. That interface is described in + * dmu_spa.h. 
+ */ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct uio; +struct page; +struct vnode; +struct spa; +struct zilog; +struct zio; +struct blkptr; +struct zap_cursor; +struct dsl_dataset; +struct dsl_pool; +struct dnode; +struct drr_begin; +struct drr_end; +struct zbookmark; +struct spa; +struct nvlist; +struct objset_impl; + +typedef struct objset objset_t; +typedef struct dmu_tx dmu_tx_t; +typedef struct dsl_dir dsl_dir_t; + +typedef enum dmu_object_type { + DMU_OT_NONE, + /* general: */ + DMU_OT_OBJECT_DIRECTORY, /* ZAP */ + DMU_OT_OBJECT_ARRAY, /* UINT64 */ + DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */ + DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */ + DMU_OT_BPLIST, /* UINT64 */ + DMU_OT_BPLIST_HDR, /* UINT64 */ + /* spa: */ + DMU_OT_SPACE_MAP_HEADER, /* UINT64 */ + DMU_OT_SPACE_MAP, /* UINT64 */ + /* zil: */ + DMU_OT_INTENT_LOG, /* UINT64 */ + /* dmu: */ + DMU_OT_DNODE, /* DNODE */ + DMU_OT_OBJSET, /* OBJSET */ + /* dsl: */ + DMU_OT_DSL_DIR, /* UINT64 */ + DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */ + DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */ + DMU_OT_DSL_PROPS, /* ZAP */ + DMU_OT_DSL_DATASET, /* UINT64 */ + /* zpl: */ + DMU_OT_ZNODE, /* ZNODE */ + DMU_OT_OLDACL, /* Old ACL */ + DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */ + DMU_OT_DIRECTORY_CONTENTS, /* ZAP */ + DMU_OT_MASTER_NODE, /* ZAP */ + DMU_OT_UNLINKED_SET, /* ZAP */ + /* zvol: */ + DMU_OT_ZVOL, /* UINT8 */ + DMU_OT_ZVOL_PROP, /* ZAP */ + /* other; for testing only! */ + DMU_OT_PLAIN_OTHER, /* UINT8 */ + DMU_OT_UINT64_OTHER, /* UINT64 */ + DMU_OT_ZAP_OTHER, /* ZAP */ + /* new object types: */ + DMU_OT_ERROR_LOG, /* ZAP */ + DMU_OT_SPA_HISTORY, /* UINT8 */ + DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */ + DMU_OT_POOL_PROPS, /* ZAP */ + DMU_OT_DSL_PERMS, /* ZAP */ + DMU_OT_ACL, /* ACL */ + DMU_OT_SYSACL, /* SYSACL */ + DMU_OT_FUID, /* FUID table (Packed NVLIST UINT8) */ + DMU_OT_FUID_SIZE, /* FUID table size UINT64 */ + DMU_OT_NEXT_CLONES, /* ZAP */ + DMU_OT_SCRUB_QUEUE, /* ZAP */ + DMU_OT_NUMTYPES +} dmu_object_type_t; + +typedef enum dmu_objset_type { + DMU_OST_NONE, + DMU_OST_META, + DMU_OST_ZFS, + DMU_OST_ZVOL, + DMU_OST_OTHER, /* For testing only! */ + DMU_OST_ANY, /* Be careful! */ + DMU_OST_NUMTYPES +} dmu_objset_type_t; + +void byteswap_uint64_array(void *buf, size_t size); +void byteswap_uint32_array(void *buf, size_t size); +void byteswap_uint16_array(void *buf, size_t size); +void byteswap_uint8_array(void *buf, size_t size); +void zap_byteswap(void *buf, size_t size); +void zfs_oldacl_byteswap(void *buf, size_t size); +void zfs_acl_byteswap(void *buf, size_t size); +void zfs_znode_byteswap(void *buf, size_t size); + +#define DS_MODE_NOHOLD 0 /* internal use only */ +#define DS_MODE_USER 1 /* simple access, no special needs */ +#define DS_MODE_OWNER 2 /* the "main" access, e.g. a mount */ +#define DS_MODE_TYPE_MASK 0x3 +#define DS_MODE_TYPE(x) ((x) & DS_MODE_TYPE_MASK) +#define DS_MODE_READONLY 0x8 +#define DS_MODE_IS_READONLY(x) ((x) & DS_MODE_READONLY) +#define DS_MODE_INCONSISTENT 0x10 +#define DS_MODE_IS_INCONSISTENT(x) ((x) & DS_MODE_INCONSISTENT) + +#define DS_FIND_SNAPSHOTS (1<<0) +#define DS_FIND_CHILDREN (1<<1) + +/* + * The maximum number of bytes that can be accessed as part of one + * operation, including metadata. + */ +#define DMU_MAX_ACCESS (10<<20) /* 10MB */ +#define DMU_MAX_DELETEBLKCNT (20480) /* ~5MB of indirect blocks */ + +/* + * Public routines to create, destroy, open, and close objsets. 
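+ *
+ * A minimal usage sketch (the dataset name and the lack of error
+ * handling are illustrative only):
+ *
+ *        objset_t *os;
+ *
+ *        error = dmu_objset_open("tank/fs", DMU_OST_ZFS,
+ *            DS_MODE_USER | DS_MODE_READONLY, &os);
+ *        if (error == 0) {
+ *                ... use the objset ...
+ *                dmu_objset_close(os);
+ *        }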
+ */ +int dmu_objset_open(const char *name, dmu_objset_type_t type, int mode, + objset_t **osp); +int dmu_objset_open_ds(struct dsl_dataset *ds, dmu_objset_type_t type, + objset_t **osp); +void dmu_objset_close(objset_t *os); +int dmu_objset_evict_dbufs(objset_t *os); +int dmu_objset_create(const char *name, dmu_objset_type_t type, + objset_t *clone_parent, uint64_t flags, + void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg); +int dmu_objset_destroy(const char *name); +int dmu_snapshots_destroy(char *fsname, char *snapname); +int dmu_objset_rollback(objset_t *os); +int dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive); +int dmu_objset_rename(const char *name, const char *newname, + boolean_t recursive); +int dmu_objset_find(char *name, int func(char *, void *), void *arg, + int flags); +void dmu_objset_byteswap(void *buf, size_t size); + +typedef struct dmu_buf { + uint64_t db_object; /* object that this buffer is part of */ + uint64_t db_offset; /* byte offset in this object */ + uint64_t db_size; /* size of buffer in bytes */ + void *db_data; /* data in buffer */ +} dmu_buf_t; + +typedef void dmu_buf_evict_func_t(struct dmu_buf *db, void *user_ptr); + +/* + * The names of zap entries in the DIRECTORY_OBJECT of the MOS. + */ +#define DMU_POOL_DIRECTORY_OBJECT 1 +#define DMU_POOL_CONFIG "config" +#define DMU_POOL_ROOT_DATASET "root_dataset" +#define DMU_POOL_SYNC_BPLIST "sync_bplist" +#define DMU_POOL_ERRLOG_SCRUB "errlog_scrub" +#define DMU_POOL_ERRLOG_LAST "errlog_last" +#define DMU_POOL_SPARES "spares" +#define DMU_POOL_DEFLATE "deflate" +#define DMU_POOL_HISTORY "history" +#define DMU_POOL_PROPS "pool_props" +#define DMU_POOL_L2CACHE "l2cache" + +/* 4x8 zbookmark_t */ +#define DMU_POOL_SCRUB_BOOKMARK "scrub_bookmark" +/* 1x8 zap obj DMU_OT_SCRUB_QUEUE */ +#define DMU_POOL_SCRUB_QUEUE "scrub_queue" +/* 1x8 txg */ +#define DMU_POOL_SCRUB_MIN_TXG "scrub_min_txg" +/* 1x8 txg */ +#define DMU_POOL_SCRUB_MAX_TXG "scrub_max_txg" +/* 1x4 enum scrub_func */ +#define DMU_POOL_SCRUB_FUNC "scrub_func" +/* 1x8 count */ +#define DMU_POOL_SCRUB_ERRORS "scrub_errors" + +/* + * Allocate an object from this objset. The range of object numbers + * available is (0, DN_MAX_OBJECT). Object 0 is the meta-dnode. + * + * The transaction must be assigned to a txg. The newly allocated + * object will be "held" in the transaction (ie. you can modify the + * newly allocated object in this transaction). + * + * dmu_object_alloc() chooses an object and returns it in *objectp. + * + * dmu_object_claim() allocates a specific object number. If that + * number is already allocated, it fails and returns EEXIST. + * + * Return 0 on success, or ENOSPC or EEXIST as specified above. + */ +uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot, + int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx); +int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot, + int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx); +int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot, + int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx); + +/* + * Free an object from this objset. + * + * The object's data will be freed as well (ie. you don't need to call + * dmu_free(object, 0, -1, tx)). + * + * The object need not be held in the transaction. 
+ * + * If there are any holds on this object's buffers (via dmu_buf_hold()), + * or tx holds on the object (via dmu_tx_hold_object()), you can not + * free it; it fails and returns EBUSY. + * + * If the object is not allocated, it fails and returns ENOENT. + * + * Return 0 on success, or EBUSY or ENOENT as specified above. + */ +int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx); + +/* + * Find the next allocated or free object. + * + * The objectp parameter is in-out. It will be updated to be the next + * object which is allocated. Ignore objects which have not been + * modified since txg. + * + * XXX Can only be called on an objset with no dirty data. + * + * Returns 0 on success, or ENOENT if there are no more objects. + */ +int dmu_object_next(objset_t *os, uint64_t *objectp, + boolean_t hole, uint64_t txg); + +/* + * Set the data blocksize for an object. + * + * The object cannot have any blocks allocated beyond the first. If + * the first block is allocated already, the new size must be greater + * than the current block size. If these conditions are not met, + * ENOTSUP will be returned. + * + * Returns 0 on success, or EBUSY if there are any holds on the object + * contents, or ENOTSUP as described above. + */ +int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, + int ibs, dmu_tx_t *tx); + +/* + * Set the checksum property on a dnode. The new checksum algorithm will + * apply to all newly written blocks; existing blocks will not be affected. + */ +void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, + dmu_tx_t *tx); + +/* + * Set the compress property on a dnode. The new compression algorithm will + * apply to all newly written blocks; existing blocks will not be affected. + */ +void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, + dmu_tx_t *tx); + +/* + * Decide how many copies of a given block we should make. Can be from + * 1 to SPA_DVAS_PER_BP. + */ +int dmu_get_replication_level(struct objset_impl *, struct zbookmark *zb, + dmu_object_type_t ot); +/* + * The bonus data is accessed more or less like a regular buffer. + * You must dmu_bonus_hold() to get the buffer, which will give you a + * dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus + * data. As with any normal buffer, you must call dmu_buf_read() to + * read db_data, dmu_buf_will_dirty() before modifying it, and the + * object must be held in an assigned transaction before calling + * dmu_buf_will_dirty. You may use dmu_buf_set_user() on the bonus + * buffer as well. You must release your hold with dmu_buf_rele(). + */ +int dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **); +int dmu_bonus_max(void); +int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *); + +/* + * Obtain the DMU buffer from the specified object which contains the + * specified offset. dmu_buf_hold() puts a "hold" on the buffer, so + * that it will remain in memory. You must release the hold with + * dmu_buf_rele(). You mustn't access the dmu_buf_t after releasing your + * hold. You must have a hold on any dmu_buf_t* you pass to the DMU. + * + * You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill + * on the returned buffer before reading or writing the buffer's + * db_data. The comments for those routines describe what particular + * operations are valid after calling them. + * + * The object number must be a valid, allocated object number.
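+ *
+ * A short sketch of the hold/release protocol described above ("os",
+ * "object", "offset", "tag" and "tx" are placeholders; the transaction
+ * is only needed if the buffer will be modified):
+ *
+ *        error = dmu_buf_hold(os, object, offset, tag, &db);
+ *        if (error == 0) {
+ *                dmu_buf_will_dirty(db, tx);
+ *                ... modify db->db_data ...
+ *                dmu_buf_rele(db, tag);
+ *        }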
+ */ +int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset, + void *tag, dmu_buf_t **); +void dmu_buf_add_ref(dmu_buf_t *db, void* tag); +void dmu_buf_rele(dmu_buf_t *db, void *tag); +uint64_t dmu_buf_refcount(dmu_buf_t *db); + +/* + * dmu_buf_hold_array holds the DMU buffers which contain all bytes in a + * range of an object. A pointer to an array of dmu_buf_t*'s is + * returned (in *dbpp). + * + * dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and + * frees the array. The hold on the array of buffers MUST be released + * with dmu_buf_rele_array. You can NOT release the hold on each buffer + * individually with dmu_buf_rele. + */ +int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset, + uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp); +void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag); + +/* + * Returns NULL on success, or the existing user ptr if it's already + * been set. + * + * user_ptr is for use by the user and can be obtained via dmu_buf_get_user(). + * + * user_data_ptr_ptr should be NULL, or a pointer to a pointer which + * will be set to db->db_data when you are allowed to access it. Note + * that db->db_data (the pointer) can change when you do dmu_buf_read(), + * dmu_buf_tryupgrade(), dmu_buf_will_dirty(), or dmu_buf_will_fill(). + * *user_data_ptr_ptr will be set to the new value when it changes. + * + * If non-NULL, pageout func will be called when this buffer is being + * excised from the cache, so that you can clean up the data structure + * pointed to by user_ptr. + * + * dmu_evict_user() will call the pageout func for all buffers in a + * objset with a given pageout func. + */ +void *dmu_buf_set_user(dmu_buf_t *db, void *user_ptr, void *user_data_ptr_ptr, + dmu_buf_evict_func_t *pageout_func); +/* + * set_user_ie is the same as set_user, but request immediate eviction + * when hold count goes to zero. + */ +void *dmu_buf_set_user_ie(dmu_buf_t *db, void *user_ptr, + void *user_data_ptr_ptr, dmu_buf_evict_func_t *pageout_func); +void *dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, + void *user_ptr, void *user_data_ptr_ptr, + dmu_buf_evict_func_t *pageout_func); +void dmu_evict_user(objset_t *os, dmu_buf_evict_func_t *func); + +/* + * Returns the user_ptr set with dmu_buf_set_user(), or NULL if not set. + */ +void *dmu_buf_get_user(dmu_buf_t *db); + +/* + * Indicate that you are going to modify the buffer's data (db_data). + * + * The transaction (tx) must be assigned to a txg (ie. you've called + * dmu_tx_assign()). The buffer's object must be held in the tx + * (ie. you've called dmu_tx_hold_object(tx, db->db_object)). + */ +void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx); + +/* + * You must create a transaction, then hold the objects which you will + * (or might) modify as part of this transaction. Then you must assign + * the transaction to a transaction group. Once the transaction has + * been assigned, you can modify buffers which belong to held objects as + * part of this transaction. You can't modify buffers before the + * transaction has been assigned; you can't modify buffers which don't + * belong to objects which this transaction holds; you can't hold + * objects once the transaction has been assigned. You may hold an + * object which you are going to free (with dmu_object_free()), but you + * don't have to. + * + * You can abort the transaction before it has been assigned. 
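+ *
+ * A minimal sketch of that sequence ("os", "object", "off", "len" and
+ * "buf" are placeholders; TXG_WAIT asks dmu_tx_assign() to block until
+ * the transaction can be assigned):
+ *
+ *        tx = dmu_tx_create(os);
+ *        dmu_tx_hold_write(tx, object, off, len);
+ *        error = dmu_tx_assign(tx, TXG_WAIT);
+ *        if (error != 0) {
+ *                dmu_tx_abort(tx);
+ *                return (error);
+ *        }
+ *        dmu_write(os, object, off, len, buf, tx);
+ *        dmu_tx_commit(tx);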
+ * + * Note that you may hold buffers (with dmu_buf_hold) at any time, + * regardless of transaction state. + */ + +#define DMU_NEW_OBJECT (-1ULL) +#define DMU_OBJECT_END (-1ULL) + +dmu_tx_t *dmu_tx_create(objset_t *os); +void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len); +void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, + uint64_t len); +void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, char *name); +void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object); +void dmu_tx_abort(dmu_tx_t *tx); +int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how); +void dmu_tx_wait(dmu_tx_t *tx); +void dmu_tx_commit(dmu_tx_t *tx); + +/* + * Free up the data blocks for a defined range of a file. If size is + * zero, the range from offset to end-of-file is freed. + */ +int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset, + uint64_t size, dmu_tx_t *tx); +int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset, + uint64_t size); +int dmu_free_object(objset_t *os, uint64_t object); + +/* + * Convenience functions. + * + * Canfail routines will return 0 on success, or an errno if there is a + * nonrecoverable I/O error. + */ +int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + void *buf); +void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + const void *buf, dmu_tx_t *tx); +void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, + dmu_tx_t *tx); +int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size); +int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size, + dmu_tx_t *tx); +int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, + uint64_t size, struct page *pp, dmu_tx_t *tx); + +extern int zfs_prefetch_disable; + +/* + * Asynchronously try to read in the data. + */ +void dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, + uint64_t len); + +typedef struct dmu_object_info { + /* All sizes are in bytes. */ + uint32_t doi_data_block_size; + uint32_t doi_metadata_block_size; + uint64_t doi_bonus_size; + dmu_object_type_t doi_type; + dmu_object_type_t doi_bonus_type; + uint8_t doi_indirection; /* 2 = dnode->indirect->data */ + uint8_t doi_checksum; + uint8_t doi_compress; + uint8_t doi_pad[5]; + /* Values below are number of 512-byte blocks. */ + uint64_t doi_physical_blks; /* data + metadata */ + uint64_t doi_max_block_offset; +} dmu_object_info_t; + +typedef void arc_byteswap_func_t(void *buf, size_t size); + +typedef struct dmu_object_type_info { + arc_byteswap_func_t *ot_byteswap; + boolean_t ot_metadata; + char *ot_name; +} dmu_object_type_info_t; + +extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES]; + +/* + * Get information on a DMU object. + * + * Return 0 on success or ENOENT if object is not allocated. + * + * If doi is NULL, just indicates whether the object exists. 
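+ *
+ * For example, a bare existence check can pass a NULL doi:
+ *
+ *        if (dmu_object_info(os, object, NULL) == 0) {
+ *                ... the object is allocated ...
+ *        }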
+ */ +int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi); +void dmu_object_info_from_dnode(struct dnode *dn, dmu_object_info_t *doi); +void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi); +void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, + u_longlong_t *nblk512); + +typedef struct dmu_objset_stats { + uint64_t dds_num_clones; /* number of clones of this */ + uint64_t dds_creation_txg; + uint64_t dds_guid; + dmu_objset_type_t dds_type; + uint8_t dds_is_snapshot; + uint8_t dds_inconsistent; + char dds_origin[MAXNAMELEN]; +} dmu_objset_stats_t; + +/* + * Get stats on a dataset. + */ +void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat); + +/* + * Add entries to the nvlist for all the objset's properties. See + * zfs_prop_table[] and zfs(1m) for details on the properties. + */ +void dmu_objset_stats(objset_t *os, struct nvlist *nv); + +/* + * Get the space usage statistics for statvfs(). + * + * refdbytes is the amount of space "referenced" by this objset. + * availbytes is the amount of space available to this objset, taking + * into account quotas & reservations, assuming that no other objsets + * use the space first. These values correspond to the 'referenced' and + * 'available' properties, described in the zfs(1m) manpage. + * + * usedobjs and availobjs are the number of objects currently allocated, + * and available. + */ +void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp, + uint64_t *usedobjsp, uint64_t *availobjsp); + +/* + * The fsid_guid is a 56-bit ID that can change to avoid collisions. + * (Contrast with the ds_guid which is a 64-bit ID that will never + * change, so there is a small probability that it will collide.) + */ +uint64_t dmu_objset_fsid_guid(objset_t *os); + +int dmu_objset_is_snapshot(objset_t *os); + +extern struct spa *dmu_objset_spa(objset_t *os); +extern struct zilog *dmu_objset_zil(objset_t *os); +extern struct dsl_pool *dmu_objset_pool(objset_t *os); +extern struct dsl_dataset *dmu_objset_ds(objset_t *os); +extern void dmu_objset_name(objset_t *os, char *buf); +extern dmu_objset_type_t dmu_objset_type(objset_t *os); +extern uint64_t dmu_objset_id(objset_t *os); +extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name, + uint64_t *id, uint64_t *offp, boolean_t *case_conflict); +extern int dmu_snapshot_realname(objset_t *os, char *name, char *real, + int maxlen, boolean_t *conflict); +extern int dmu_dir_list_next(objset_t *os, int namelen, char *name, + uint64_t *idp, uint64_t *offp); +extern void dmu_objset_set_user(objset_t *os, void *user_ptr); +extern void *dmu_objset_get_user(objset_t *os); + +/* + * Return the txg number for the given assigned transaction. + */ +uint64_t dmu_tx_get_txg(dmu_tx_t *tx); + +/* + * Synchronous write. + * If a parent zio is provided this function initiates a write on the + * provided buffer as a child of the parent zio. + * In the absence of a parent zio, the write is completed synchronously. + * At write completion, blk is filled with the bp of the written block. + * Note that while the data covered by this function will be on stable + * storage when the write completes this new data does not become a + * permanent part of the file until the associated transaction commits. 
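+ * (The caller, typically the ZFS intent log code, is expected to record
+ * the block pointer filled in here so that the write can still be made
+ * permanent by log replay if the system crashes before that transaction
+ * commits.)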
+ */ +typedef void dmu_sync_cb_t(dmu_buf_t *db, void *arg); +int dmu_sync(struct zio *zio, dmu_buf_t *db, + struct blkptr *bp, uint64_t txg, dmu_sync_cb_t *done, void *arg); + +/* + * Find the next hole or data block in file starting at *off + * Return found offset in *off. Return ESRCH for end of file. + */ +int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, + uint64_t *off); + +/* + * Initial setup and final teardown. + */ +extern void dmu_init(void); +extern void dmu_fini(void); + +typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp, + uint64_t object, uint64_t offset, int len); +void dmu_traverse_objset(objset_t *os, uint64_t txg_start, + dmu_traverse_cb_t cb, void *arg); + +int dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin, + struct vnode *vp, offset_t *off); + +typedef struct dmu_recv_cookie { + /* + * This structure is opaque! + * + * If logical and real are different, we are recving the stream + * into the "real" temporary clone, and then switching it with + * the "logical" target. + */ + struct dsl_dataset *drc_logical_ds; + struct dsl_dataset *drc_real_ds; + struct drr_begin *drc_drrb; + char *drc_tosnap; + boolean_t drc_newfs; + boolean_t drc_force; +} dmu_recv_cookie_t; + +int dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *, + boolean_t force, objset_t *origin, boolean_t online, dmu_recv_cookie_t *); +int dmu_recv_stream(dmu_recv_cookie_t *drc, struct vnode *vp, offset_t *voffp); +int dmu_recv_end(dmu_recv_cookie_t *drc); +void dmu_recv_abort_cleanup(dmu_recv_cookie_t *drc); + +/* CRC64 table */ +#define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */ +extern uint64_t zfs_crc64_table[256]; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DMU_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_impl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_impl.h new file mode 100644 index 000000000000..96ce688e1551 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_impl.h @@ -0,0 +1,239 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DMU_IMPL_H +#define _SYS_DMU_IMPL_H + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * This is the locking strategy for the DMU. Numbers in parenthesis are + * cases that use that lock order, referenced below: + * + * ARC is self-contained + * bplist is self-contained + * refcount is self-contained + * txg is self-contained (hopefully!) + * zst_lock + * zf_rwlock + * + * XXX try to improve evicting path? 
+ * + * dp_config_rwlock > os_obj_lock > dn_struct_rwlock > + * dn_dbufs_mtx > hash_mutexes > db_mtx > dd_lock > leafs + * + * dp_config_rwlock + * must be held before: everything + * protects dd namespace changes + * protects property changes globally + * held from: + * dsl_dir_open/r: + * dsl_dir_create_sync/w: + * dsl_dir_sync_destroy/w: + * dsl_dir_rename_sync/w: + * dsl_prop_changed_notify/r: + * + * os_obj_lock + * must be held before: + * everything except dp_config_rwlock + * protects os_obj_next + * held from: + * dmu_object_alloc: dn_dbufs_mtx, db_mtx, hash_mutexes, dn_struct_rwlock + * + * dn_struct_rwlock + * must be held before: + * everything except dp_config_rwlock and os_obj_lock + * protects structure of dnode (eg. nlevels) + * db_blkptr can change when syncing out change to nlevels + * dn_maxblkid + * dn_nlevels + * dn_*blksz* + * phys nlevels, maxblkid, physical blkptr_t's (?) + * held from: + * callers of dbuf_read_impl, dbuf_hold[_impl], dbuf_prefetch + * dmu_object_info_from_dnode: dn_dirty_mtx (dn_datablksz) + * dmu_tx_count_free: + * dbuf_read_impl: db_mtx, dmu_zfetch() + * dmu_zfetch: zf_rwlock/r, zst_lock, dbuf_prefetch() + * dbuf_new_size: db_mtx + * dbuf_dirty: db_mtx + * dbuf_findbp: (callers, phys? - the real need) + * dbuf_create: dn_dbufs_mtx, hash_mutexes, db_mtx (phys?) + * dbuf_prefetch: dn_dirty_mtx, hash_mutexes, db_mtx, dn_dbufs_mtx + * dbuf_hold_impl: hash_mutexes, db_mtx, dn_dbufs_mtx, dbuf_findbp() + * dnode_sync/w (increase_indirection): db_mtx (phys) + * dnode_set_blksz/w: dn_dbufs_mtx (dn_*blksz*) + * dnode_new_blkid/w: (dn_maxblkid) + * dnode_free_range/w: dn_dirty_mtx (dn_maxblkid) + * dnode_next_offset: (phys) + * + * dn_dbufs_mtx + * must be held before: + * db_mtx, hash_mutexes + * protects: + * dn_dbufs + * dn_evicted + * held from: + * dmu_evict_user: db_mtx (dn_dbufs) + * dbuf_free_range: db_mtx (dn_dbufs) + * dbuf_remove_ref: db_mtx, callees: + * dbuf_hash_remove: hash_mutexes, db_mtx + * dbuf_create: hash_mutexes, db_mtx (dn_dbufs) + * dnode_set_blksz: (dn_dbufs) + * + * hash_mutexes (global) + * must be held before: + * db_mtx + * protects dbuf_hash_table (global) and db_hash_next + * held from: + * dbuf_find: db_mtx + * dbuf_hash_insert: db_mtx + * dbuf_hash_remove: db_mtx + * + * db_mtx (meta-leaf) + * must be held before: + * dn_mtx, dn_dirty_mtx, dd_lock (leaf mutexes) + * protects: + * db_state + * db_holds + * db_buf + * db_changed + * db_data_pending + * db_dirtied + * db_link + * db_dirty_node (??) + * db_dirtycnt + * db_d.* + * db.* + * held from: + * dbuf_dirty: dn_mtx, dn_dirty_mtx + * dbuf_dirty->dsl_dir_willuse_space: dd_lock + * dbuf_dirty->dbuf_new_block->dsl_dataset_block_freeable: dd_lock + * dbuf_undirty: dn_dirty_mtx (db_d) + * dbuf_write_done: dn_dirty_mtx (db_state) + * dbuf_* + * dmu_buf_update_user: none (db_d) + * dmu_evict_user: none (db_d) (maybe can eliminate) + * dbuf_find: none (db_holds) + * dbuf_hash_insert: none (db_holds) + * dmu_buf_read_array_impl: none (db_state, db_changed) + * dmu_sync: none (db_dirty_node, db_d) + * dnode_reallocate: none (db) + * + * dn_mtx (leaf) + * protects: + * dn_dirty_dbufs + * dn_ranges + * phys accounting + * dn_allocated_txg + * dn_free_txg + * dn_assigned_txg + * dd_assigned_tx + * dn_notxholds + * dn_dirtyctx + * dn_dirtyctx_firstset + * (dn_phys copy fields?) + * (dn_phys contents?) 
+ * held from: + * dnode_* + * dbuf_dirty: none + * dbuf_sync: none (phys accounting) + * dbuf_undirty: none (dn_ranges, dn_dirty_dbufs) + * dbuf_write_done: none (phys accounting) + * dmu_object_info_from_dnode: none (accounting) + * dmu_tx_commit: none + * dmu_tx_hold_object_impl: none + * dmu_tx_try_assign: dn_notxholds(cv) + * dmu_tx_unassign: none + * + * dd_lock + * must be held before: + * ds_lock + * ancestors' dd_lock + * protects: + * dd_prop_cbs + * dd_sync_* + * dd_used_bytes + * dd_tempreserved + * dd_space_towrite + * dd_myname + * dd_phys accounting? + * held from: + * dsl_dir_* + * dsl_prop_changed_notify: none (dd_prop_cbs) + * dsl_prop_register: none (dd_prop_cbs) + * dsl_prop_unregister: none (dd_prop_cbs) + * dsl_dataset_block_freeable: none (dd_sync_*) + * + * os_lock (leaf) + * protects: + * os_dirty_dnodes + * os_free_dnodes + * os_dnodes + * os_downgraded_dbufs + * dn_dirtyblksz + * dn_dirty_link + * held from: + * dnode_create: none (os_dnodes) + * dnode_destroy: none (os_dnodes) + * dnode_setdirty: none (dn_dirtyblksz, os_*_dnodes) + * dnode_free: none (dn_dirtyblksz, os_*_dnodes) + * + * ds_lock + * protects: + * ds_user_ptr + * ds_user_evice_func + * ds_open_refcount + * ds_snapname + * ds_phys accounting + * ds_reserved + * held from: + * dsl_dataset_* + * + * dr_mtx (leaf) + * protects: + * dr_children + * held from: + * dbuf_dirty + * dbuf_undirty + * dbuf_sync_indirect + * dnode_new_blkid + */ + +struct objset; +struct dmu_pool; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DMU_IMPL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_objset.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_objset.h new file mode 100644 index 000000000000..1d65727808c3 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_objset.h @@ -0,0 +1,135 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_DMU_OBJSET_H +#define _SYS_DMU_OBJSET_H + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct dsl_dataset; +struct dmu_tx; +struct objset_impl; + +typedef struct objset_phys { + dnode_phys_t os_meta_dnode; + zil_header_t os_zil_header; + uint64_t os_type; + char os_pad[1024 - sizeof (dnode_phys_t) - sizeof (zil_header_t) - + sizeof (uint64_t)]; +} objset_phys_t; + +struct objset { + struct objset_impl *os; + int os_mode; +}; + +typedef struct objset_impl { + /* Immutable: */ + struct dsl_dataset *os_dsl_dataset; + spa_t *os_spa; + arc_buf_t *os_phys_buf; + objset_phys_t *os_phys; + dnode_t *os_meta_dnode; + zilog_t *os_zil; + objset_t os; + uint8_t os_checksum; /* can change, under dsl_dir's locks */ + uint8_t os_compress; /* can change, under dsl_dir's locks */ + uint8_t os_copies; /* can change, under dsl_dir's locks */ + uint8_t os_primary_cache; /* can change, under dsl_dir's locks */ + uint8_t os_secondary_cache; /* can change, under dsl_dir's locks */ + + /* no lock needed: */ + struct dmu_tx *os_synctx; /* XXX sketchy */ + blkptr_t *os_rootbp; + zil_header_t os_zil_header; + + /* Protected by os_obj_lock */ + kmutex_t os_obj_lock; + uint64_t os_obj_next; + + /* Protected by os_lock */ + kmutex_t os_lock; + list_t os_dirty_dnodes[TXG_SIZE]; + list_t os_free_dnodes[TXG_SIZE]; + list_t os_dnodes; + list_t os_downgraded_dbufs; + + /* stuff we store for the user */ + kmutex_t os_user_ptr_lock; + void *os_user_ptr; +} objset_impl_t; + +#define DMU_META_DNODE_OBJECT 0 + +#define DMU_OS_IS_L2CACHEABLE(os) \ + ((os)->os_secondary_cache == ZFS_CACHE_ALL || \ + (os)->os_secondary_cache == ZFS_CACHE_METADATA) + +/* called from zpl */ +int dmu_objset_open(const char *name, dmu_objset_type_t type, int mode, + objset_t **osp); +void dmu_objset_close(objset_t *os); +int dmu_objset_create(const char *name, dmu_objset_type_t type, + objset_t *clone_parent, uint64_t flags, + void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg); +int dmu_objset_destroy(const char *name); +int dmu_objset_rollback(objset_t *os); +int dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive); +void dmu_objset_stats(objset_t *os, nvlist_t *nv); +void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat); +void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp, + uint64_t *usedobjsp, uint64_t *availobjsp); +uint64_t dmu_objset_fsid_guid(objset_t *os); +int dmu_objset_find(char *name, int func(char *, void *), void *arg, + int flags); +int dmu_objset_find_spa(spa_t *spa, const char *name, + int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags); +int dmu_objset_prefetch(char *name, void *arg); +void dmu_objset_byteswap(void *buf, size_t size); +int dmu_objset_evict_dbufs(objset_t *os); + +/* called from dsl */ +void dmu_objset_sync(objset_impl_t *os, zio_t *zio, dmu_tx_t *tx); +objset_impl_t *dmu_objset_create_impl(spa_t *spa, struct dsl_dataset *ds, + blkptr_t *bp, dmu_objset_type_t type, dmu_tx_t *tx); +int dmu_objset_open_impl(spa_t *spa, struct dsl_dataset *ds, blkptr_t *bp, + objset_impl_t **osip); +void dmu_objset_evict(struct dsl_dataset *ds, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DMU_OBJSET_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_traverse.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_traverse.h new file mode 100644 index 000000000000..3e026891153c --- /dev/null +++ 
b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_traverse.h @@ -0,0 +1,57 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DMU_TRAVERSE_H +#define _SYS_DMU_TRAVERSE_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct dnode_phys; +struct dsl_dataset; + +typedef int (blkptr_cb_t)(spa_t *spa, blkptr_t *bp, + const zbookmark_t *zb, const struct dnode_phys *dnp, void *arg); + +#define TRAVERSE_PRE (1<<0) +#define TRAVERSE_POST (1<<1) +#define TRAVERSE_PREFETCH_METADATA (1<<2) +#define TRAVERSE_PREFETCH_DATA (1<<3) +#define TRAVERSE_PREFETCH (TRAVERSE_PREFETCH_METADATA | TRAVERSE_PREFETCH_DATA) + +int traverse_dataset(struct dsl_dataset *ds, uint64_t txg_start, + int flags, blkptr_cb_t func, void *arg); +int traverse_pool(spa_t *spa, blkptr_cb_t func, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DMU_TRAVERSE_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_tx.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_tx.h new file mode 100644 index 000000000000..2727daaaa76b --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_tx.h @@ -0,0 +1,139 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DMU_TX_H +#define _SYS_DMU_TX_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct dmu_buf_impl; +struct dmu_tx_hold; +struct dnode_link; +struct dsl_pool; +struct dnode; +struct dsl_dir; + +struct dmu_tx { + /* + * No synchronization is needed because a tx can only be handled + * by one thread. 
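	 *
	 * The usual single-threaded life cycle, roughly (the hold/write
	 * calls are the standard dmu.h consumers; object, offset and
	 * length are made up for illustration):
	 *
	 *	tx = dmu_tx_create(os);
	 *	dmu_tx_hold_write(tx, object, off, len);
	 *	err = dmu_tx_assign(tx, TXG_WAIT);
	 *	if (err) {
	 *		dmu_tx_abort(tx);
	 *		return (err);
	 *	}
	 *	dmu_write(os, object, off, len, buf, tx);
	 *	dmu_tx_commit(tx);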
+ */ + list_t tx_holds; /* list of dmu_tx_hold_t */ + objset_t *tx_objset; + struct dsl_dir *tx_dir; + struct dsl_pool *tx_pool; + uint64_t tx_txg; + uint64_t tx_lastsnap_txg; + uint64_t tx_lasttried_txg; + txg_handle_t tx_txgh; + void *tx_tempreserve_cookie; + struct dmu_tx_hold *tx_needassign_txh; + uint8_t tx_anyobj; + int tx_err; +#ifdef ZFS_DEBUG + uint64_t tx_space_towrite; + uint64_t tx_space_tofree; + uint64_t tx_space_tooverwrite; + uint64_t tx_space_tounref; + refcount_t tx_space_written; + refcount_t tx_space_freed; +#endif +}; + +enum dmu_tx_hold_type { + THT_NEWOBJECT, + THT_WRITE, + THT_BONUS, + THT_FREE, + THT_ZAP, + THT_SPACE, + THT_NUMTYPES +}; + +typedef struct dmu_tx_hold { + dmu_tx_t *txh_tx; + list_node_t txh_node; + struct dnode *txh_dnode; + uint64_t txh_space_towrite; + uint64_t txh_space_tofree; + uint64_t txh_space_tooverwrite; + uint64_t txh_space_tounref; + uint64_t txh_memory_tohold; + uint64_t txh_fudge; +#ifdef ZFS_DEBUG + enum dmu_tx_hold_type txh_type; + uint64_t txh_arg1; + uint64_t txh_arg2; +#endif +} dmu_tx_hold_t; + + +/* + * These routines are defined in dmu.h, and are called by the user. + */ +dmu_tx_t *dmu_tx_create(objset_t *dd); +int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how); +void dmu_tx_commit(dmu_tx_t *tx); +void dmu_tx_abort(dmu_tx_t *tx); +uint64_t dmu_tx_get_txg(dmu_tx_t *tx); +void dmu_tx_wait(dmu_tx_t *tx); + +/* + * These routines are defined in dmu_spa.h, and are called by the SPA. + */ +extern dmu_tx_t *dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg); + +/* + * These routines are only called by the DMU. + */ +dmu_tx_t *dmu_tx_create_dd(dsl_dir_t *dd); +int dmu_tx_is_syncing(dmu_tx_t *tx); +int dmu_tx_private_ok(dmu_tx_t *tx); +void dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object); +void dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta); +void dmu_tx_dirty_buf(dmu_tx_t *tx, struct dmu_buf_impl *db); +int dmu_tx_holds(dmu_tx_t *tx, uint64_t object); +void dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space); + +#ifdef ZFS_DEBUG +#define DMU_TX_DIRTY_BUF(tx, db) dmu_tx_dirty_buf(tx, db) +#else +#define DMU_TX_DIRTY_BUF(tx, db) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DMU_TX_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_zfetch.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_zfetch.h new file mode 100644 index 000000000000..c94bced933af --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dmu_zfetch.h @@ -0,0 +1,75 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _DFETCH_H +#define _DFETCH_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern uint64_t zfetch_array_rd_sz; + +struct dnode; /* so we can reference dnode */ + +typedef enum zfetch_dirn { + ZFETCH_FORWARD = 1, /* prefetch increasing block numbers */ + ZFETCH_BACKWARD = -1 /* prefetch decreasing block numbers */ +} zfetch_dirn_t; + +typedef struct zstream { + uint64_t zst_offset; /* offset of starting block in range */ + uint64_t zst_len; /* length of range, in blocks */ + zfetch_dirn_t zst_direction; /* direction of prefetch */ + uint64_t zst_stride; /* length of stride, in blocks */ + uint64_t zst_ph_offset; /* prefetch offset, in blocks */ + uint64_t zst_cap; /* prefetch limit (cap), in blocks */ + kmutex_t zst_lock; /* protects stream */ + clock_t zst_last; /* lbolt of last prefetch */ + avl_node_t zst_node; /* embed avl node here */ +} zstream_t; + +typedef struct zfetch { + krwlock_t zf_rwlock; /* protects zfetch structure */ + list_t zf_stream; /* AVL tree of zstream_t's */ + struct dnode *zf_dnode; /* dnode that owns this zfetch */ + uint32_t zf_stream_cnt; /* # of active streams */ + uint64_t zf_alloc_fail; /* # of failed attempts to alloc strm */ +} zfetch_t; + +void dmu_zfetch_init(zfetch_t *, struct dnode *); +void dmu_zfetch_rele(zfetch_t *); +void dmu_zfetch(zfetch_t *, uint64_t, uint64_t, int); + + +#ifdef __cplusplus +} +#endif + +#endif /* _DFETCH_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dnode.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dnode.h new file mode 100644 index 000000000000..c79ff48a60c5 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dnode.h @@ -0,0 +1,275 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DNODE_H +#define _SYS_DNODE_H + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * dnode_hold() flags. + */ +#define DNODE_MUST_BE_ALLOCATED 1 +#define DNODE_MUST_BE_FREE 2 + +/* + * dnode_next_offset() flags. + */ +#define DNODE_FIND_HOLE 1 +#define DNODE_FIND_BACKWARDS 2 +#define DNODE_FIND_HAVELOCK 4 + +/* + * Fixed constants. + */ +#define DNODE_SHIFT 9 /* 512 bytes */ +#define DN_MIN_INDBLKSHIFT 10 /* 1k */ +#define DN_MAX_INDBLKSHIFT 14 /* 16k */ +#define DNODE_BLOCK_SHIFT 14 /* 16k */ +#define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */ +#define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */ +#define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */ + +/* + * Derived constants. 
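 *
 * For orientation: with the fixed constants above and a 128-byte
 * blkptr_t (SPA_BLKPTRSHIFT == 7), the definitions below work out to
 * DNODE_SIZE = 512 bytes, DN_MAX_NBLKPTR = (512 - 64) >> 7 = 3 block
 * pointers, DN_MAX_BONUSLEN = 512 - 64 - 128 = 320 bytes of bonus
 * space, and DNODES_PER_BLOCK = 1 << (14 - 9) = 32 dnodes per 16k
 * metadnode block.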
+ */ +#define DNODE_SIZE (1 << DNODE_SHIFT) +#define DN_MAX_NBLKPTR ((DNODE_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT) +#define DN_MAX_BONUSLEN (DNODE_SIZE - DNODE_CORE_SIZE - (1 << SPA_BLKPTRSHIFT)) +#define DN_MAX_OBJECT (1ULL << DN_MAX_OBJECT_SHIFT) +#define DN_ZERO_BONUSLEN (DN_MAX_BONUSLEN + 1) + +#define DNODES_PER_BLOCK_SHIFT (DNODE_BLOCK_SHIFT - DNODE_SHIFT) +#define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT) +#define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT) + +/* The +2 here is a cheesy way to round up */ +#define DN_MAX_LEVELS (2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \ + (DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT))) + +#define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus + \ + (((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t)))) + +#define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \ + (dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT) + +#define EPB(blkshift, typeshift) (1 << (blkshift - typeshift)) + +struct dmu_buf_impl; +struct objset_impl; +struct zio; + +enum dnode_dirtycontext { + DN_UNDIRTIED, + DN_DIRTY_OPEN, + DN_DIRTY_SYNC +}; + +/* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */ +#define DNODE_FLAG_USED_BYTES (1<<0) + +typedef struct dnode_phys { + uint8_t dn_type; /* dmu_object_type_t */ + uint8_t dn_indblkshift; /* ln2(indirect block size) */ + uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */ + uint8_t dn_nblkptr; /* length of dn_blkptr */ + uint8_t dn_bonustype; /* type of data in bonus buffer */ + uint8_t dn_checksum; /* ZIO_CHECKSUM type */ + uint8_t dn_compress; /* ZIO_COMPRESS type */ + uint8_t dn_flags; /* DNODE_FLAG_* */ + uint16_t dn_datablkszsec; /* data block size in 512b sectors */ + uint16_t dn_bonuslen; /* length of dn_bonus */ + uint8_t dn_pad2[4]; + + /* accounting is protected by dn_dirty_mtx */ + uint64_t dn_maxblkid; /* largest allocated block ID */ + uint64_t dn_used; /* bytes (or sectors) of disk space */ + + uint64_t dn_pad3[4]; + + blkptr_t dn_blkptr[1]; + uint8_t dn_bonus[DN_MAX_BONUSLEN]; +} dnode_phys_t; + +typedef struct dnode { + /* + * dn_struct_rwlock protects the structure of the dnode, + * including the number of levels of indirection (dn_nlevels), + * dn_maxblkid, and dn_next_* + */ + krwlock_t dn_struct_rwlock; + + /* + * Our link on dataset's dd_dnodes list. + * Protected by dd_accounting_mtx. + */ + list_node_t dn_link; + + /* immutable: */ + struct objset_impl *dn_objset; + uint64_t dn_object; + struct dmu_buf_impl *dn_dbuf; + dnode_phys_t *dn_phys; /* pointer into dn->dn_dbuf->db.db_data */ + + /* + * Copies of stuff in dn_phys. They're valid in the open + * context (eg. even before the dnode is first synced). + * Where necessary, these are protected by dn_struct_rwlock. + */ + dmu_object_type_t dn_type; /* object type */ + uint16_t dn_bonuslen; /* bonus length */ + uint8_t dn_bonustype; /* bonus type */ + uint8_t dn_nblkptr; /* number of blkptrs (immutable) */ + uint8_t dn_checksum; /* ZIO_CHECKSUM type */ + uint8_t dn_compress; /* ZIO_COMPRESS type */ + uint8_t dn_nlevels; + uint8_t dn_indblkshift; + uint8_t dn_datablkshift; /* zero if blksz not power of 2! 
*/ + uint16_t dn_datablkszsec; /* in 512b sectors */ + uint32_t dn_datablksz; /* in bytes */ + uint64_t dn_maxblkid; + uint8_t dn_next_nlevels[TXG_SIZE]; + uint8_t dn_next_indblkshift[TXG_SIZE]; + uint16_t dn_next_bonuslen[TXG_SIZE]; + uint32_t dn_next_blksz[TXG_SIZE]; /* next block size in bytes */ + + /* protected by os_lock: */ + list_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */ + + /* protected by dn_mtx: */ + kmutex_t dn_mtx; + list_t dn_dirty_records[TXG_SIZE]; + avl_tree_t dn_ranges[TXG_SIZE]; + uint64_t dn_allocated_txg; + uint64_t dn_free_txg; + uint64_t dn_assigned_txg; + kcondvar_t dn_notxholds; + enum dnode_dirtycontext dn_dirtyctx; + uint8_t *dn_dirtyctx_firstset; /* dbg: contents meaningless */ + + /* protected by own devices */ + refcount_t dn_tx_holds; + refcount_t dn_holds; + + kmutex_t dn_dbufs_mtx; + list_t dn_dbufs; /* linked list of descendent dbuf_t's */ + struct dmu_buf_impl *dn_bonus; /* bonus buffer dbuf */ + + /* parent IO for current sync write */ + zio_t *dn_zio; + + /* holds prefetch structure */ + struct zfetch dn_zfetch; +} dnode_t; + +typedef struct free_range { + avl_node_t fr_node; + uint64_t fr_blkid; + uint64_t fr_nblks; +} free_range_t; + +dnode_t *dnode_special_open(struct objset_impl *dd, dnode_phys_t *dnp, + uint64_t object); +void dnode_special_close(dnode_t *dn); + +void dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx); +int dnode_hold(struct objset_impl *dd, uint64_t object, + void *ref, dnode_t **dnp); +int dnode_hold_impl(struct objset_impl *dd, uint64_t object, int flag, + void *ref, dnode_t **dnp); +boolean_t dnode_add_ref(dnode_t *dn, void *ref); +void dnode_rele(dnode_t *dn, void *ref); +void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx); +void dnode_sync(dnode_t *dn, dmu_tx_t *tx); +void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx); +void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx); +void dnode_free(dnode_t *dn, dmu_tx_t *tx); +void dnode_byteswap(dnode_phys_t *dnp); +void dnode_buf_byteswap(void *buf, size_t size); +void dnode_verify(dnode_t *dn); +int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx); +uint64_t dnode_current_max_length(dnode_t *dn); +void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx); +void dnode_clear_range(dnode_t *dn, uint64_t blkid, + uint64_t nblks, dmu_tx_t *tx); +void dnode_diduse_space(dnode_t *dn, int64_t space); +void dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx); +void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t); +uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid); +void dnode_init(void); +void dnode_fini(void); +int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off, + int minlvl, uint64_t blkfill, uint64_t txg); +void dnode_evict_dbufs(dnode_t *dn); + +#ifdef ZFS_DEBUG + +/* + * There should be a ## between the string literal and fmt, to make it + * clear that we're joining two strings together, but that piece of shit + * gcc doesn't support that preprocessor token. + */ +#define dprintf_dnode(dn, fmt, ...) 
do { \ + if (zfs_flags & ZFS_DEBUG_DPRINTF) { \ + char __db_buf[32]; \ + uint64_t __db_obj = (dn)->dn_object; \ + if (__db_obj == DMU_META_DNODE_OBJECT) \ + (void) strcpy(__db_buf, "mdn"); \ + else \ + (void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \ + (u_longlong_t)__db_obj);\ + dprintf_ds((dn)->dn_objset->os_dsl_dataset, "obj=%s " fmt, \ + __db_buf, __VA_ARGS__); \ + } \ +_NOTE(CONSTCOND) } while (0) + +#define DNODE_VERIFY(dn) dnode_verify(dn) +#define FREE_VERIFY(db, start, end, tx) free_verify(db, start, end, tx) + +#else + +#define dprintf_dnode(db, fmt, ...) +#define DNODE_VERIFY(dn) +#define FREE_VERIFY(db, start, end, tx) + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DNODE_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_dataset.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_dataset.h new file mode 100644 index 000000000000..8665aec2dda8 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_dataset.h @@ -0,0 +1,239 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DSL_DATASET_H +#define _SYS_DSL_DATASET_H + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct dsl_dataset; +struct dsl_dir; +struct dsl_pool; + +typedef void dsl_dataset_evict_func_t(struct dsl_dataset *, void *); + +#define DS_FLAG_INCONSISTENT (1ULL<<0) +#define DS_IS_INCONSISTENT(ds) \ + ((ds)->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) +/* + * NB: nopromote can not yet be set, but we want support for it in this + * on-disk version, so that we don't need to upgrade for it later. It + * will be needed when we implement 'zfs split' (where the split off + * clone should not be promoted). + */ +#define DS_FLAG_NOPROMOTE (1ULL<<1) + +/* + * DS_FLAG_UNIQUE_ACCURATE is set if ds_unique_bytes has been correctly + * calculated for head datasets (starting with SPA_VERSION_UNIQUE_ACCURATE, + * refquota/refreservations). + */ +#define DS_FLAG_UNIQUE_ACCURATE (1ULL<<2) + +/* + * DS_FLAG_CI_DATASET is set if the dataset contains a file system whose + * name lookups should be performed case-insensitively. 
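 * A hypothetical consumer would test it the same way the flag macros
 * above do, e.g.:
 *
 *	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
 *		(perform case-insensitive name matching)
 *
 * where "ds" is a held dsl_dataset_t.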
+ */ +#define DS_FLAG_CI_DATASET (1ULL<<16) + +typedef struct dsl_dataset_phys { + uint64_t ds_dir_obj; /* DMU_OT_DSL_DIR */ + uint64_t ds_prev_snap_obj; /* DMU_OT_DSL_DATASET */ + uint64_t ds_prev_snap_txg; + uint64_t ds_next_snap_obj; /* DMU_OT_DSL_DATASET */ + uint64_t ds_snapnames_zapobj; /* DMU_OT_DSL_DS_SNAP_MAP 0 for snaps */ + uint64_t ds_num_children; /* clone/snap children; ==0 for head */ + uint64_t ds_creation_time; /* seconds since 1970 */ + uint64_t ds_creation_txg; + uint64_t ds_deadlist_obj; /* DMU_OT_BPLIST */ + uint64_t ds_used_bytes; + uint64_t ds_compressed_bytes; + uint64_t ds_uncompressed_bytes; + uint64_t ds_unique_bytes; /* only relevant to snapshots */ + /* + * The ds_fsid_guid is a 56-bit ID that can change to avoid + * collisions. The ds_guid is a 64-bit ID that will never + * change, so there is a small probability that it will collide. + */ + uint64_t ds_fsid_guid; + uint64_t ds_guid; + uint64_t ds_flags; /* DS_FLAG_* */ + blkptr_t ds_bp; + uint64_t ds_next_clones_obj; /* DMU_OT_DSL_CLONES */ + uint64_t ds_props_obj; /* DMU_OT_DSL_PROPS for snaps */ + uint64_t ds_pad[6]; /* pad out to 320 bytes for good measure */ +} dsl_dataset_phys_t; + +typedef struct dsl_dataset { + /* Immutable: */ + struct dsl_dir *ds_dir; + dsl_dataset_phys_t *ds_phys; + dmu_buf_t *ds_dbuf; + uint64_t ds_object; + uint64_t ds_fsid_guid; + + /* only used in syncing context, only valid for non-snapshots: */ + struct dsl_dataset *ds_prev; + uint64_t ds_origin_txg; + + /* has internal locking: */ + bplist_t ds_deadlist; + + /* protected by lock on pool's dp_dirty_datasets list */ + txg_node_t ds_dirty_link; + list_node_t ds_synced_link; + + /* + * ds_phys->ds_ is also protected by ds_lock. + * Protected by ds_lock: + */ + kmutex_t ds_lock; + void *ds_user_ptr; + dsl_dataset_evict_func_t *ds_user_evict_func; + + /* + * ds_owner is protected by the ds_rwlock and the ds_lock + */ + krwlock_t ds_rwlock; + kcondvar_t ds_exclusive_cv; + void *ds_owner; + + /* no locking; only for making guesses */ + uint64_t ds_trysnap_txg; + + /* for objset_open() */ + kmutex_t ds_opening_lock; + + uint64_t ds_reserved; /* cached refreservation */ + uint64_t ds_quota; /* cached refquota */ + + /* Protected by ds_lock; keep at end of struct for better locality */ + char ds_snapname[MAXNAMELEN]; +} dsl_dataset_t; + +#define dsl_dataset_is_snapshot(ds) \ + ((ds)->ds_phys->ds_num_children != 0) + +#define DS_UNIQUE_IS_ACCURATE(ds) \ + (((ds)->ds_phys->ds_flags & DS_FLAG_UNIQUE_ACCURATE) != 0) + +int dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp); +int dsl_dataset_hold_obj(struct dsl_pool *dp, uint64_t dsobj, + void *tag, dsl_dataset_t **); +int dsl_dataset_own(const char *name, int flags, void *owner, + dsl_dataset_t **dsp); +int dsl_dataset_own_obj(struct dsl_pool *dp, uint64_t dsobj, + int flags, void *owner, dsl_dataset_t **); +void dsl_dataset_name(dsl_dataset_t *ds, char *name); +void dsl_dataset_rele(dsl_dataset_t *ds, void *tag); +void dsl_dataset_disown(dsl_dataset_t *ds, void *owner); +void dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag); +boolean_t dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, + void *owner); +void dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner); +uint64_t dsl_dataset_create_sync(dsl_dir_t *pds, const char *lastname, + dsl_dataset_t *origin, uint64_t flags, cred_t *, dmu_tx_t *); +uint64_t dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin, + uint64_t flags, dmu_tx_t *tx); +int dsl_dataset_destroy(dsl_dataset_t *ds, void 
*tag); +int dsl_snapshots_destroy(char *fsname, char *snapname); +dsl_checkfunc_t dsl_dataset_destroy_check; +dsl_syncfunc_t dsl_dataset_destroy_sync; +dsl_checkfunc_t dsl_dataset_snapshot_check; +dsl_syncfunc_t dsl_dataset_snapshot_sync; +int dsl_dataset_rollback(dsl_dataset_t *ds, dmu_objset_type_t ost); +int dsl_dataset_rename(char *name, const char *newname, boolean_t recursive); +int dsl_dataset_promote(const char *name); +int dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head, + boolean_t force); + +void *dsl_dataset_set_user_ptr(dsl_dataset_t *ds, + void *p, dsl_dataset_evict_func_t func); +void *dsl_dataset_get_user_ptr(dsl_dataset_t *ds); + +blkptr_t *dsl_dataset_get_blkptr(dsl_dataset_t *ds); +void dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx); + +spa_t *dsl_dataset_get_spa(dsl_dataset_t *ds); + +boolean_t dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds); + +void dsl_dataset_sync(dsl_dataset_t *os, zio_t *zio, dmu_tx_t *tx); + +void dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx); +int dsl_dataset_block_kill(dsl_dataset_t *ds, blkptr_t *bp, zio_t *pio, + dmu_tx_t *tx); +int dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth); +uint64_t dsl_dataset_prev_snap_txg(dsl_dataset_t *ds); + +void dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx); +void dsl_dataset_stats(dsl_dataset_t *os, nvlist_t *nv); +void dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat); +void dsl_dataset_space(dsl_dataset_t *ds, + uint64_t *refdbytesp, uint64_t *availbytesp, + uint64_t *usedobjsp, uint64_t *availobjsp); +uint64_t dsl_dataset_fsid_guid(dsl_dataset_t *ds); + +int dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf); + +int dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota, + uint64_t asize, uint64_t inflight, uint64_t *used, + uint64_t *ref_rsrv); +int dsl_dataset_set_quota(const char *dsname, uint64_t quota); +void dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, + dmu_tx_t *tx); +int dsl_dataset_set_reservation(const char *dsname, uint64_t reservation); +void dsl_dataset_set_flags(dsl_dataset_t *ds, uint64_t flags); +int64_t dsl_dataset_new_refreservation(dsl_dataset_t *ds, uint64_t reservation, + dmu_tx_t *tx); + +#ifdef ZFS_DEBUG +#define dprintf_ds(ds, fmt, ...) do { \ + if (zfs_flags & ZFS_DEBUG_DPRINTF) { \ + char *__ds_name = kmem_alloc(MAXNAMELEN, KM_SLEEP); \ + dsl_dataset_name(ds, __ds_name); \ + dprintf("ds=%s " fmt, __ds_name, __VA_ARGS__); \ + kmem_free(__ds_name, MAXNAMELEN); \ + } \ +_NOTE(CONSTCOND) } while (0) +#else +#define dprintf_ds(dd, fmt, ...) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DSL_DATASET_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_deleg.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_deleg.h new file mode 100644 index 000000000000..a29e44e67d0c --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_deleg.h @@ -0,0 +1,73 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DSL_DELEG_H +#define _SYS_DSL_DELEG_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZFS_DELEG_PERM_NONE "" +#define ZFS_DELEG_PERM_CREATE "create" +#define ZFS_DELEG_PERM_DESTROY "destroy" +#define ZFS_DELEG_PERM_SNAPSHOT "snapshot" +#define ZFS_DELEG_PERM_ROLLBACK "rollback" +#define ZFS_DELEG_PERM_CLONE "clone" +#define ZFS_DELEG_PERM_PROMOTE "promote" +#define ZFS_DELEG_PERM_RENAME "rename" +#define ZFS_DELEG_PERM_MOUNT "mount" +#define ZFS_DELEG_PERM_SHARE "share" +#define ZFS_DELEG_PERM_SEND "send" +#define ZFS_DELEG_PERM_RECEIVE "receive" +#define ZFS_DELEG_PERM_ALLOW "allow" +#define ZFS_DELEG_PERM_USERPROP "userprop" +#define ZFS_DELEG_PERM_VSCAN "vscan" + +/* + * Note: the names of properties that are marked delegatable are also + * valid delegated permissions + */ + +int dsl_deleg_get(const char *ddname, nvlist_t **nvp); +int dsl_deleg_set(const char *ddname, nvlist_t *nvp, boolean_t unset); +int dsl_deleg_access(const char *ddname, const char *perm, cred_t *cr); +void dsl_deleg_set_create_perms(dsl_dir_t *dd, dmu_tx_t *tx, cred_t *cr); +int dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr); +int dsl_deleg_can_unallow(char *ddname, nvlist_t *nvp, cred_t *cr); +int dsl_deleg_destroy(objset_t *os, uint64_t zapobj, dmu_tx_t *tx); +boolean_t dsl_delegation_on(objset_t *os); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DSL_DELEG_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_dir.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_dir.h new file mode 100644 index 000000000000..86b9636ceaab --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_dir.h @@ -0,0 +1,160 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_DSL_DIR_H +#define _SYS_DSL_DIR_H + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct dsl_dataset; + +typedef enum dd_used { + DD_USED_HEAD, + DD_USED_SNAP, + DD_USED_CHILD, + DD_USED_CHILD_RSRV, + DD_USED_REFRSRV, + DD_USED_NUM +} dd_used_t; + +#define DD_FLAG_USED_BREAKDOWN (1<<0) + +typedef struct dsl_dir_phys { + uint64_t dd_creation_time; /* not actually used */ + uint64_t dd_head_dataset_obj; + uint64_t dd_parent_obj; + uint64_t dd_origin_obj; + uint64_t dd_child_dir_zapobj; + /* + * how much space our children are accounting for; for leaf + * datasets, == physical space used by fs + snaps + */ + uint64_t dd_used_bytes; + uint64_t dd_compressed_bytes; + uint64_t dd_uncompressed_bytes; + /* Administrative quota setting */ + uint64_t dd_quota; + /* Administrative reservation setting */ + uint64_t dd_reserved; + uint64_t dd_props_zapobj; + uint64_t dd_deleg_zapobj; /* dataset delegation permissions */ + uint64_t dd_flags; + uint64_t dd_used_breakdown[DD_USED_NUM]; + uint64_t dd_pad[14]; /* pad out to 256 bytes for good measure */ +} dsl_dir_phys_t; + +struct dsl_dir { + /* These are immutable; no lock needed: */ + uint64_t dd_object; + dsl_dir_phys_t *dd_phys; + dmu_buf_t *dd_dbuf; + dsl_pool_t *dd_pool; + + /* protected by lock on pool's dp_dirty_dirs list */ + txg_node_t dd_dirty_link; + + /* protected by dp_config_rwlock */ + dsl_dir_t *dd_parent; + + /* Protected by dd_lock */ + kmutex_t dd_lock; + list_t dd_prop_cbs; /* list of dsl_prop_cb_record_t's */ + + /* gross estimate of space used by in-flight tx's */ + uint64_t dd_tempreserved[TXG_SIZE]; + /* amount of space we expect to write; == amount of dirty data */ + int64_t dd_space_towrite[TXG_SIZE]; + + /* protected by dd_lock; keep at end of struct for better locality */ + char dd_myname[MAXNAMELEN]; +}; + +void dsl_dir_close(dsl_dir_t *dd, void *tag); +int dsl_dir_open(const char *name, void *tag, dsl_dir_t **, const char **tail); +int dsl_dir_open_spa(spa_t *spa, const char *name, void *tag, dsl_dir_t **, + const char **tailp); +int dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj, + const char *tail, void *tag, dsl_dir_t **); +void dsl_dir_name(dsl_dir_t *dd, char *buf); +int dsl_dir_namelen(dsl_dir_t *dd); +int dsl_dir_is_private(dsl_dir_t *dd); +uint64_t dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, + const char *name, dmu_tx_t *tx); +dsl_checkfunc_t dsl_dir_destroy_check; +dsl_syncfunc_t dsl_dir_destroy_sync; +void dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv); +uint64_t dsl_dir_space_available(dsl_dir_t *dd, + dsl_dir_t *ancestor, int64_t delta, int ondiskonly); +void dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx); +void dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx); +int dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t mem, + uint64_t asize, uint64_t fsize, uint64_t usize, void **tr_cookiep, + dmu_tx_t *tx); +void dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx); +void dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx); +void dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type, + int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx); +void dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta, + dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx); +int dsl_dir_set_quota(const char *ddname, uint64_t quota); +int dsl_dir_set_reservation(const char *ddname, uint64_t reservation); +int dsl_dir_rename(dsl_dir_t *dd, const char *newname); +int dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space); +int 
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx); +boolean_t dsl_dir_is_clone(dsl_dir_t *dd); +void dsl_dir_new_refreservation(dsl_dir_t *dd, struct dsl_dataset *ds, + uint64_t reservation, cred_t *cr, dmu_tx_t *tx); + +/* internal reserved dir name */ +#define MOS_DIR_NAME "$MOS" +#define ORIGIN_DIR_NAME "$ORIGIN" + +#ifdef ZFS_DEBUG +#define dprintf_dd(dd, fmt, ...) do { \ + if (zfs_flags & ZFS_DEBUG_DPRINTF) { \ + char *__ds_name = kmem_alloc(MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, \ + KM_SLEEP); \ + dsl_dir_name(dd, __ds_name); \ + dprintf("dd=%s " fmt, __ds_name, __VA_ARGS__); \ + kmem_free(__ds_name, MAXNAMELEN + strlen(MOS_DIR_NAME) + 1); \ + } \ +_NOTE(CONSTCOND) } while (0) +#else +#define dprintf_dd(dd, fmt, ...) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DSL_DIR_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_pool.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_pool.h new file mode 100644 index 000000000000..3bb4ad4efe55 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_pool.h @@ -0,0 +1,150 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DSL_POOL_H +#define _SYS_DSL_POOL_H + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct objset; +struct dsl_dir; +struct dsl_dataset; +struct dsl_pool; +struct dmu_tx; + +enum scrub_func { + SCRUB_FUNC_NONE, + SCRUB_FUNC_CLEAN, + SCRUB_FUNC_NUMFUNCS +}; + +/* These macros are for indexing into the zfs_all_blkstats_t. 
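 *
 * Illustrative use (the surrounding traversal callback is hypothetical;
 * only the indexing scheme is the point): each visited block is charged
 * to its indirection level and object type, with the extra row and
 * column available for running totals:
 *
 *	zfs_blkstat_t *zb = &dp->dp_blkstats->zab_type[level][ot];
 *
 *	zb->zb_count++;
 *	zb->zb_asize += BP_GET_ASIZE(bp);
 *	zb->zb_lsize += BP_GET_LSIZE(bp);
 *	zb->zb_psize += BP_GET_PSIZE(bp);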
*/ +#define DMU_OT_DEFERRED DMU_OT_NONE +#define DMU_OT_TOTAL DMU_OT_NUMTYPES + +typedef struct zfs_blkstat { + uint64_t zb_count; + uint64_t zb_asize; + uint64_t zb_lsize; + uint64_t zb_psize; + uint64_t zb_gangs; + uint64_t zb_ditto_2_of_2_samevdev; + uint64_t zb_ditto_2_of_3_samevdev; + uint64_t zb_ditto_3_of_3_samevdev; +} zfs_blkstat_t; + +typedef struct zfs_all_blkstats { + zfs_blkstat_t zab_type[DN_MAX_LEVELS + 1][DMU_OT_TOTAL + 1]; +} zfs_all_blkstats_t; + + +typedef struct dsl_pool { + /* Immutable */ + spa_t *dp_spa; + struct objset *dp_meta_objset; + struct dsl_dir *dp_root_dir; + struct dsl_dir *dp_mos_dir; + struct dsl_dataset *dp_origin_snap; + uint64_t dp_root_dir_obj; + + /* No lock needed - sync context only */ + blkptr_t dp_meta_rootbp; + list_t dp_synced_datasets; + hrtime_t dp_read_overhead; + uint64_t dp_throughput; + uint64_t dp_write_limit; + + /* Uses dp_lock */ + kmutex_t dp_lock; + uint64_t dp_space_towrite[TXG_SIZE]; + uint64_t dp_tempreserved[TXG_SIZE]; + + enum scrub_func dp_scrub_func; + uint64_t dp_scrub_queue_obj; + uint64_t dp_scrub_min_txg; + uint64_t dp_scrub_max_txg; + zbookmark_t dp_scrub_bookmark; + boolean_t dp_scrub_pausing; + boolean_t dp_scrub_isresilver; + uint64_t dp_scrub_start_time; + kmutex_t dp_scrub_cancel_lock; /* protects dp_scrub_restart */ + boolean_t dp_scrub_restart; + + /* Has its own locking */ + tx_state_t dp_tx; + txg_list_t dp_dirty_datasets; + txg_list_t dp_dirty_dirs; + txg_list_t dp_sync_tasks; + + /* + * Protects administrative changes (properties, namespace) + * It is only held for write in syncing context. Therefore + * syncing context does not need to ever have it for read, since + * nobody else could possibly have it for write. + */ + krwlock_t dp_config_rwlock; + + zfs_all_blkstats_t *dp_blkstats; +} dsl_pool_t; + +int dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp); +void dsl_pool_close(dsl_pool_t *dp); +dsl_pool_t *dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg); +void dsl_pool_sync(dsl_pool_t *dp, uint64_t txg); +void dsl_pool_zil_clean(dsl_pool_t *dp); +int dsl_pool_sync_context(dsl_pool_t *dp); +uint64_t dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree); +int dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx); +void dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx); +void dsl_pool_memory_pressure(dsl_pool_t *dp); +void dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx); +int dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp, + zio_done_func_t *done, void *private, uint32_t arc_flags); +void dsl_pool_ds_destroyed(struct dsl_dataset *ds, struct dmu_tx *tx); +void dsl_pool_ds_snapshotted(struct dsl_dataset *ds, struct dmu_tx *tx); +void dsl_pool_ds_clone_swapped(struct dsl_dataset *ds1, struct dsl_dataset *ds2, + struct dmu_tx *tx); +void dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx); +void dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx); + +int dsl_pool_scrub_cancel(dsl_pool_t *dp); +int dsl_pool_scrub_clean(dsl_pool_t *dp); +void dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx); +void dsl_pool_scrub_restart(dsl_pool_t *dp); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DSL_POOL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_prop.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_prop.h new file mode 100644 index 000000000000..d66caa86cff6 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_prop.h @@ -0,0 +1,82 @@ +/* + * CDDL HEADER START + * + 
* The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DSL_PROP_H +#define _SYS_DSL_PROP_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct dsl_dataset; +struct dsl_dir; + +/* The callback func may not call into the DMU or DSL! */ +typedef void (dsl_prop_changed_cb_t)(void *arg, uint64_t newval); + +typedef struct dsl_prop_cb_record { + list_node_t cbr_node; /* link on dd_prop_cbs */ + struct dsl_dataset *cbr_ds; + const char *cbr_propname; + dsl_prop_changed_cb_t *cbr_func; + void *cbr_arg; +} dsl_prop_cb_record_t; + +int dsl_prop_register(struct dsl_dataset *ds, const char *propname, + dsl_prop_changed_cb_t *callback, void *cbarg); +int dsl_prop_unregister(struct dsl_dataset *ds, const char *propname, + dsl_prop_changed_cb_t *callback, void *cbarg); +int dsl_prop_numcb(struct dsl_dataset *ds); + +int dsl_prop_get(const char *ddname, const char *propname, + int intsz, int numints, void *buf, char *setpoint); +int dsl_prop_get_integer(const char *ddname, const char *propname, + uint64_t *valuep, char *setpoint); +int dsl_prop_get_all(objset_t *os, nvlist_t **nvp, boolean_t local); +int dsl_prop_get_ds(struct dsl_dataset *ds, const char *propname, + int intsz, int numints, void *buf, char *setpoint); +int dsl_prop_get_dd(struct dsl_dir *dd, const char *propname, + int intsz, int numints, void *buf, char *setpoint); + +int dsl_prop_set(const char *ddname, const char *propname, + int intsz, int numints, const void *buf); +void dsl_prop_set_uint64_sync(dsl_dir_t *dd, const char *name, uint64_t val, + cred_t *cr, dmu_tx_t *tx); + +void dsl_prop_nvlist_add_uint64(nvlist_t *nv, zfs_prop_t prop, uint64_t value); +void dsl_prop_nvlist_add_string(nvlist_t *nv, + zfs_prop_t prop, const char *value); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DSL_PROP_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_synctask.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_synctask.h new file mode 100644 index 000000000000..4995bfe5acca --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/dsl_synctask.h @@ -0,0 +1,83 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. 
+ * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_DSL_SYNCTASK_H +#define _SYS_DSL_SYNCTASK_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct dsl_pool; + +typedef int (dsl_checkfunc_t)(void *, void *, dmu_tx_t *); +typedef void (dsl_syncfunc_t)(void *, void *, cred_t *, dmu_tx_t *); + +typedef struct dsl_sync_task { + list_node_t dst_node; + dsl_checkfunc_t *dst_checkfunc; + dsl_syncfunc_t *dst_syncfunc; + void *dst_arg1; + void *dst_arg2; + int dst_err; +} dsl_sync_task_t; + +typedef struct dsl_sync_task_group { + txg_node_t dstg_node; + list_t dstg_tasks; + struct dsl_pool *dstg_pool; + cred_t *dstg_cr; + uint64_t dstg_txg; + int dstg_err; + int dstg_space; + boolean_t dstg_nowaiter; +} dsl_sync_task_group_t; + +dsl_sync_task_group_t *dsl_sync_task_group_create(struct dsl_pool *dp); +void dsl_sync_task_create(dsl_sync_task_group_t *dstg, + dsl_checkfunc_t *, dsl_syncfunc_t *, + void *arg1, void *arg2, int blocks_modified); +int dsl_sync_task_group_wait(dsl_sync_task_group_t *dstg); +void dsl_sync_task_group_nowait(dsl_sync_task_group_t *dstg, dmu_tx_t *tx); +void dsl_sync_task_group_destroy(dsl_sync_task_group_t *dstg); +void dsl_sync_task_group_sync(dsl_sync_task_group_t *dstg, dmu_tx_t *tx); + +int dsl_sync_task_do(struct dsl_pool *dp, + dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc, + void *arg1, void *arg2, int blocks_modified); +void dsl_sync_task_do_nowait(struct dsl_pool *dp, + dsl_checkfunc_t *checkfunc, dsl_syncfunc_t *syncfunc, + void *arg1, void *arg2, int blocks_modified, dmu_tx_t *tx); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_DSL_SYNCTASK_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/metaslab.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/metaslab.h new file mode 100644 index 000000000000..1c9d89e8fd69 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/metaslab.h @@ -0,0 +1,71 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_METASLAB_H +#define _SYS_METASLAB_H + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct metaslab_class metaslab_class_t; +typedef struct metaslab_group metaslab_group_t; + +extern metaslab_t *metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo, + uint64_t start, uint64_t size, uint64_t txg); +extern void metaslab_fini(metaslab_t *msp); +extern void metaslab_sync(metaslab_t *msp, uint64_t txg); +extern void metaslab_sync_done(metaslab_t *msp, uint64_t txg); + +#define METASLAB_HINTBP_FAVOR 0x0 +#define METASLAB_HINTBP_AVOID 0x1 +#define METASLAB_GANG_HEADER 0x2 + +extern int metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, + blkptr_t *bp, int ncopies, uint64_t txg, blkptr_t *hintbp, int flags); +extern void metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, + boolean_t now); +extern int metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg); + +extern metaslab_class_t *metaslab_class_create(void); +extern void metaslab_class_destroy(metaslab_class_t *mc); +extern void metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg); +extern void metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg); + +extern metaslab_group_t *metaslab_group_create(metaslab_class_t *mc, + vdev_t *vd); +extern void metaslab_group_destroy(metaslab_group_t *mg); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_METASLAB_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/metaslab_impl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/metaslab_impl.h new file mode 100644 index 000000000000..5980cbc843ac --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/metaslab_impl.h @@ -0,0 +1,81 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_METASLAB_IMPL_H +#define _SYS_METASLAB_IMPL_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct metaslab_class { + metaslab_group_t *mc_rotor; + uint64_t mc_allocated; +}; + +struct metaslab_group { + kmutex_t mg_lock; + avl_tree_t mg_metaslab_tree; + uint64_t mg_aliquot; + int64_t mg_bias; + metaslab_class_t *mg_class; + vdev_t *mg_vd; + metaslab_group_t *mg_prev; + metaslab_group_t *mg_next; +}; + +/* + * Each metaslab's free space is tracked in space map object in the MOS, + * which is only updated in syncing context. Each time we sync a txg, + * we append the allocs and frees from that txg to the space map object. + * When the txg is done syncing, metaslab_sync_done() updates ms_smo + * to ms_smo_syncing. 
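 * (So, for a given metaslab, the per-txg flow is roughly: metaslab_sync()
 * flushes ms_allocmap[txg] and ms_freemap[txg] into the object described
 * by ms_smo_syncing, and metaslab_sync_done() then makes that the new
 * ms_smo and folds the just-freed space back into the in-core ms_map.)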
Everything in ms_smo is always safe to allocate. + */ +struct metaslab { + kmutex_t ms_lock; /* metaslab lock */ + space_map_obj_t ms_smo; /* synced space map object */ + space_map_obj_t ms_smo_syncing; /* syncing space map object */ + space_map_t ms_allocmap[TXG_SIZE]; /* allocated this txg */ + space_map_t ms_freemap[TXG_SIZE]; /* freed this txg */ + space_map_t ms_map; /* in-core free space map */ + uint64_t ms_weight; /* weight vs. others in group */ + metaslab_group_t *ms_group; /* metaslab group */ + avl_node_t ms_group_node; /* node in metaslab group tree */ + txg_node_t ms_txg_node; /* per-txg dirty metaslab links */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_METASLAB_IMPL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/refcount.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/refcount.h new file mode 100644 index 000000000000..d3fe7b1f899c --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/refcount.h @@ -0,0 +1,104 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_REFCOUNT_H +#define _SYS_REFCOUNT_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * If the reference is held only by the calling function and not any + * particular object, use FTAG (which is a string) for the holder_tag. + * Otherwise, use the object that holds the reference. 
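+ *
+ * A minimal usage sketch (the "db" object and its db_holds field are
+ * illustrative, not part of this header): a function taking a short-lived
+ * hold would do
+ *
+ *	(void) refcount_add(&db->db_holds, FTAG);
+ *	... use the object ...
+ *	(void) refcount_remove(&db->db_holds, FTAG);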
+ */ +#define FTAG ((char *)__func__) + +#if defined(DEBUG) || !defined(_KERNEL) +typedef struct reference { + list_node_t ref_link; + void *ref_holder; + uint64_t ref_number; + uint8_t *ref_removed; +} reference_t; + +typedef struct refcount { + kmutex_t rc_mtx; + list_t rc_list; + list_t rc_removed; + int64_t rc_count; + int64_t rc_removed_count; +} refcount_t; + +/* Note: refcount_t must be initialized with refcount_create() */ + +void refcount_create(refcount_t *rc); +void refcount_destroy(refcount_t *rc); +void refcount_destroy_many(refcount_t *rc, uint64_t number); +int refcount_is_zero(refcount_t *rc); +int64_t refcount_count(refcount_t *rc); +int64_t refcount_add(refcount_t *rc, void *holder_tag); +int64_t refcount_remove(refcount_t *rc, void *holder_tag); +int64_t refcount_add_many(refcount_t *rc, uint64_t number, void *holder_tag); +int64_t refcount_remove_many(refcount_t *rc, uint64_t number, void *holder_tag); + +void refcount_init(void); +void refcount_fini(void); + +#else /* DEBUG */ + +typedef struct refcount { + uint64_t rc_count; +} refcount_t; + +#define refcount_create(rc) ((rc)->rc_count = 0) +#define refcount_destroy(rc) ((rc)->rc_count = 0) +#define refcount_destroy_many(rc, number) ((rc)->rc_count = 0) +#define refcount_is_zero(rc) ((rc)->rc_count == 0) +#define refcount_count(rc) ((rc)->rc_count) +#define refcount_add(rc, holder) atomic_add_64_nv(&(rc)->rc_count, 1) +#define refcount_remove(rc, holder) atomic_add_64_nv(&(rc)->rc_count, -1) +#define refcount_add_many(rc, number, holder) \ + atomic_add_64_nv(&(rc)->rc_count, number) +#define refcount_remove_many(rc, number, holder) \ + atomic_add_64_nv(&(rc)->rc_count, -number) + +#define refcount_init() +#define refcount_fini() + +#endif /* DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_REFCOUNT_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/rrwlock.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/rrwlock.h new file mode 100644 index 000000000000..19a43c97fc3c --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/rrwlock.h @@ -0,0 +1,80 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_RR_RW_LOCK_H +#define _SYS_RR_RW_LOCK_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +/* + * A reader-writer lock implementation that allows re-entrant reads, but + * still gives writers priority on "new" reads. + * + * See rrwlock.c for more details about the implementation. 
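+ *
+ * A minimal usage sketch (the lock variable is illustrative, not part of
+ * this header):
+ *
+ *	rrw_enter(&rrl, RW_READER, FTAG);
+ *	... read-side work, which may re-enter the lock for read ...
+ *	rrw_exit(&rrl, FTAG);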
+ * + * Fields of the rrwlock_t structure: + * - rr_lock: protects modification and reading of rrwlock_t fields + * - rr_cv: cv for waking up readers or waiting writers + * - rr_writer: thread id of the current writer + * - rr_anon_rount: number of active anonymous readers + * - rr_linked_rcount: total number of non-anonymous active readers + * - rr_writer_wanted: a writer wants the lock + */ +typedef struct rrwlock { + kmutex_t rr_lock; + kcondvar_t rr_cv; + kthread_t *rr_writer; + refcount_t rr_anon_rcount; + refcount_t rr_linked_rcount; + boolean_t rr_writer_wanted; +} rrwlock_t; + +/* + * 'tag' is used in reference counting tracking. The + * 'tag' must be the same in a rrw_enter() as in its + * corresponding rrw_exit(). + */ +void rrw_init(rrwlock_t *rrl); +void rrw_destroy(rrwlock_t *rrl); +void rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag); +void rrw_exit(rrwlock_t *rrl, void *tag); +boolean_t rrw_held(rrwlock_t *rrl, krw_t rw); + +#define RRW_READ_HELD(x) rrw_held(x, RW_READER) +#define RRW_WRITE_HELD(x) rrw_held(x, RW_WRITER) + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_RR_RW_LOCK_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/spa.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/spa.h new file mode 100644 index 000000000000..24b3ca447679 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/spa.h @@ -0,0 +1,554 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_SPA_H +#define _SYS_SPA_H + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Forward references that lots of things need. + */ +typedef struct spa spa_t; +typedef struct vdev vdev_t; +typedef struct metaslab metaslab_t; +typedef struct zilog zilog_t; +typedef struct spa_aux_vdev spa_aux_vdev_t; +struct dsl_pool; + +/* + * General-purpose 32-bit and 64-bit bitfield encodings. 
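+ *
+ * As a worked example (not part of the original header): BF64_ENCODE(5, 8, 4)
+ * places the value 5 in bits 8..11 of a 64-bit word, giving 0x500, and
+ * BF64_DECODE(0x500, 8, 4) recovers 5.  The SET/GET forms read or update a
+ * field in place without disturbing neighboring bits, and the _SB variants
+ * additionally apply a shift and bias (used below for sizes and offsets).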
+ */ +#define BF32_DECODE(x, low, len) P2PHASE((x) >> (low), 1U << (len)) +#define BF64_DECODE(x, low, len) P2PHASE((x) >> (low), 1ULL << (len)) +#define BF32_ENCODE(x, low, len) (P2PHASE((x), 1U << (len)) << (low)) +#define BF64_ENCODE(x, low, len) (P2PHASE((x), 1ULL << (len)) << (low)) + +#define BF32_GET(x, low, len) BF32_DECODE(x, low, len) +#define BF64_GET(x, low, len) BF64_DECODE(x, low, len) + +#define BF32_SET(x, low, len, val) \ + ((x) ^= BF32_ENCODE((x >> low) ^ (val), low, len)) +#define BF64_SET(x, low, len, val) \ + ((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len)) + +#define BF32_GET_SB(x, low, len, shift, bias) \ + ((BF32_GET(x, low, len) + (bias)) << (shift)) +#define BF64_GET_SB(x, low, len, shift, bias) \ + ((BF64_GET(x, low, len) + (bias)) << (shift)) + +#define BF32_SET_SB(x, low, len, shift, bias, val) \ + BF32_SET(x, low, len, ((val) >> (shift)) - (bias)) +#define BF64_SET_SB(x, low, len, shift, bias, val) \ + BF64_SET(x, low, len, ((val) >> (shift)) - (bias)) + +/* + * We currently support nine block sizes, from 512 bytes to 128K. + * We could go higher, but the benefits are near-zero and the cost + * of COWing a giant block to modify one byte would become excessive. + */ +#define SPA_MINBLOCKSHIFT 9 +#define SPA_MAXBLOCKSHIFT 17 +#define SPA_MINBLOCKSIZE (1ULL << SPA_MINBLOCKSHIFT) +#define SPA_MAXBLOCKSIZE (1ULL << SPA_MAXBLOCKSHIFT) + +#define SPA_BLOCKSIZES (SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1) + +/* + * Size of block to hold the configuration data (a packed nvlist) + */ +#define SPA_CONFIG_BLOCKSIZE (1 << 14) + +/* + * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB. + * The ASIZE encoding should be at least 64 times larger (6 more bits) + * to support up to 4-way RAID-Z mirror mode with worst-case gang block + * overhead, three DVAs per bp, plus one more bit in case we do anything + * else that expands the ASIZE. + */ +#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */ +#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */ +#define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */ + +/* + * All SPA data is represented by 128-bit data virtual addresses (DVAs). + * The members of the dva_t should be considered opaque outside the SPA. + */ +typedef struct dva { + uint64_t dva_word[2]; +} dva_t; + +/* + * Each block has a 256-bit checksum -- strong enough for cryptographic hashes. + */ +typedef struct zio_cksum { + uint64_t zc_word[4]; +} zio_cksum_t; + +/* + * Each block is described by its DVAs, time of birth, checksum, etc. 
+ * The word-by-word, bit-by-bit layout of the blkptr is as follows: + * + * 64 56 48 40 32 24 16 8 0 + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 0 | vdev1 | GRID | ASIZE | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 1 |G| offset1 | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 2 | vdev2 | GRID | ASIZE | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 3 |G| offset2 | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 4 | vdev3 | GRID | ASIZE | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 5 |G| offset3 | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 6 |E| lvl | type | cksum | comp | PSIZE | LSIZE | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 7 | padding | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 8 | padding | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 9 | padding | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * a | birth txg | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * b | fill count | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * c | checksum[0] | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * d | checksum[1] | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * e | checksum[2] | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * f | checksum[3] | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * + * Legend: + * + * vdev virtual device ID + * offset offset into virtual device + * LSIZE logical size + * PSIZE physical size (after compression) + * ASIZE allocated size (including RAID-Z parity and gang block headers) + * GRID RAID-Z layout information (reserved for future use) + * cksum checksum function + * comp compression function + * G gang block indicator + * E endianness + * type DMU object type + * lvl level of indirection + * birth txg transaction group in which the block was born + * fill count number of non-zero blocks under this bp + * checksum[4] 256-bit checksum of the data this bp describes + */ +typedef struct blkptr { + dva_t blk_dva[3]; /* 128-bit Data Virtual Address */ + uint64_t blk_prop; /* size, compression, type, etc */ + uint64_t blk_pad[3]; /* Extra space for the future */ + uint64_t blk_birth; /* transaction group at birth */ + uint64_t blk_fill; /* fill count */ + zio_cksum_t blk_cksum; /* 256-bit checksum */ +} blkptr_t; + +#define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */ +#define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */ + +/* + * Macros to get and set fields in a bp or DVA. 
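+ *
+ * For example (illustrative, following the layout above), DVA_GET_VDEV(dva)
+ * reads the vdev id from bits 32..63 of dva_word[0], and BP_GET_LSIZE(bp)
+ * decodes the logical size from blk_prop in SPA_MINBLOCKSHIFT-sized units
+ * with a bias of 1, so a stored field value of 0 means one 512-byte sector.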
+ */ +#define DVA_GET_ASIZE(dva) \ + BF64_GET_SB((dva)->dva_word[0], 0, 24, SPA_MINBLOCKSHIFT, 0) +#define DVA_SET_ASIZE(dva, x) \ + BF64_SET_SB((dva)->dva_word[0], 0, 24, SPA_MINBLOCKSHIFT, 0, x) + +#define DVA_GET_GRID(dva) BF64_GET((dva)->dva_word[0], 24, 8) +#define DVA_SET_GRID(dva, x) BF64_SET((dva)->dva_word[0], 24, 8, x) + +#define DVA_GET_VDEV(dva) BF64_GET((dva)->dva_word[0], 32, 32) +#define DVA_SET_VDEV(dva, x) BF64_SET((dva)->dva_word[0], 32, 32, x) + +#define DVA_GET_OFFSET(dva) \ + BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0) +#define DVA_SET_OFFSET(dva, x) \ + BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x) + +#define DVA_GET_GANG(dva) BF64_GET((dva)->dva_word[1], 63, 1) +#define DVA_SET_GANG(dva, x) BF64_SET((dva)->dva_word[1], 63, 1, x) + +#define BP_GET_LSIZE(bp) \ + (BP_IS_HOLE(bp) ? 0 : \ + BF64_GET_SB((bp)->blk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1)) +#define BP_SET_LSIZE(bp, x) \ + BF64_SET_SB((bp)->blk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1, x) + +#define BP_GET_PSIZE(bp) \ + BF64_GET_SB((bp)->blk_prop, 16, 16, SPA_MINBLOCKSHIFT, 1) +#define BP_SET_PSIZE(bp, x) \ + BF64_SET_SB((bp)->blk_prop, 16, 16, SPA_MINBLOCKSHIFT, 1, x) + +#define BP_GET_COMPRESS(bp) BF64_GET((bp)->blk_prop, 32, 8) +#define BP_SET_COMPRESS(bp, x) BF64_SET((bp)->blk_prop, 32, 8, x) + +#define BP_GET_CHECKSUM(bp) BF64_GET((bp)->blk_prop, 40, 8) +#define BP_SET_CHECKSUM(bp, x) BF64_SET((bp)->blk_prop, 40, 8, x) + +#define BP_GET_TYPE(bp) BF64_GET((bp)->blk_prop, 48, 8) +#define BP_SET_TYPE(bp, x) BF64_SET((bp)->blk_prop, 48, 8, x) + +#define BP_GET_LEVEL(bp) BF64_GET((bp)->blk_prop, 56, 5) +#define BP_SET_LEVEL(bp, x) BF64_SET((bp)->blk_prop, 56, 5, x) + +#define BP_GET_BYTEORDER(bp) (0 - BF64_GET((bp)->blk_prop, 63, 1)) +#define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x) + +#define BP_GET_ASIZE(bp) \ + (DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \ + DVA_GET_ASIZE(&(bp)->blk_dva[2])) + +#define BP_GET_UCSIZE(bp) \ + ((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? 
\ + BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp)); + +#define BP_GET_NDVAS(bp) \ + (!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \ + !!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \ + !!DVA_GET_ASIZE(&(bp)->blk_dva[2])) + +#define BP_COUNT_GANG(bp) \ + (DVA_GET_GANG(&(bp)->blk_dva[0]) + \ + DVA_GET_GANG(&(bp)->blk_dva[1]) + \ + DVA_GET_GANG(&(bp)->blk_dva[2])) + +#define DVA_EQUAL(dva1, dva2) \ + ((dva1)->dva_word[1] == (dva2)->dva_word[1] && \ + (dva1)->dva_word[0] == (dva2)->dva_word[0]) + +#define ZIO_CHECKSUM_EQUAL(zc1, zc2) \ + (0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \ + ((zc1).zc_word[1] - (zc2).zc_word[1]) | \ + ((zc1).zc_word[2] - (zc2).zc_word[2]) | \ + ((zc1).zc_word[3] - (zc2).zc_word[3]))) + +#define DVA_IS_VALID(dva) (DVA_GET_ASIZE(dva) != 0) + +#define ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3) \ +{ \ + (zcp)->zc_word[0] = w0; \ + (zcp)->zc_word[1] = w1; \ + (zcp)->zc_word[2] = w2; \ + (zcp)->zc_word[3] = w3; \ +} + +#define BP_IDENTITY(bp) (&(bp)->blk_dva[0]) +#define BP_IS_GANG(bp) DVA_GET_GANG(BP_IDENTITY(bp)) +#define BP_IS_HOLE(bp) ((bp)->blk_birth == 0) +#define BP_IS_OLDER(bp, txg) (!BP_IS_HOLE(bp) && (bp)->blk_birth < (txg)) + +#define BP_ZERO(bp) \ +{ \ + (bp)->blk_dva[0].dva_word[0] = 0; \ + (bp)->blk_dva[0].dva_word[1] = 0; \ + (bp)->blk_dva[1].dva_word[0] = 0; \ + (bp)->blk_dva[1].dva_word[1] = 0; \ + (bp)->blk_dva[2].dva_word[0] = 0; \ + (bp)->blk_dva[2].dva_word[1] = 0; \ + (bp)->blk_prop = 0; \ + (bp)->blk_pad[0] = 0; \ + (bp)->blk_pad[1] = 0; \ + (bp)->blk_pad[2] = 0; \ + (bp)->blk_birth = 0; \ + (bp)->blk_fill = 0; \ + ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \ +} + +#define BLK_FILL_ALREADY_FREED (-1ULL) + +/* + * Note: the byteorder is either 0 or -1, both of which are palindromes. + * This simplifies the endianness handling a bit. + */ +#ifdef _BIG_ENDIAN +#define ZFS_HOST_BYTEORDER (0ULL) +#else +#define ZFS_HOST_BYTEORDER (-1ULL) +#endif + +#define BP_SHOULD_BYTESWAP(bp) (BP_GET_BYTEORDER(bp) != ZFS_HOST_BYTEORDER) + +#define BP_SPRINTF_LEN 320 + +#include + +#define BP_GET_BUFC_TYPE(bp) \ + (((BP_GET_LEVEL(bp) > 0) || (dmu_ot[BP_GET_TYPE(bp)].ot_metadata)) ? 
\ + ARC_BUFC_METADATA : ARC_BUFC_DATA); +/* + * Routines found in spa.c + */ + +/* state manipulation functions */ +extern int spa_open(const char *pool, spa_t **, void *tag); +extern int spa_get_stats(const char *pool, nvlist_t **config, + char *altroot, size_t buflen); +extern int spa_create(const char *pool, nvlist_t *config, nvlist_t *props, + const char *history_str, nvlist_t *zplprops); +extern int spa_check_rootconf(char *devpath, char *devid, + nvlist_t **bestconf, uint64_t *besttxg); +extern boolean_t spa_rootdev_validate(nvlist_t *nv); +extern int spa_import_rootpool(char *devpath, char *devid); +extern int spa_import(const char *pool, nvlist_t *config, nvlist_t *props); +extern int spa_import_faulted(const char *, nvlist_t *, nvlist_t *); +extern nvlist_t *spa_tryimport(nvlist_t *tryconfig); +extern int spa_destroy(char *pool); +extern int spa_export(char *pool, nvlist_t **oldconfig, boolean_t force); +extern int spa_reset(char *pool); +extern void spa_async_request(spa_t *spa, int flag); +extern void spa_async_unrequest(spa_t *spa, int flag); +extern void spa_async_suspend(spa_t *spa); +extern void spa_async_resume(spa_t *spa); +extern spa_t *spa_inject_addref(char *pool); +extern void spa_inject_delref(spa_t *spa); + +#define SPA_ASYNC_CONFIG_UPDATE 0x01 +#define SPA_ASYNC_REMOVE 0x02 +#define SPA_ASYNC_PROBE 0x04 +#define SPA_ASYNC_RESILVER_DONE 0x08 +#define SPA_ASYNC_RESILVER 0x10 + +/* device manipulation */ +extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot); +extern int spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, + int replacing); +extern int spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done); +extern int spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare); +extern int spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath); + +/* spare state (which is global across all pools) */ +extern void spa_spare_add(vdev_t *vd); +extern void spa_spare_remove(vdev_t *vd); +extern boolean_t spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt); +extern void spa_spare_activate(vdev_t *vd); + +/* L2ARC state (which is global across all pools) */ +extern void spa_l2cache_add(vdev_t *vd); +extern void spa_l2cache_remove(vdev_t *vd); +extern boolean_t spa_l2cache_exists(uint64_t guid, uint64_t *pool); +extern void spa_l2cache_activate(vdev_t *vd); +extern void spa_l2cache_drop(spa_t *spa); +extern void spa_l2cache_space_update(vdev_t *vd, int64_t space, int64_t alloc); + +/* scrubbing */ +extern int spa_scrub(spa_t *spa, pool_scrub_type_t type); + +/* spa syncing */ +extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */ +extern void spa_sync_allpools(void); + +/* spa namespace global mutex */ +extern kmutex_t spa_namespace_lock; + +/* + * SPA configuration functions in spa_config.c + */ + +#define SPA_CONFIG_UPDATE_POOL 0 +#define SPA_CONFIG_UPDATE_VDEVS 1 + +extern void spa_config_sync(spa_t *, boolean_t, boolean_t); +extern void spa_config_load(void); +extern nvlist_t *spa_all_configs(uint64_t *); +extern void spa_config_set(spa_t *spa, nvlist_t *config); +extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, + int getstats); +extern void spa_config_update(spa_t *spa, int what); +extern void spa_config_update_common(spa_t *spa, int what, boolean_t isroot); + +/* + * Miscellaneous SPA routines in spa_misc.c + */ + +/* Namespace manipulation */ +extern spa_t *spa_lookup(const char *name); +extern spa_t *spa_add(const char *name, const char *altroot); +extern void spa_remove(spa_t *spa); +extern 
spa_t *spa_next(spa_t *prev); + +/* Refcount functions */ +extern void spa_open_ref(spa_t *spa, void *tag); +extern void spa_close(spa_t *spa, void *tag); +extern boolean_t spa_refcount_zero(spa_t *spa); + +#define SCL_CONFIG 0x01 +#define SCL_STATE 0x02 +#define SCL_L2ARC 0x04 /* hack until L2ARC 2.0 */ +#define SCL_ALLOC 0x08 +#define SCL_ZIO 0x10 +#define SCL_FREE 0x20 +#define SCL_VDEV 0x40 +#define SCL_LOCKS 7 +#define SCL_ALL ((1 << SCL_LOCKS) - 1) +#define SCL_STATE_ALL (SCL_STATE | SCL_L2ARC | SCL_ZIO) + +/* Pool configuration locks */ +extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw); +extern void spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw); +extern void spa_config_exit(spa_t *spa, int locks, void *tag); +extern int spa_config_held(spa_t *spa, int locks, krw_t rw); + +/* Pool vdev add/remove lock */ +extern uint64_t spa_vdev_enter(spa_t *spa); +extern int spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error); + +/* Pool vdev state change lock */ +extern void spa_vdev_state_enter(spa_t *spa); +extern int spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error); + +/* Accessor functions */ +extern boolean_t spa_shutting_down(spa_t *spa); +extern struct dsl_pool *spa_get_dsl(spa_t *spa); +extern blkptr_t *spa_get_rootblkptr(spa_t *spa); +extern void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp); +extern void spa_altroot(spa_t *, char *, size_t); +extern int spa_sync_pass(spa_t *spa); +extern char *spa_name(spa_t *spa); +extern uint64_t spa_guid(spa_t *spa); +extern uint64_t spa_last_synced_txg(spa_t *spa); +extern uint64_t spa_first_txg(spa_t *spa); +extern uint64_t spa_version(spa_t *spa); +extern pool_state_t spa_state(spa_t *spa); +extern uint64_t spa_freeze_txg(spa_t *spa); +extern uint64_t spa_get_alloc(spa_t *spa); +extern uint64_t spa_get_space(spa_t *spa); +extern uint64_t spa_get_dspace(spa_t *spa); +extern uint64_t spa_get_asize(spa_t *spa, uint64_t lsize); +extern uint64_t spa_version(spa_t *spa); +extern int spa_max_replication(spa_t *spa); +extern int spa_busy(void); +extern uint8_t spa_get_failmode(spa_t *spa); +extern boolean_t spa_suspended(spa_t *spa); + +/* Miscellaneous support routines */ +extern int spa_rename(const char *oldname, const char *newname); +extern boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid); +extern char *spa_strdup(const char *); +extern void spa_strfree(char *); +extern uint64_t spa_get_random(uint64_t range); +extern void sprintf_blkptr(char *buf, int len, const blkptr_t *bp); +extern void spa_freeze(spa_t *spa); +extern void spa_upgrade(spa_t *spa, uint64_t version); +extern void spa_evict_all(void); +extern vdev_t *spa_lookup_by_guid(spa_t *spa, uint64_t guid, + boolean_t l2cache); +extern boolean_t spa_has_spare(spa_t *, uint64_t guid); +extern uint64_t bp_get_dasize(spa_t *spa, const blkptr_t *bp); +extern boolean_t spa_has_slogs(spa_t *spa); +extern boolean_t spa_is_root(spa_t *spa); + +/* history logging */ +typedef enum history_log_type { + LOG_CMD_POOL_CREATE, + LOG_CMD_NORMAL, + LOG_INTERNAL +} history_log_type_t; + +typedef struct history_arg { + const char *ha_history_str; + history_log_type_t ha_log_type; + history_internal_events_t ha_event; + char ha_zone[MAXPATHLEN]; +} history_arg_t; + +extern char *spa_his_ievent_table[]; + +extern void spa_history_create_obj(spa_t *spa, dmu_tx_t *tx); +extern int spa_history_get(spa_t *spa, uint64_t *offset, uint64_t *len_read, + char *his_buf); +extern int spa_history_log(spa_t *spa, const char *his_buf, + 
history_log_type_t what); +void spa_history_internal_log(history_internal_events_t event, spa_t *spa, + dmu_tx_t *tx, cred_t *cr, const char *fmt, ...); + +/* error handling */ +struct zbookmark; +struct zio; +extern void spa_log_error(spa_t *spa, struct zio *zio); +extern void zfs_ereport_post(const char *class, spa_t *spa, vdev_t *vd, + struct zio *zio, uint64_t stateoroffset, uint64_t length); +extern void zfs_post_remove(spa_t *spa, vdev_t *vd); +extern void zfs_post_autoreplace(spa_t *spa, vdev_t *vd); +extern uint64_t spa_get_errlog_size(spa_t *spa); +extern int spa_get_errlog(spa_t *spa, void *uaddr, size_t *count); +extern void spa_errlog_rotate(spa_t *spa); +extern void spa_errlog_drain(spa_t *spa); +extern void spa_errlog_sync(spa_t *spa, uint64_t txg); +extern void spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub); + +/* vdev cache */ +extern void vdev_cache_stat_init(void); +extern void vdev_cache_stat_fini(void); + +/* Initialization and termination */ +extern void spa_init(int flags); +extern void spa_fini(void); +extern void spa_boot_init(); + +/* properties */ +extern int spa_prop_set(spa_t *spa, nvlist_t *nvp); +extern int spa_prop_get(spa_t *spa, nvlist_t **nvp); +extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx); + +/* asynchronous event notification */ +extern void spa_event_notify(spa_t *spa, vdev_t *vdev, const char *name); + +#ifdef ZFS_DEBUG +#define dprintf_bp(bp, fmt, ...) do { \ + if (zfs_flags & ZFS_DEBUG_DPRINTF) { \ + char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \ + sprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, (bp)); \ + dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf); \ + kmem_free(__blkbuf, BP_SPRINTF_LEN); \ + } \ +_NOTE(CONSTCOND) } while (0) +#else +#define dprintf_bp(bp, fmt, ...) +#endif + +extern int spa_mode; /* mode, e.g. FREAD | FWRITE */ + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_SPA_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/spa_boot.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/spa_boot.h new file mode 100644 index 000000000000..b56073b97516 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/spa_boot.h @@ -0,0 +1,45 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_SPA_BOOT_H +#define _SYS_SPA_BOOT_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern char *spa_get_bootprop(char *prop); +extern void spa_free_bootprop(char *prop); +extern int spa_get_rootconf(char *devpath, char *devid, nvlist_t **bestconf_p); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_SPA_BOOT_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/spa_impl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/spa_impl.h new file mode 100644 index 000000000000..8aeb414fe9de --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/spa_impl.h @@ -0,0 +1,196 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_SPA_IMPL_H +#define _SYS_SPA_IMPL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct spa_error_entry { + zbookmark_t se_bookmark; + char *se_name; + avl_node_t se_avl; +} spa_error_entry_t; + +typedef struct spa_history_phys { + uint64_t sh_pool_create_len; /* ending offset of zpool create */ + uint64_t sh_phys_max_off; /* physical EOF */ + uint64_t sh_bof; /* logical BOF */ + uint64_t sh_eof; /* logical EOF */ + uint64_t sh_records_lost; /* num of records overwritten */ +} spa_history_phys_t; + +struct spa_aux_vdev { + uint64_t sav_object; /* MOS object for device list */ + nvlist_t *sav_config; /* cached device config */ + vdev_t **sav_vdevs; /* devices */ + int sav_count; /* number devices */ + boolean_t sav_sync; /* sync the device list */ + nvlist_t **sav_pending; /* pending device additions */ + uint_t sav_npending; /* # pending devices */ +}; + +typedef struct spa_config_lock { + kmutex_t scl_lock; + kthread_t *scl_writer; + int scl_write_wanted; + kcondvar_t scl_cv; + refcount_t scl_count; +} spa_config_lock_t; + +typedef struct spa_config_dirent { + list_node_t scd_link; + char *scd_path; +} spa_config_dirent_t; + +typedef enum spa_log_state { + SPA_LOG_UNKNOWN = 0, /* unknown log state */ + SPA_LOG_MISSING, /* missing log(s) */ + SPA_LOG_CLEAR, /* clear the log(s) */ + SPA_LOG_GOOD, /* log(s) are good */ +} spa_log_state_t; + +enum zio_taskq_type { + ZIO_TASKQ_ISSUE = 0, + ZIO_TASKQ_INTERRUPT, + ZIO_TASKQ_TYPES +}; + +struct spa { + /* + * Fields protected by spa_namespace_lock. 
+ */ + char spa_name[MAXNAMELEN]; /* pool name */ + avl_node_t spa_avl; /* node in spa_namespace_avl */ + nvlist_t *spa_config; /* last synced config */ + nvlist_t *spa_config_syncing; /* currently syncing config */ + uint64_t spa_config_txg; /* txg of last config change */ + int spa_sync_pass; /* iterate-to-convergence */ + pool_state_t spa_state; /* pool state */ + int spa_inject_ref; /* injection references */ + uint8_t spa_sync_on; /* sync threads are running */ + spa_load_state_t spa_load_state; /* current load operation */ + taskq_t *spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES]; + dsl_pool_t *spa_dsl_pool; + metaslab_class_t *spa_normal_class; /* normal data class */ + metaslab_class_t *spa_log_class; /* intent log data class */ + uint64_t spa_first_txg; /* first txg after spa_open() */ + uint64_t spa_final_txg; /* txg of export/destroy */ + uint64_t spa_freeze_txg; /* freeze pool at this txg */ + objset_t *spa_meta_objset; /* copy of dp->dp_meta_objset */ + txg_list_t spa_vdev_txg_list; /* per-txg dirty vdev list */ + vdev_t *spa_root_vdev; /* top-level vdev container */ + uint64_t spa_load_guid; /* initial guid for spa_load */ + list_t spa_config_dirty_list; /* vdevs with dirty config */ + list_t spa_state_dirty_list; /* vdevs with dirty state */ + spa_aux_vdev_t spa_spares; /* hot spares */ + spa_aux_vdev_t spa_l2cache; /* L2ARC cache devices */ + uint64_t spa_config_object; /* MOS object for pool config */ + uint64_t spa_syncing_txg; /* txg currently syncing */ + uint64_t spa_sync_bplist_obj; /* object for deferred frees */ + bplist_t spa_sync_bplist; /* deferred-free bplist */ + uberblock_t spa_ubsync; /* last synced uberblock */ + uberblock_t spa_uberblock; /* current uberblock */ + kmutex_t spa_scrub_lock; /* resilver/scrub lock */ + uint64_t spa_scrub_inflight; /* in-flight scrub I/Os */ + uint64_t spa_scrub_maxinflight; /* max in-flight scrub I/Os */ + uint64_t spa_scrub_errors; /* scrub I/O error count */ + kcondvar_t spa_scrub_io_cv; /* scrub I/O completion */ + uint8_t spa_scrub_active; /* active or suspended? */ + uint8_t spa_scrub_type; /* type of scrub we're doing */ + uint8_t spa_scrub_finished; /* indicator to rotate logs */ + uint8_t spa_scrub_started; /* started since last boot */ + uint8_t spa_scrub_reopen; /* scrub doing vdev_reopen */ + kmutex_t spa_async_lock; /* protect async state */ + kthread_t *spa_async_thread; /* thread doing async task */ + int spa_async_suspended; /* async tasks suspended */ + kcondvar_t spa_async_cv; /* wait for thread_exit() */ + uint16_t spa_async_tasks; /* async task mask */ + kmutex_t spa_async_root_lock; /* protects async root count */ + uint64_t spa_async_root_count; /* number of async root zios */ + kcondvar_t spa_async_root_cv; /* notify when count == 0 */ + char *spa_root; /* alternate root directory */ + uint64_t spa_ena; /* spa-wide ereport ENA */ + boolean_t spa_last_open_failed; /* true if last open faled */ + kmutex_t spa_errlog_lock; /* error log lock */ + uint64_t spa_errlog_last; /* last error log object */ + uint64_t spa_errlog_scrub; /* scrub error log object */ + kmutex_t spa_errlist_lock; /* error list/ereport lock */ + avl_tree_t spa_errlist_last; /* last error list */ + avl_tree_t spa_errlist_scrub; /* scrub error list */ + uint64_t spa_deflate; /* should we deflate? 
*/ + uint64_t spa_history; /* history object */ + kmutex_t spa_history_lock; /* history lock */ + vdev_t *spa_pending_vdev; /* pending vdev additions */ + kmutex_t spa_props_lock; /* property lock */ + uint64_t spa_pool_props_object; /* object for properties */ + uint64_t spa_bootfs; /* default boot filesystem */ + uint64_t spa_failmode; /* failure mode for the pool */ + uint64_t spa_delegation; /* delegation on/off */ + list_t spa_config_list; /* previous cache file(s) */ + zio_t *spa_suspend_zio_root; /* root of all suspended I/O */ + kmutex_t spa_suspend_lock; /* protects suspend_zio_root */ + kcondvar_t spa_suspend_cv; /* notification of resume */ + uint8_t spa_suspended; /* pool is suspended */ + boolean_t spa_import_faulted; /* allow faulted vdevs */ + boolean_t spa_is_root; /* pool is root */ + int spa_minref; /* num refs when first opened */ + spa_log_state_t spa_log_state; /* log state */ + /* + * spa_refcnt & spa_config_lock must be the last elements + * because refcount_t changes size based on compilation options. + * In order for the MDB module to function correctly, the other + * fields must remain in the same location. + */ + spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */ + refcount_t spa_refcount; /* number of opens */ +}; + +extern const char *spa_config_path; + +#define BOOTFS_COMPRESS_VALID(compress) \ + ((compress) == ZIO_COMPRESS_LZJB || \ + ((compress) == ZIO_COMPRESS_ON && \ + ZIO_COMPRESS_ON_VALUE == ZIO_COMPRESS_LZJB) || \ + (compress) == ZIO_COMPRESS_OFF) + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_SPA_IMPL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/space_map.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/space_map.h new file mode 100644 index 000000000000..db9daef1f156 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/space_map.h @@ -0,0 +1,162 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_SPACE_MAP_H +#define _SYS_SPACE_MAP_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct space_map_ops space_map_ops_t; + +typedef struct space_map { + avl_tree_t sm_root; /* AVL tree of map segments */ + uint64_t sm_space; /* sum of all segments in the map */ + uint64_t sm_start; /* start of map */ + uint64_t sm_size; /* size of map */ + uint8_t sm_shift; /* unit shift */ + uint8_t sm_pad[3]; /* unused */ + uint8_t sm_loaded; /* map loaded? */ + uint8_t sm_loading; /* map loading? 
*/ + kcondvar_t sm_load_cv; /* map load completion */ + space_map_ops_t *sm_ops; /* space map block picker ops vector */ + void *sm_ppd; /* picker-private data */ + kmutex_t *sm_lock; /* pointer to lock that protects map */ +} space_map_t; + +typedef struct space_seg { + avl_node_t ss_node; /* AVL node */ + uint64_t ss_start; /* starting offset of this segment */ + uint64_t ss_end; /* ending offset (non-inclusive) */ +} space_seg_t; + +typedef struct space_map_obj { + uint64_t smo_object; /* on-disk space map object */ + uint64_t smo_objsize; /* size of the object */ + uint64_t smo_alloc; /* space allocated from the map */ +} space_map_obj_t; + +struct space_map_ops { + void (*smop_load)(space_map_t *sm); + void (*smop_unload)(space_map_t *sm); + uint64_t (*smop_alloc)(space_map_t *sm, uint64_t size); + void (*smop_claim)(space_map_t *sm, uint64_t start, uint64_t size); + void (*smop_free)(space_map_t *sm, uint64_t start, uint64_t size); +}; + +/* + * debug entry + * + * 1 3 10 50 + * ,---+--------+------------+---------------------------------. + * | 1 | action | syncpass | txg (lower bits) | + * `---+--------+------------+---------------------------------' + * 63 62 60 59 50 49 0 + * + * + * + * non-debug entry + * + * 1 47 1 15 + * ,-----------------------------------------------------------. + * | 0 | offset (sm_shift units) | type | run | + * `-----------------------------------------------------------' + * 63 62 17 16 15 0 + */ + +/* All this stuff takes and returns bytes */ +#define SM_RUN_DECODE(x) (BF64_DECODE(x, 0, 15) + 1) +#define SM_RUN_ENCODE(x) BF64_ENCODE((x) - 1, 0, 15) +#define SM_TYPE_DECODE(x) BF64_DECODE(x, 15, 1) +#define SM_TYPE_ENCODE(x) BF64_ENCODE(x, 15, 1) +#define SM_OFFSET_DECODE(x) BF64_DECODE(x, 16, 47) +#define SM_OFFSET_ENCODE(x) BF64_ENCODE(x, 16, 47) +#define SM_DEBUG_DECODE(x) BF64_DECODE(x, 63, 1) +#define SM_DEBUG_ENCODE(x) BF64_ENCODE(x, 63, 1) + +#define SM_DEBUG_ACTION_DECODE(x) BF64_DECODE(x, 60, 3) +#define SM_DEBUG_ACTION_ENCODE(x) BF64_ENCODE(x, 60, 3) + +#define SM_DEBUG_SYNCPASS_DECODE(x) BF64_DECODE(x, 50, 10) +#define SM_DEBUG_SYNCPASS_ENCODE(x) BF64_ENCODE(x, 50, 10) + +#define SM_DEBUG_TXG_DECODE(x) BF64_DECODE(x, 0, 50) +#define SM_DEBUG_TXG_ENCODE(x) BF64_ENCODE(x, 0, 50) + +#define SM_RUN_MAX SM_RUN_DECODE(~0ULL) + +#define SM_ALLOC 0x0 +#define SM_FREE 0x1 + +/* + * The data for a given space map can be kept on blocks of any size. + * Larger blocks entail fewer i/o operations, but they also cause the + * DMU to keep more data in-core, and also to waste more i/o bandwidth + * when only a few blocks have changed since the last transaction group. + * This could use a lot more research, but for now, set the freelist + * block size to 4k (2^12). 
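+ *
+ * As a worked example of the entry encoding above (illustrative, not part
+ * of the original header): an allocation run of 8 units starting at offset
+ * 100 (both in sm_shift units) is written as the single word
+ * SM_OFFSET_ENCODE(100) | SM_TYPE_ENCODE(SM_ALLOC) | SM_RUN_ENCODE(8),
+ * and SM_RUN_DECODE() of that word yields 8 again.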
+ */ +#define SPACE_MAP_BLOCKSHIFT 12 + +typedef void space_map_func_t(space_map_t *sm, uint64_t start, uint64_t size); + +extern void space_map_create(space_map_t *sm, uint64_t start, uint64_t size, + uint8_t shift, kmutex_t *lp); +extern void space_map_destroy(space_map_t *sm); +extern void space_map_add(space_map_t *sm, uint64_t start, uint64_t size); +extern void space_map_remove(space_map_t *sm, uint64_t start, uint64_t size); +extern int space_map_contains(space_map_t *sm, uint64_t start, uint64_t size); +extern void space_map_vacate(space_map_t *sm, + space_map_func_t *func, space_map_t *mdest); +extern void space_map_walk(space_map_t *sm, + space_map_func_t *func, space_map_t *mdest); +extern void space_map_excise(space_map_t *sm, uint64_t start, uint64_t size); +extern void space_map_union(space_map_t *smd, space_map_t *sms); + +extern void space_map_load_wait(space_map_t *sm); +extern int space_map_load(space_map_t *sm, space_map_ops_t *ops, + uint8_t maptype, space_map_obj_t *smo, objset_t *os); +extern void space_map_unload(space_map_t *sm); + +extern uint64_t space_map_alloc(space_map_t *sm, uint64_t size); +extern void space_map_claim(space_map_t *sm, uint64_t start, uint64_t size); +extern void space_map_free(space_map_t *sm, uint64_t start, uint64_t size); + +extern void space_map_sync(space_map_t *sm, uint8_t maptype, + space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx); +extern void space_map_truncate(space_map_obj_t *smo, + objset_t *os, dmu_tx_t *tx); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_SPACE_MAP_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/txg.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/txg.h new file mode 100644 index 000000000000..23bdff211b4a --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/txg.h @@ -0,0 +1,130 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_TXG_H +#define _SYS_TXG_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define TXG_CONCURRENT_STATES 3 /* open, quiescing, syncing */ +#define TXG_SIZE 4 /* next power of 2 */ +#define TXG_MASK (TXG_SIZE - 1) /* mask for size */ +#define TXG_INITIAL TXG_SIZE /* initial txg */ +#define TXG_IDX (txg & TXG_MASK) + +#define TXG_WAIT 1ULL +#define TXG_NOWAIT 2ULL + +typedef struct tx_cpu tx_cpu_t; + +typedef struct txg_handle { + tx_cpu_t *th_cpu; + uint64_t th_txg; +} txg_handle_t; + +typedef struct txg_node { + struct txg_node *tn_next[TXG_SIZE]; + uint8_t tn_member[TXG_SIZE]; +} txg_node_t; + +typedef struct txg_list { + kmutex_t tl_lock; + size_t tl_offset; + txg_node_t *tl_head[TXG_SIZE]; +} txg_list_t; + +struct dsl_pool; + +extern void txg_init(struct dsl_pool *dp, uint64_t txg); +extern void txg_fini(struct dsl_pool *dp); +extern void txg_sync_start(struct dsl_pool *dp); +extern void txg_sync_stop(struct dsl_pool *dp); +extern uint64_t txg_hold_open(struct dsl_pool *dp, txg_handle_t *txghp); +extern void txg_rele_to_quiesce(txg_handle_t *txghp); +extern void txg_rele_to_sync(txg_handle_t *txghp); +extern void txg_suspend(struct dsl_pool *dp); +extern void txg_resume(struct dsl_pool *dp); + +/* + * Delay the caller by the specified number of ticks or until + * the txg closes (whichever comes first). This is intended + * to be used to throttle writers when the system nears its + * capacity. + */ +extern void txg_delay(struct dsl_pool *dp, uint64_t txg, int ticks); + +/* + * Wait until the given transaction group has finished syncing. + * Try to make this happen as soon as possible (eg. kick off any + * necessary syncs immediately). If txg==0, wait for the currently open + * txg to finish syncing. + */ +extern void txg_wait_synced(struct dsl_pool *dp, uint64_t txg); + +/* + * Wait until the given transaction group, or one after it, is + * the open transaction group. Try to make this happen as soon + * as possible (eg. kick off any necessary syncs immediately). + * If txg == 0, wait for the next open txg. + */ +extern void txg_wait_open(struct dsl_pool *dp, uint64_t txg); + +/* + * Returns TRUE if we are "backed up" waiting for the syncing + * transaction to complete; otherwise returns FALSE. + */ +extern boolean_t txg_stalled(struct dsl_pool *dp); + +/* returns TRUE if someone is waiting for the next txg to sync */ +extern boolean_t txg_sync_waiting(struct dsl_pool *dp); + +/* + * Per-txg object lists. 
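+ *
+ * Each txg_list_t keeps one list per in-flight txg; the txg_node_t is
+ * embedded in the client structure at the offset passed to
+ * txg_list_create().  A minimal sketch (the dirty-dir list and field names
+ * are illustrative):
+ *
+ *	txg_list_create(&dp->dp_dirty_dirs, offsetof(dsl_dir_t, dd_dirty_link));
+ *	(void) txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg);
+ *	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL)
+ *		... sync dd out for this txg ...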
+ */ + +#define TXG_CLEAN(txg) ((txg) - 1) + +extern void txg_list_create(txg_list_t *tl, size_t offset); +extern void txg_list_destroy(txg_list_t *tl); +extern int txg_list_empty(txg_list_t *tl, uint64_t txg); +extern int txg_list_add(txg_list_t *tl, void *p, uint64_t txg); +extern void *txg_list_remove(txg_list_t *tl, uint64_t txg); +extern void *txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg); +extern int txg_list_member(txg_list_t *tl, void *p, uint64_t txg); +extern void *txg_list_head(txg_list_t *tl, uint64_t txg); +extern void *txg_list_next(txg_list_t *tl, void *p, uint64_t txg); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_TXG_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/txg_impl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/txg_impl.h new file mode 100644 index 000000000000..7413c662b355 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/txg_impl.h @@ -0,0 +1,73 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_TXG_IMPL_H +#define _SYS_TXG_IMPL_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct tx_cpu { + kmutex_t tc_lock; + kcondvar_t tc_cv[TXG_SIZE]; + uint64_t tc_count[TXG_SIZE]; + char tc_pad[16]; +}; + +typedef struct tx_state { + tx_cpu_t *tx_cpu; /* protects right to enter txg */ + kmutex_t tx_sync_lock; /* protects tx_state_t */ + krwlock_t tx_suspend; + uint64_t tx_open_txg; /* currently open txg id */ + uint64_t tx_quiesced_txg; /* quiesced txg waiting for sync */ + uint64_t tx_syncing_txg; /* currently syncing txg id */ + uint64_t tx_synced_txg; /* last synced txg id */ + + uint64_t tx_sync_txg_waiting; /* txg we're waiting to sync */ + uint64_t tx_quiesce_txg_waiting; /* txg we're waiting to open */ + + kcondvar_t tx_sync_more_cv; + kcondvar_t tx_sync_done_cv; + kcondvar_t tx_quiesce_more_cv; + kcondvar_t tx_quiesce_done_cv; + kcondvar_t tx_timeout_cv; + kcondvar_t tx_exit_cv; /* wait for all threads to exit */ + + uint8_t tx_threads; /* number of threads */ + uint8_t tx_exiting; /* set when we're exiting */ + + kthread_t *tx_sync_thread; + kthread_t *tx_quiesce_thread; +} tx_state_t; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_TXG_IMPL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/uberblock.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/uberblock.h new file mode 100644 index 000000000000..93d936ae4b18 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/uberblock.h @@ -0,0 +1,50 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2005 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_UBERBLOCK_H +#define _SYS_UBERBLOCK_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct uberblock uberblock_t; + +extern int uberblock_verify(uberblock_t *ub); +extern int uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_UBERBLOCK_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/uberblock_impl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/uberblock_impl.h new file mode 100644 index 000000000000..55a0dd5aec0d --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/uberblock_impl.h @@ -0,0 +1,63 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. 
+ * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_UBERBLOCK_IMPL_H +#define _SYS_UBERBLOCK_IMPL_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * The uberblock version is incremented whenever an incompatible on-disk + * format change is made to the SPA, DMU, or ZAP. + * + * Note: the first two fields should never be moved. When a storage pool + * is opened, the uberblock must be read off the disk before the version + * can be checked. If the ub_version field is moved, we may not detect + * version mismatch. If the ub_magic field is moved, applications that + * expect the magic number in the first word won't work. + */ +#define UBERBLOCK_MAGIC 0x00bab10c /* oo-ba-bloc! */ +#define UBERBLOCK_SHIFT 10 /* up to 1K */ + +struct uberblock { + uint64_t ub_magic; /* UBERBLOCK_MAGIC */ + uint64_t ub_version; /* SPA_VERSION */ + uint64_t ub_txg; /* txg of last sync */ + uint64_t ub_guid_sum; /* sum of all vdev guids */ + uint64_t ub_timestamp; /* UTC time of last sync */ + blkptr_t ub_rootbp; /* MOS objset_phys_t */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_UBERBLOCK_IMPL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/unique.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/unique.h new file mode 100644 index 000000000000..2ef3093edf1c --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/unique.h @@ -0,0 +1,59 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_UNIQUE_H +#define _SYS_UNIQUE_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* The number of significant bits in each unique value. */ +#define UNIQUE_BITS 56 + +void unique_init(void); +void unique_fini(void); + +/* + * Return a new unique value (which will not be uniquified against until + * it is unique_insert()-ed. + */ +uint64_t unique_create(void); + +/* Return a unique value, which equals the one passed in if possible. 
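+ * (If that value is zero or already in use, a different unique value is
+ * chosen and returned instead; callers should keep the returned value.)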
*/ +uint64_t unique_insert(uint64_t value); + +/* Indicate that this value no longer needs to be uniquified against. */ +void unique_remove(uint64_t value); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_UNIQUE_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev.h new file mode 100644 index 000000000000..c070d6f3d623 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev.h @@ -0,0 +1,135 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_VDEV_H +#define _SYS_VDEV_H + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern boolean_t zfs_nocacheflush; + +extern int vdev_open(vdev_t *); +extern int vdev_validate(vdev_t *); +extern void vdev_close(vdev_t *); +extern int vdev_create(vdev_t *, uint64_t txg, boolean_t isreplace); +extern void vdev_init(vdev_t *, uint64_t txg); +extern void vdev_reopen(vdev_t *); +extern int vdev_validate_aux(vdev_t *vd); +extern zio_t *vdev_probe(vdev_t *vd, zio_t *pio); + +extern boolean_t vdev_is_bootable(vdev_t *vd); +extern vdev_t *vdev_lookup_top(spa_t *spa, uint64_t vdev); +extern vdev_t *vdev_lookup_by_guid(vdev_t *vd, uint64_t guid); +extern void vdev_dtl_dirty(space_map_t *sm, uint64_t txg, uint64_t size); +extern int vdev_dtl_contains(space_map_t *sm, uint64_t txg, uint64_t size); +extern void vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, + int scrub_done); +extern boolean_t vdev_resilver_needed(vdev_t *vd, + uint64_t *minp, uint64_t *maxp); + +extern int vdev_metaslab_init(vdev_t *vd, uint64_t txg); +extern void vdev_metaslab_fini(vdev_t *vd); + +extern void vdev_get_stats(vdev_t *vd, vdev_stat_t *vs); +extern void vdev_clear_stats(vdev_t *vd); +extern void vdev_stat_update(zio_t *zio, uint64_t psize); +extern void vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, + boolean_t complete); +extern int vdev_getspec(spa_t *spa, uint64_t vdev, char **vdev_spec); +extern void vdev_propagate_state(vdev_t *vd); +extern void vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, + vdev_aux_t aux); + +extern void vdev_space_update(vdev_t *vd, int64_t space_delta, + int64_t alloc_delta, boolean_t update_root); + +extern uint64_t vdev_psize_to_asize(vdev_t *vd, uint64_t psize); + +extern int vdev_fault(spa_t *spa, uint64_t guid); +extern int vdev_degrade(spa_t *spa, uint64_t guid); +extern int vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, + vdev_state_t *); +extern int vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags); +extern void vdev_clear(spa_t *spa, vdev_t *vd); + 
+extern boolean_t vdev_is_dead(vdev_t *vd); +extern boolean_t vdev_readable(vdev_t *vd); +extern boolean_t vdev_writeable(vdev_t *vd); +extern boolean_t vdev_allocatable(vdev_t *vd); +extern boolean_t vdev_accessible(vdev_t *vd, zio_t *zio); + +extern void vdev_cache_init(vdev_t *vd); +extern void vdev_cache_fini(vdev_t *vd); +extern int vdev_cache_read(zio_t *zio); +extern void vdev_cache_write(zio_t *zio); +extern void vdev_cache_purge(vdev_t *vd); + +extern void vdev_queue_init(vdev_t *vd); +extern void vdev_queue_fini(vdev_t *vd); +extern zio_t *vdev_queue_io(zio_t *zio); +extern void vdev_queue_io_done(zio_t *zio); + +extern void vdev_config_dirty(vdev_t *vd); +extern void vdev_config_clean(vdev_t *vd); +extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg); + +extern void vdev_state_dirty(vdev_t *vd); +extern void vdev_state_clean(vdev_t *vd); + +extern nvlist_t *vdev_config_generate(spa_t *spa, vdev_t *vd, + boolean_t getstats, boolean_t isspare, boolean_t isl2cache); + +/* + * Label routines + */ +struct uberblock; +extern uint64_t vdev_label_offset(uint64_t psize, int l, uint64_t offset); +extern int vdev_label_number(uint64_t psise, uint64_t offset); +extern nvlist_t *vdev_label_read_config(vdev_t *vd); +extern void vdev_uberblock_load(zio_t *zio, vdev_t *vd, struct uberblock *ub); + +typedef enum { + VDEV_LABEL_CREATE, /* create/add a new device */ + VDEV_LABEL_REPLACE, /* replace an existing device */ + VDEV_LABEL_SPARE, /* add a new hot spare */ + VDEV_LABEL_REMOVE, /* remove an existing device */ + VDEV_LABEL_L2CACHE /* add an L2ARC cache device */ +} vdev_labeltype_t; + +extern int vdev_label_init(vdev_t *vd, uint64_t txg, vdev_labeltype_t reason); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_VDEV_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev_disk.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev_disk.h new file mode 100644 index 000000000000..b748571ea0c3 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev_disk.h @@ -0,0 +1,56 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_VDEV_DISK_H +#define _SYS_VDEV_DISK_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#ifdef _KERNEL +#include +#include +#include +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct vdev_disk { + ddi_devid_t vd_devid; + char *vd_minor; + ldi_handle_t vd_lh; +} vdev_disk_t; + +#ifdef _KERNEL +extern int vdev_disk_physio(ldi_handle_t, caddr_t, size_t, uint64_t, int); +#endif +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_VDEV_DISK_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev_file.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev_file.h new file mode 100644 index 000000000000..cd496735778c --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev_file.h @@ -0,0 +1,46 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2005 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_VDEV_FILE_H +#define _SYS_VDEV_FILE_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct vdev_file { + vnode_t *vf_vnode; +} vdev_file_t; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_VDEV_FILE_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev_impl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev_impl.h new file mode 100644 index 000000000000..26904d089a3b --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/vdev_impl.h @@ -0,0 +1,305 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_VDEV_IMPL_H +#define _SYS_VDEV_IMPL_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Virtual device descriptors. 
+ * + * All storage pool operations go through the virtual device framework, + * which provides data replication and I/O scheduling. + */ + +/* + * Forward declarations that lots of things need. + */ +typedef struct vdev_queue vdev_queue_t; +typedef struct vdev_cache vdev_cache_t; +typedef struct vdev_cache_entry vdev_cache_entry_t; + +/* + * Virtual device operations + */ +typedef int vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *ashift); +typedef void vdev_close_func_t(vdev_t *vd); +typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize); +typedef int vdev_io_start_func_t(zio_t *zio); +typedef void vdev_io_done_func_t(zio_t *zio); +typedef void vdev_state_change_func_t(vdev_t *vd, int, int); + +typedef struct vdev_ops { + vdev_open_func_t *vdev_op_open; + vdev_close_func_t *vdev_op_close; + vdev_asize_func_t *vdev_op_asize; + vdev_io_start_func_t *vdev_op_io_start; + vdev_io_done_func_t *vdev_op_io_done; + vdev_state_change_func_t *vdev_op_state_change; + char vdev_op_type[16]; + boolean_t vdev_op_leaf; +} vdev_ops_t; + +/* + * Virtual device properties + */ +struct vdev_cache_entry { + char *ve_data; + uint64_t ve_offset; + uint64_t ve_lastused; + avl_node_t ve_offset_node; + avl_node_t ve_lastused_node; + uint32_t ve_hits; + uint16_t ve_missed_update; + zio_t *ve_fill_io; +}; + +struct vdev_cache { + avl_tree_t vc_offset_tree; + avl_tree_t vc_lastused_tree; + kmutex_t vc_lock; +}; + +struct vdev_queue { + avl_tree_t vq_deadline_tree; + avl_tree_t vq_read_tree; + avl_tree_t vq_write_tree; + avl_tree_t vq_pending_tree; + kmutex_t vq_lock; +}; + +/* + * Virtual device descriptor + */ +struct vdev { + /* + * Common to all vdev types. + */ + uint64_t vdev_id; /* child number in vdev parent */ + uint64_t vdev_guid; /* unique ID for this vdev */ + uint64_t vdev_guid_sum; /* self guid + all child guids */ + uint64_t vdev_asize; /* allocatable device capacity */ + uint64_t vdev_ashift; /* block alignment shift */ + uint64_t vdev_state; /* see VDEV_STATE_* #defines */ + uint64_t vdev_prevstate; /* used when reopening a vdev */ + vdev_ops_t *vdev_ops; /* vdev operations */ + spa_t *vdev_spa; /* spa for this vdev */ + void *vdev_tsd; /* type-specific data */ + vdev_t *vdev_top; /* top-level vdev */ + vdev_t *vdev_parent; /* parent vdev */ + vdev_t **vdev_child; /* array of children */ + uint64_t vdev_children; /* number of children */ + space_map_t vdev_dtl_map; /* dirty time log in-core state */ + space_map_t vdev_dtl_scrub; /* DTL for scrub repair writes */ + vdev_stat_t vdev_stat; /* virtual device statistics */ + + /* + * Top-level vdev state. + */ + uint64_t vdev_ms_array; /* metaslab array object */ + uint64_t vdev_ms_shift; /* metaslab size shift */ + uint64_t vdev_ms_count; /* number of metaslabs */ + metaslab_group_t *vdev_mg; /* metaslab group */ + metaslab_t **vdev_ms; /* metaslab array */ + txg_list_t vdev_ms_list; /* per-txg dirty metaslab lists */ + txg_list_t vdev_dtl_list; /* per-txg dirty DTL lists */ + txg_node_t vdev_txg_node; /* per-txg dirty vdev linkage */ + boolean_t vdev_remove_wanted; /* async remove wanted? */ + boolean_t vdev_probe_wanted; /* async probe wanted? */ + list_node_t vdev_config_dirty_node; /* config dirty list */ + list_node_t vdev_state_dirty_node; /* state dirty list */ + uint64_t vdev_deflate_ratio; /* deflation ratio (x512) */ + uint64_t vdev_islog; /* is an intent log device */ + + /* + * Leaf vdev state. 
+ */ + uint64_t vdev_psize; /* physical device capacity */ + space_map_obj_t vdev_dtl; /* dirty time log on-disk state */ + txg_node_t vdev_dtl_node; /* per-txg dirty DTL linkage */ + uint64_t vdev_wholedisk; /* true if this is a whole disk */ + uint64_t vdev_offline; /* persistent offline state */ + uint64_t vdev_faulted; /* persistent faulted state */ + uint64_t vdev_degraded; /* persistent degraded state */ + uint64_t vdev_removed; /* persistent removed state */ + uint64_t vdev_nparity; /* number of parity devices for raidz */ + char *vdev_path; /* vdev path (if any) */ + char *vdev_devid; /* vdev devid (if any) */ + char *vdev_physpath; /* vdev device path (if any) */ + uint64_t vdev_not_present; /* not present during import */ + uint64_t vdev_unspare; /* unspare when resilvering done */ + hrtime_t vdev_last_try; /* last reopen time */ + boolean_t vdev_nowritecache; /* true if flushwritecache failed */ + boolean_t vdev_checkremove; /* temporary online test */ + boolean_t vdev_forcefault; /* force online fault */ + uint8_t vdev_tmpoffline; /* device taken offline temporarily? */ + uint8_t vdev_detached; /* device detached? */ + uint8_t vdev_cant_read; /* vdev is failing all reads */ + uint8_t vdev_cant_write; /* vdev is failing all writes */ + uint64_t vdev_isspare; /* was a hot spare */ + uint64_t vdev_isl2cache; /* was a l2cache device */ + vdev_queue_t vdev_queue; /* I/O deadline schedule queue */ + vdev_cache_t vdev_cache; /* physical block cache */ + spa_aux_vdev_t *vdev_aux; /* for l2cache vdevs */ + zio_t *vdev_probe_zio; /* root of current probe */ + + /* + * For DTrace to work in userland (libzpool) context, these fields must + * remain at the end of the structure. DTrace will use the kernel's + * CTF definition for 'struct vdev', and since the size of a kmutex_t is + * larger in userland, the offsets for the rest fields would be + * incorrect. + */ + kmutex_t vdev_dtl_lock; /* vdev_dtl_{map,resilver} */ + kmutex_t vdev_stat_lock; /* vdev_stat */ + kmutex_t vdev_probe_lock; /* protects vdev_probe_zio */ +}; + +#define VDEV_SKIP_SIZE (8 << 10) +#define VDEV_BOOT_HEADER_SIZE (8 << 10) +#define VDEV_PHYS_SIZE (112 << 10) +#define VDEV_UBERBLOCK_RING (128 << 10) + +#define VDEV_UBERBLOCK_SHIFT(vd) \ + MAX((vd)->vdev_top->vdev_ashift, UBERBLOCK_SHIFT) +#define VDEV_UBERBLOCK_COUNT(vd) \ + (VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd)) +#define VDEV_UBERBLOCK_OFFSET(vd, n) \ + offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)]) +#define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd)) + +/* ZFS boot block */ +#define VDEV_BOOT_MAGIC 0x2f5b007b10cULL +#define VDEV_BOOT_VERSION 1 /* version number */ + +typedef struct vdev_boot_header { + uint64_t vb_magic; /* VDEV_BOOT_MAGIC */ + uint64_t vb_version; /* VDEV_BOOT_VERSION */ + uint64_t vb_offset; /* start offset (bytes) */ + uint64_t vb_size; /* size (bytes) */ + char vb_pad[VDEV_BOOT_HEADER_SIZE - 4 * sizeof (uint64_t)]; +} vdev_boot_header_t; + +typedef struct vdev_phys { + char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_block_tail_t)]; + zio_block_tail_t vp_zbt; +} vdev_phys_t; + +typedef struct vdev_label { + char vl_pad[VDEV_SKIP_SIZE]; /* 8K */ + vdev_boot_header_t vl_boot_header; /* 8K */ + vdev_phys_t vl_vdev_phys; /* 112K */ + char vl_uberblock[VDEV_UBERBLOCK_RING]; /* 128K */ +} vdev_label_t; /* 256K total */ + +/* + * vdev_dirty() flags + */ +#define VDD_METASLAB 0x01 +#define VDD_DTL 0x02 + +/* + * Size and offset of embedded boot loader region on each label. 
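/*
 * Worked example (editorial, not from the imported sources) of the
 * uberblock-ring macros above.  For a 512-byte-sector top-level vdev
 * (ashift = 9), VDEV_UBERBLOCK_SHIFT() is MAX(9, UBERBLOCK_SHIFT) = 10,
 * so each slot is 1K and the 128K ring holds 128 uberblocks; for a 4K
 * device (ashift = 12) each slot is 4K and the ring holds 32.  Either
 * way a label is 256K: 8K pad + 8K boot header + 112K phys + 128K ring.
 * The helper below assumes MAX() from <sys/sysmacros.h>.
 */
static int
uberblocks_per_label(uint64_t ashift)
{
	uint64_t shift = MAX(ashift, UBERBLOCK_SHIFT);	/* log2 of slot size */

	return ((int)(VDEV_UBERBLOCK_RING >> shift));	/* slots in the ring */
}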
+ * The total size of the first two labels plus the boot area is 4MB. + */ +#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t)) +#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */ + +/* + * Size of label regions at the start and end of each leaf device. + */ +#define VDEV_LABEL_START_SIZE (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE) +#define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t)) +#define VDEV_LABELS 4 + +#define VDEV_ALLOC_LOAD 0 +#define VDEV_ALLOC_ADD 1 +#define VDEV_ALLOC_SPARE 2 +#define VDEV_ALLOC_L2CACHE 3 + +/* + * Allocate or free a vdev + */ +extern int vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *config, + vdev_t *parent, uint_t id, int alloctype); +extern void vdev_free(vdev_t *vd); + +/* + * Add or remove children and parents + */ +extern void vdev_add_child(vdev_t *pvd, vdev_t *cvd); +extern void vdev_remove_child(vdev_t *pvd, vdev_t *cvd); +extern void vdev_compact_children(vdev_t *pvd); +extern vdev_t *vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops); +extern void vdev_remove_parent(vdev_t *cvd); + +/* + * vdev sync load and sync + */ +extern void vdev_load(vdev_t *vd); +extern void vdev_sync(vdev_t *vd, uint64_t txg); +extern void vdev_sync_done(vdev_t *vd, uint64_t txg); +extern void vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg); + +/* + * Available vdev types. + */ +extern vdev_ops_t vdev_root_ops; +extern vdev_ops_t vdev_mirror_ops; +extern vdev_ops_t vdev_replacing_ops; +extern vdev_ops_t vdev_raidz_ops; +extern vdev_ops_t vdev_disk_ops; +extern vdev_ops_t vdev_file_ops; +extern vdev_ops_t vdev_missing_ops; +extern vdev_ops_t vdev_spare_ops; + +/* + * Common size functions + */ +extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize); +extern uint64_t vdev_get_rsize(vdev_t *vd); + +/* + * zdb uses this tunable, so it must be declared here to make lint happy. + */ +extern int zfs_vdev_cache_size; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_VDEV_IMPL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zap.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zap.h new file mode 100644 index 000000000000..f88cc068bd57 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zap.h @@ -0,0 +1,425 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_ZAP_H +#define _SYS_ZAP_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * ZAP - ZFS Attribute Processor + * + * The ZAP is a module which sits on top of the DMU (Data Management + * Unit) and implements a higher-level storage primitive using DMU + * objects. Its primary consumer is the ZPL (ZFS Posix Layer). 
+ * + * A "zapobj" is a DMU object which the ZAP uses to stores attributes. + * Users should use only zap routines to access a zapobj - they should + * not access the DMU object directly using DMU routines. + * + * The attributes stored in a zapobj are name-value pairs. The name is + * a zero-terminated string of up to ZAP_MAXNAMELEN bytes (including + * terminating NULL). The value is an array of integers, which may be + * 1, 2, 4, or 8 bytes long. The total space used by the array (number + * of integers * integer length) can be up to ZAP_MAXVALUELEN bytes. + * Note that an 8-byte integer value can be used to store the location + * (object number) of another dmu object (which may be itself a zapobj). + * Note that you can use a zero-length attribute to store a single bit + * of information - the attribute is present or not. + * + * The ZAP routines are thread-safe. However, you must observe the + * DMU's restriction that a transaction may not be operated on + * concurrently. + * + * Any of the routines that return an int may return an I/O error (EIO + * or ECHECKSUM). + * + * + * Implementation / Performance Notes: + * + * The ZAP is intended to operate most efficiently on attributes with + * short (49 bytes or less) names and single 8-byte values, for which + * the microzap will be used. The ZAP should be efficient enough so + * that the user does not need to cache these attributes. + * + * The ZAP's locking scheme makes its routines thread-safe. Operations + * on different zapobjs will be processed concurrently. Operations on + * the same zapobj which only read data will be processed concurrently. + * Operations on the same zapobj which modify data will be processed + * concurrently when there are many attributes in the zapobj (because + * the ZAP uses per-block locking - more than 128 * (number of cpus) + * small attributes will suffice). + */ + +/* + * We're using zero-terminated byte strings (ie. ASCII or UTF-8 C + * strings) for the names of attributes, rather than a byte string + * bounded by an explicit length. If some day we want to support names + * in character sets which have embedded zeros (eg. UTF-16, UTF-32), + * we'll have to add routines for using length-bounded strings. + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZAP_MAXNAMELEN 256 +#define ZAP_MAXVALUELEN 1024 + +/* + * The matchtype specifies which entry will be accessed. + * MT_EXACT: only find an exact match (non-normalized) + * MT_FIRST: find the "first" normalized (case and Unicode + * form) match; the designated "first" match will not change as long + * as the set of entries with this normalization doesn't change + * MT_BEST: if there is an exact match, find that, otherwise find the + * first normalized match + */ +typedef enum matchtype +{ + MT_EXACT, + MT_BEST, + MT_FIRST +} matchtype_t; + +/* + * Create a new zapobj with no attributes and return its object number. + * MT_EXACT will cause the zap object to only support MT_EXACT lookups, + * otherwise any matchtype can be used for lookups. + * + * normflags specifies what normalization will be done. values are: + * 0: no normalization (legacy on-disk format, supports MT_EXACT matching + * only) + * U8_TEXTPREP_TOLOWER: case normalization will be performed. + * MT_FIRST/MT_BEST matching will find entries that match without + * regard to case (eg. looking for "foo" can find an entry "Foo"). + * Eventually, other flags will permit unicode normalization as well. 
+ */ +uint64_t zap_create(objset_t *ds, dmu_object_type_t ot, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx); +uint64_t zap_create_norm(objset_t *ds, int normflags, dmu_object_type_t ot, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx); + +/* + * Create a new zapobj with no attributes from the given (unallocated) + * object number. + */ +int zap_create_claim(objset_t *ds, uint64_t obj, dmu_object_type_t ot, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx); +int zap_create_claim_norm(objset_t *ds, uint64_t obj, + int normflags, dmu_object_type_t ot, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx); + +/* + * The zapobj passed in must be a valid ZAP object for all of the + * following routines. + */ + +/* + * Destroy this zapobj and all its attributes. + * + * Frees the object number using dmu_object_free. + */ +int zap_destroy(objset_t *ds, uint64_t zapobj, dmu_tx_t *tx); + +/* + * Manipulate attributes. + * + * 'integer_size' is in bytes, and must be 1, 2, 4, or 8. + */ + +/* + * Retrieve the contents of the attribute with the given name. + * + * If the requested attribute does not exist, the call will fail and + * return ENOENT. + * + * If 'integer_size' is smaller than the attribute's integer size, the + * call will fail and return EINVAL. + * + * If 'integer_size' is equal to or larger than the attribute's integer + * size, the call will succeed and return 0. * When converting to a + * larger integer size, the integers will be treated as unsigned (ie. no + * sign-extension will be performed). + * + * 'num_integers' is the length (in integers) of 'buf'. + * + * If the attribute is longer than the buffer, as many integers as will + * fit will be transferred to 'buf'. If the entire attribute was not + * transferred, the call will return EOVERFLOW. + * + * If rn_len is nonzero, realname will be set to the name of the found + * entry (which may be different from the requested name if matchtype is + * not MT_EXACT). + * + * If normalization_conflictp is not NULL, it will be set if there is + * another name with the same case/unicode normalized form. + */ +int zap_lookup(objset_t *ds, uint64_t zapobj, const char *name, + uint64_t integer_size, uint64_t num_integers, void *buf); +int zap_lookup_norm(objset_t *ds, uint64_t zapobj, const char *name, + uint64_t integer_size, uint64_t num_integers, void *buf, + matchtype_t mt, char *realname, int rn_len, + boolean_t *normalization_conflictp); + +/* + * Create an attribute with the given name and value. + * + * If an attribute with the given name already exists, the call will + * fail and return EEXIST. + */ +int zap_add(objset_t *ds, uint64_t zapobj, const char *name, + int integer_size, uint64_t num_integers, + const void *val, dmu_tx_t *tx); + +/* + * Set the attribute with the given name to the given value. If an + * attribute with the given name does not exist, it will be created. If + * an attribute with the given name already exists, the previous value + * will be overwritten. The integer_size may be different from the + * existing attribute's integer size, in which case the attribute's + * integer size will be updated to the new value. + */ +int zap_update(objset_t *ds, uint64_t zapobj, const char *name, + int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx); + +/* + * Get the length (in integers) and the integer size of the specified + * attribute. + * + * If the requested attribute does not exist, the call will fail and + * return ENOENT. 
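/*
 * Illustrative use of the attribute routines above (editorial sketch, not
 * from the imported sources).  The caller is assumed to already hold a
 * valid zapobj and an assigned dmu_tx_t; obtaining those is outside the
 * scope of this header.
 */
static int
zap_attr_example(objset_t *os, uint64_t zapobj, dmu_tx_t *tx)
{
	uint64_t refcount = 1;
	int err;

	/* Create the attribute; fails with EEXIST if it already exists. */
	err = zap_add(os, zapobj, "refcount", 8, 1, &refcount, tx);
	if (err != 0)
		return (err);

	/* Overwrite (or create) the attribute regardless of prior state. */
	refcount = 2;
	err = zap_update(os, zapobj, "refcount", 8, 1, &refcount, tx);
	if (err != 0)
		return (err);

	/* Read it back as a single 8-byte integer. */
	return (zap_lookup(os, zapobj, "refcount", 8, 1, &refcount));
}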
+ */ +int zap_length(objset_t *ds, uint64_t zapobj, const char *name, + uint64_t *integer_size, uint64_t *num_integers); + +/* + * Remove the specified attribute. + * + * If the specified attribute does not exist, the call will fail and + * return ENOENT. + */ +int zap_remove(objset_t *ds, uint64_t zapobj, const char *name, dmu_tx_t *tx); +int zap_remove_norm(objset_t *ds, uint64_t zapobj, const char *name, + matchtype_t mt, dmu_tx_t *tx); + +/* + * Returns (in *count) the number of attributes in the specified zap + * object. + */ +int zap_count(objset_t *ds, uint64_t zapobj, uint64_t *count); + + +/* + * Returns (in name) the name of the entry whose (value & mask) + * (za_first_integer) is value, or ENOENT if not found. The string + * pointed to by name must be at least 256 bytes long. If mask==0, the + * match must be exact (ie, same as mask=-1ULL). + */ +int zap_value_search(objset_t *os, uint64_t zapobj, + uint64_t value, uint64_t mask, char *name); + +/* + * Transfer all the entries from fromobj into intoobj. Only works on + * int_size=8 num_integers=1 values. Fails if there are any duplicated + * entries. + */ +int zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx); + +/* + * Manipulate entries where the name + value are the "same" (the name is + * a stringified version of the value). + */ +int zap_add_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx); +int zap_remove_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx); +int zap_lookup_int(objset_t *os, uint64_t obj, uint64_t value); + +struct zap; +struct zap_leaf; +typedef struct zap_cursor { + /* This structure is opaque! */ + objset_t *zc_objset; + struct zap *zc_zap; + struct zap_leaf *zc_leaf; + uint64_t zc_zapobj; + uint64_t zc_hash; + uint32_t zc_cd; +} zap_cursor_t; + +typedef struct { + int za_integer_length; + /* + * za_normalization_conflict will be set if there are additional + * entries with this normalized form (eg, "foo" and "Foo"). + */ + boolean_t za_normalization_conflict; + uint64_t za_num_integers; + uint64_t za_first_integer; /* no sign extension for <8byte ints */ + char za_name[MAXNAMELEN]; +} zap_attribute_t; + +/* + * The interface for listing all the attributes of a zapobj can be + * thought of as cursor moving down a list of the attributes one by + * one. The cookie returned by the zap_cursor_serialize routine is + * persistent across system calls (and across reboot, even). + */ + +/* + * Initialize a zap cursor, pointing to the "first" attribute of the + * zapobj. You must _fini the cursor when you are done with it. + */ +void zap_cursor_init(zap_cursor_t *zc, objset_t *ds, uint64_t zapobj); +void zap_cursor_fini(zap_cursor_t *zc); + +/* + * Get the attribute currently pointed to by the cursor. Returns + * ENOENT if at the end of the attributes. + */ +int zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za); + +/* + * Advance the cursor to the next attribute. + */ +void zap_cursor_advance(zap_cursor_t *zc); + +/* + * Get a persistent cookie pointing to the current position of the zap + * cursor. The low 4 bits in the cookie are always zero, and thus can + * be used as to differentiate a serialized cookie from a different type + * of value. The cookie will be less than 2^32 as long as there are + * fewer than 2^22 (4.2 million) entries in the zap object. + */ +uint64_t zap_cursor_serialize(zap_cursor_t *zc); + +/* + * Initialize a zap cursor pointing to the position recorded by + * zap_cursor_serialize (in the "serialized" argument). 
You can also + * use a "serialized" argument of 0 to start at the beginning of the + * zapobj (ie. zap_cursor_init_serialized(..., 0) is equivalent to + * zap_cursor_init(...).) + */ +void zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *ds, + uint64_t zapobj, uint64_t serialized); + + +#define ZAP_HISTOGRAM_SIZE 10 + +typedef struct zap_stats { + /* + * Size of the pointer table (in number of entries). + * This is always a power of 2, or zero if it's a microzap. + * In general, it should be considerably greater than zs_num_leafs. + */ + uint64_t zs_ptrtbl_len; + + uint64_t zs_blocksize; /* size of zap blocks */ + + /* + * The number of blocks used. Note that some blocks may be + * wasted because old ptrtbl's and large name/value blocks are + * not reused. (Although their space is reclaimed, we don't + * reuse those offsets in the object.) + */ + uint64_t zs_num_blocks; + + /* + * Pointer table values from zap_ptrtbl in the zap_phys_t + */ + uint64_t zs_ptrtbl_nextblk; /* next (larger) copy start block */ + uint64_t zs_ptrtbl_blks_copied; /* number source blocks copied */ + uint64_t zs_ptrtbl_zt_blk; /* starting block number */ + uint64_t zs_ptrtbl_zt_numblks; /* number of blocks */ + uint64_t zs_ptrtbl_zt_shift; /* bits to index it */ + + /* + * Values of the other members of the zap_phys_t + */ + uint64_t zs_block_type; /* ZBT_HEADER */ + uint64_t zs_magic; /* ZAP_MAGIC */ + uint64_t zs_num_leafs; /* The number of leaf blocks */ + uint64_t zs_num_entries; /* The number of zap entries */ + uint64_t zs_salt; /* salt to stir into hash function */ + + /* + * Histograms. For all histograms, the last index + * (ZAP_HISTOGRAM_SIZE-1) includes any values which are greater + * than what can be represented. For example + * zs_leafs_with_n5_entries[ZAP_HISTOGRAM_SIZE-1] is the number + * of leafs with more than 45 entries. + */ + + /* + * zs_leafs_with_n_pointers[n] is the number of leafs with + * 2^n pointers to it. + */ + uint64_t zs_leafs_with_2n_pointers[ZAP_HISTOGRAM_SIZE]; + + /* + * zs_leafs_with_n_entries[n] is the number of leafs with + * [n*5, (n+1)*5) entries. In the current implementation, there + * can be at most 55 entries in any block, but there may be + * fewer if the name or value is large, or the block is not + * completely full. + */ + uint64_t zs_blocks_with_n5_entries[ZAP_HISTOGRAM_SIZE]; + + /* + * zs_leafs_n_tenths_full[n] is the number of leafs whose + * fullness is in the range [n/10, (n+1)/10). + */ + uint64_t zs_blocks_n_tenths_full[ZAP_HISTOGRAM_SIZE]; + + /* + * zs_entries_using_n_chunks[n] is the number of entries which + * consume n 24-byte chunks. (Note, large names/values only use + * one chunk, but contribute to zs_num_blocks_large.) + */ + uint64_t zs_entries_using_n_chunks[ZAP_HISTOGRAM_SIZE]; + + /* + * zs_buckets_with_n_entries[n] is the number of buckets (each + * leaf has 64 buckets) with n entries. + * zs_buckets_with_n_entries[1] should be very close to + * zs_num_entries. + */ + uint64_t zs_buckets_with_n_entries[ZAP_HISTOGRAM_SIZE]; +} zap_stats_t; + +/* + * Get statistics about a ZAP object. Note: you need to be aware of the + * internal implementation of the ZAP to correctly interpret some of the + * statistics. This interface shouldn't be relied on unless you really + * know what you're doing. 
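/*
 * Illustrative iteration over a zapobj with the cursor interface above
 * (editorial sketch, not from the imported sources).  The serialize /
 * init_serialized pair shows how a walk can be suspended and resumed.
 */
static void
zap_walk_example(objset_t *os, uint64_t zapobj)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	uint64_t cookie;

	for (zap_cursor_init(&zc, os, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		(void) printf("%s: %llu x %d-byte integers\n", za.za_name,
		    (u_longlong_t)za.za_num_integers, za.za_integer_length);
	}
	cookie = zap_cursor_serialize(&zc);	/* persistent position cookie */
	zap_cursor_fini(&zc);

	/* Later (even after reboot): resume where the walk left off. */
	zap_cursor_init_serialized(&zc, os, zapobj, cookie);
	/* ... zap_cursor_retrieve()/zap_cursor_advance() as above ... */
	zap_cursor_fini(&zc);
}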
+ */ +int zap_get_stats(objset_t *ds, uint64_t zapobj, zap_stats_t *zs); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZAP_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zap_impl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zap_impl.h new file mode 100644 index 000000000000..0dc02ab6b0ac --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zap_impl.h @@ -0,0 +1,218 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_ZAP_IMPL_H +#define _SYS_ZAP_IMPL_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +extern int fzap_default_block_shift; + +#define ZAP_MAGIC 0x2F52AB2ABULL + +#define FZAP_BLOCK_SHIFT(zap) ((zap)->zap_f.zap_block_shift) + +#define ZAP_MAXCD (uint32_t)(-1) +#define ZAP_HASHBITS 28 +#define MZAP_ENT_LEN 64 +#define MZAP_NAME_LEN (MZAP_ENT_LEN - 8 - 4 - 2) +#define MZAP_MAX_BLKSHIFT SPA_MAXBLOCKSHIFT +#define MZAP_MAX_BLKSZ (1 << MZAP_MAX_BLKSHIFT) + +typedef struct mzap_ent_phys { + uint64_t mze_value; + uint32_t mze_cd; + uint16_t mze_pad; /* in case we want to chain them someday */ + char mze_name[MZAP_NAME_LEN]; +} mzap_ent_phys_t; + +typedef struct mzap_phys { + uint64_t mz_block_type; /* ZBT_MICRO */ + uint64_t mz_salt; + uint64_t mz_normflags; + uint64_t mz_pad[5]; + mzap_ent_phys_t mz_chunk[1]; + /* actually variable size depending on block size */ +} mzap_phys_t; + +typedef struct mzap_ent { + avl_node_t mze_node; + int mze_chunkid; + uint64_t mze_hash; + mzap_ent_phys_t mze_phys; +} mzap_ent_t; + + +/* + * The (fat) zap is stored in one object. It is an array of + * 1<= 6] [zap_leaf_t] [ptrtbl] ... + * + */ + +struct dmu_buf; +struct zap_leaf; + +#define ZBT_LEAF ((1ULL << 63) + 0) +#define ZBT_HEADER ((1ULL << 63) + 1) +#define ZBT_MICRO ((1ULL << 63) + 3) +/* any other values are ptrtbl blocks */ + +/* + * the embedded pointer table takes up half a block: + * block size / entry size (2^3) / 2 + */ +#define ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1) + +/* + * The embedded pointer table starts half-way through the block. 
Since + * the pointer table itself is half the block, it starts at (64-bit) + * word number (1<zap_f.zap_phys) \ + [(idx) + (1<> (64 - (n)))) + +void fzap_byteswap(void *buf, size_t size); +int fzap_count(zap_t *zap, uint64_t *count); +int fzap_lookup(zap_name_t *zn, + uint64_t integer_size, uint64_t num_integers, void *buf, + char *realname, int rn_len, boolean_t *normalization_conflictp); +int fzap_add(zap_name_t *zn, uint64_t integer_size, uint64_t num_integers, + const void *val, dmu_tx_t *tx); +int fzap_update(zap_name_t *zn, + int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx); +int fzap_length(zap_name_t *zn, + uint64_t *integer_size, uint64_t *num_integers); +int fzap_remove(zap_name_t *zn, dmu_tx_t *tx); +int fzap_cursor_retrieve(zap_t *zap, zap_cursor_t *zc, zap_attribute_t *za); +void fzap_get_stats(zap_t *zap, zap_stats_t *zs); +void zap_put_leaf(struct zap_leaf *l); + +int fzap_add_cd(zap_name_t *zn, + uint64_t integer_size, uint64_t num_integers, + const void *val, uint32_t cd, dmu_tx_t *tx); +void fzap_upgrade(zap_t *zap, dmu_tx_t *tx); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZAP_IMPL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zap_leaf.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zap_leaf.h new file mode 100644 index 000000000000..14144e059e54 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zap_leaf.h @@ -0,0 +1,244 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_ZAP_LEAF_H +#define _SYS_ZAP_LEAF_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +struct zap; + +#define ZAP_LEAF_MAGIC 0x2AB1EAF + +/* chunk size = 24 bytes */ +#define ZAP_LEAF_CHUNKSIZE 24 + +/* + * The amount of space available for chunks is: + * block size (1<l_bs) - hash entry size (2) * number of hash + * entries - header space (2*chunksize) + */ +#define ZAP_LEAF_NUMCHUNKS(l) \ + (((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \ + ZAP_LEAF_CHUNKSIZE - 2) + +/* + * The amount of space within the chunk available for the array is: + * chunk size - space for type (1) - space for next pointer (2) + */ +#define ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3) + +#define ZAP_LEAF_ARRAY_NCHUNKS(bytes) \ + (((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES) + +/* + * Low water mark: when there are only this many chunks free, start + * growing the ptrtbl. Ideally, this should be larger than a + * "reasonably-sized" entry. 20 chunks is more than enough for the + * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value), + * while still being only around 3% for 16k blocks. 
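/*
 * Worked example (editorial, not from the imported sources) of the leaf
 * sizing arithmetic for a 16K leaf block (l_bs = 14), using
 * ZAP_LEAF_NUMCHUNKS above and the hash-table macros defined just below:
 *
 *	hash entries	= 1 << (14 - 5)		     = 512 (1K of uint16_t)
 *	chunks		= (16384 - 1024) / 24 - 2    = 638
 *	low-water mark	= 20 chunks		     ~= 3.1% of 638
 *
 * A maximal directory entry (256-byte name, one 8-byte value) needs one
 * entry chunk + ceil(256 / 21) = 13 name chunks + 1 value chunk = 15
 * chunks, comfortably below the 20-chunk low-water mark quoted above.
 */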
+ */ +#define ZAP_LEAF_LOW_WATER (20) + +/* + * The leaf hash table has block size / 2^5 (32) number of entries, + * which should be more than enough for the maximum number of entries, + * which is less than block size / CHUNKSIZE (24) / minimum number of + * chunks per entry (3). + */ +#define ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5) +#define ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l)) + +/* + * The chunks start immediately after the hash table. The end of the + * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a + * chunk_t. + */ +#define ZAP_LEAF_CHUNK(l, idx) \ + ((zap_leaf_chunk_t *) \ + ((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx] +#define ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry) + +typedef enum zap_chunk_type { + ZAP_CHUNK_FREE = 253, + ZAP_CHUNK_ENTRY = 252, + ZAP_CHUNK_ARRAY = 251, + ZAP_CHUNK_TYPE_MAX = 250 +} zap_chunk_type_t; + +#define ZLF_ENTRIES_CDSORTED (1<<0) + +/* + * TAKE NOTE: + * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified. + */ +typedef struct zap_leaf_phys { + struct zap_leaf_header { + uint64_t lh_block_type; /* ZBT_LEAF */ + uint64_t lh_pad1; + uint64_t lh_prefix; /* hash prefix of this leaf */ + uint32_t lh_magic; /* ZAP_LEAF_MAGIC */ + uint16_t lh_nfree; /* number free chunks */ + uint16_t lh_nentries; /* number of entries */ + uint16_t lh_prefix_len; /* num bits used to id this */ + +/* above is accessable to zap, below is zap_leaf private */ + + uint16_t lh_freelist; /* chunk head of free list */ + uint8_t lh_flags; /* ZLF_* flags */ + uint8_t lh_pad2[11]; + } l_hdr; /* 2 24-byte chunks */ + + /* + * The header is followed by a hash table with + * ZAP_LEAF_HASH_NUMENTRIES(zap) entries. The hash table is + * followed by an array of ZAP_LEAF_NUMCHUNKS(zap) + * zap_leaf_chunk structures. These structures are accessed + * with the ZAP_LEAF_CHUNK() macro. + */ + + uint16_t l_hash[1]; +} zap_leaf_phys_t; + +typedef union zap_leaf_chunk { + struct zap_leaf_entry { + uint8_t le_type; /* always ZAP_CHUNK_ENTRY */ + uint8_t le_int_size; /* size of ints */ + uint16_t le_next; /* next entry in hash chain */ + uint16_t le_name_chunk; /* first chunk of the name */ + uint16_t le_name_length; /* bytes in name, incl null */ + uint16_t le_value_chunk; /* first chunk of the value */ + uint16_t le_value_length; /* value length in ints */ + uint32_t le_cd; /* collision differentiator */ + uint64_t le_hash; /* hash value of the name */ + } l_entry; + struct zap_leaf_array { + uint8_t la_type; /* always ZAP_CHUNK_ARRAY */ + uint8_t la_array[ZAP_LEAF_ARRAY_BYTES]; + uint16_t la_next; /* next blk or CHAIN_END */ + } l_array; + struct zap_leaf_free { + uint8_t lf_type; /* always ZAP_CHUNK_FREE */ + uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES]; + uint16_t lf_next; /* next in free list, or CHAIN_END */ + } l_free; +} zap_leaf_chunk_t; + +typedef struct zap_leaf { + krwlock_t l_rwlock; + uint64_t l_blkid; /* 1< +#include +#endif +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct znode_phys; + +#define ACE_SLOT_CNT 6 +#define ZFS_ACL_VERSION_INITIAL 0ULL +#define ZFS_ACL_VERSION_FUID 1ULL +#define ZFS_ACL_VERSION ZFS_ACL_VERSION_FUID + +/* + * ZFS ACLs are store in various forms. + * Files created with ACL version ZFS_ACL_VERSION_INITIAL + * will all be created with fixed length ACEs of type + * zfs_oldace_t. + * + * Files with ACL version ZFS_ACL_VERSION_FUID will be created + * with various sized ACEs. 
The abstraction entries will utilize + * zfs_ace_hdr_t, normal user/group entries will use zfs_ace_t + * and some specialized CIFS ACEs will use zfs_object_ace_t. + */ + +/* + * All ACEs have a common hdr. For + * owner@, group@, and everyone@ this is all + * thats needed. + */ +typedef struct zfs_ace_hdr { + uint16_t z_type; + uint16_t z_flags; + uint32_t z_access_mask; +} zfs_ace_hdr_t; + +typedef zfs_ace_hdr_t zfs_ace_abstract_t; + +/* + * Standard ACE + */ +typedef struct zfs_ace { + zfs_ace_hdr_t z_hdr; + uint64_t z_fuid; +} zfs_ace_t; + +/* + * The following type only applies to ACE_ACCESS_ALLOWED|DENIED_OBJECT_ACE_TYPE + * and will only be set/retrieved in a CIFS context. + */ + +typedef struct zfs_object_ace { + zfs_ace_t z_ace; + uint8_t z_object_type[16]; /* object type */ + uint8_t z_inherit_type[16]; /* inherited object type */ +} zfs_object_ace_t; + +typedef struct zfs_oldace { + uint32_t z_fuid; /* "who" */ + uint32_t z_access_mask; /* access mask */ + uint16_t z_flags; /* flags, i.e inheritance */ + uint16_t z_type; /* type of entry allow/deny */ +} zfs_oldace_t; + +typedef struct zfs_acl_phys_v0 { + uint64_t z_acl_extern_obj; /* ext acl pieces */ + uint32_t z_acl_count; /* Number of ACEs */ + uint16_t z_acl_version; /* acl version */ + uint16_t z_acl_pad; /* pad */ + zfs_oldace_t z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */ +} zfs_acl_phys_v0_t; + +#define ZFS_ACE_SPACE (sizeof (zfs_oldace_t) * ACE_SLOT_CNT) + +typedef struct zfs_acl_phys { + uint64_t z_acl_extern_obj; /* ext acl pieces */ + uint32_t z_acl_size; /* Number of bytes in ACL */ + uint16_t z_acl_version; /* acl version */ + uint16_t z_acl_count; /* ace count */ + uint8_t z_ace_data[ZFS_ACE_SPACE]; /* space for embedded ACEs */ +} zfs_acl_phys_t; + + + +typedef struct acl_ops { + uint32_t (*ace_mask_get) (void *acep); /* get access mask */ + void (*ace_mask_set) (void *acep, + uint32_t mask); /* set access mask */ + uint16_t (*ace_flags_get) (void *acep); /* get flags */ + void (*ace_flags_set) (void *acep, + uint16_t flags); /* set flags */ + uint16_t (*ace_type_get)(void *acep); /* get type */ + void (*ace_type_set)(void *acep, + uint16_t type); /* set type */ + uint64_t (*ace_who_get)(void *acep); /* get who/fuid */ + void (*ace_who_set)(void *acep, + uint64_t who); /* set who/fuid */ + size_t (*ace_size)(void *acep); /* how big is this ace */ + size_t (*ace_abstract_size)(void); /* sizeof abstract entry */ + int (*ace_mask_off)(void); /* off of access mask in ace */ + int (*ace_data)(void *acep, void **datap); + /* ptr to data if any */ +} acl_ops_t; + +/* + * A zfs_acl_t structure is composed of a list of zfs_acl_node_t's. + * Each node will have one or more ACEs associated with it. You will + * only have multiple nodes during a chmod operation. Normally only + * one node is required. + */ +typedef struct zfs_acl_node { + list_node_t z_next; /* Next chunk of ACEs */ + void *z_acldata; /* pointer into actual ACE(s) */ + void *z_allocdata; /* pointer to kmem allocated memory */ + size_t z_allocsize; /* Size of blob in bytes */ + size_t z_size; /* length of ACL data */ + int z_ace_count; /* number of ACEs in this acl node */ + int z_ace_idx; /* ace iterator positioned on */ +} zfs_acl_node_t; + +typedef struct zfs_acl { + int z_acl_count; /* Number of ACEs */ + size_t z_acl_bytes; /* Number of bytes in ACL */ + uint_t z_version; /* version of ACL */ + void *z_next_ace; /* pointer to next ACE */ + int z_hints; /* ACL hints (ZFS_INHERIT_ACE ...) 
*/ + zfs_acl_node_t *z_curr_node; /* current node iterator is handling */ + list_t z_acl; /* chunks of ACE data */ + acl_ops_t z_ops; /* ACL operations */ + boolean_t z_has_fuids; /* FUIDs present in ACL? */ +} zfs_acl_t; + +#define ACL_DATA_ALLOCED 0x1 +#define ZFS_ACL_SIZE(aclcnt) (sizeof (ace_t) * (aclcnt)) + +/* + * Property values for acl_mode and acl_inherit. + * + * acl_mode can take discard, noallow, groupmask and passthrough. + * whereas acl_inherit has secure instead of groupmask. + */ + +#define ZFS_ACL_DISCARD 0 +#define ZFS_ACL_NOALLOW 1 +#define ZFS_ACL_GROUPMASK 2 +#define ZFS_ACL_PASSTHROUGH 3 +#define ZFS_ACL_RESTRICTED 4 +#define ZFS_ACL_PASSTHROUGH_X 5 + +struct znode; +struct zfsvfs; +struct zfs_fuid_info; + +#ifdef _KERNEL +void zfs_perm_init(struct znode *, struct znode *, int, vattr_t *, + dmu_tx_t *, cred_t *, zfs_acl_t *, zfs_fuid_info_t **); +int zfs_getacl(struct znode *, vsecattr_t *, boolean_t, cred_t *); +int zfs_setacl(struct znode *, vsecattr_t *, boolean_t, cred_t *); +void zfs_acl_rele(void *); +void zfs_oldace_byteswap(ace_t *, int); +void zfs_ace_byteswap(void *, size_t, boolean_t); +extern int zfs_zaccess(struct znode *, int, int, boolean_t, cred_t *); +extern int zfs_zaccess_rwx(struct znode *, mode_t, int, cred_t *); +extern int zfs_zaccess_unix(struct znode *, mode_t, cred_t *); +extern int zfs_acl_access(struct znode *, int, cred_t *); +int zfs_acl_chmod_setattr(struct znode *, zfs_acl_t **, uint64_t); +int zfs_zaccess_delete(struct znode *, struct znode *, cred_t *); +int zfs_zaccess_rename(struct znode *, struct znode *, + struct znode *, struct znode *, cred_t *cr); +void zfs_acl_free(zfs_acl_t *); +int zfs_vsec_2_aclp(struct zfsvfs *, vtype_t, vsecattr_t *, zfs_acl_t **); +int zfs_aclset_common(struct znode *, zfs_acl_t *, cred_t *, + struct zfs_fuid_info **, dmu_tx_t *); + +#endif + +#ifdef __cplusplus +} +#endif +#endif /* _SYS_FS_ZFS_ACL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_context.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_context.h new file mode 100644 index 000000000000..a5be3e1303db --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_context.h @@ -0,0 +1,73 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_ZFS_CONTEXT_H +#define _SYS_ZFS_CONTEXT_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CPU_SEQID (CPU->cpu_seqid) + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZFS_CONTEXT_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_ctldir.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_ctldir.h new file mode 100644 index 000000000000..ce29625d1e3a --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_ctldir.h @@ -0,0 +1,74 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _ZFS_CTLDIR_H +#define _ZFS_CTLDIR_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZFS_CTLDIR_NAME ".zfs" + +#define zfs_has_ctldir(zdp) \ + ((zdp)->z_id == (zdp)->z_zfsvfs->z_root && \ + ((zdp)->z_zfsvfs->z_ctldir != NULL)) +#define zfs_show_ctldir(zdp) \ + (zfs_has_ctldir(zdp) && \ + ((zdp)->z_zfsvfs->z_show_ctldir)) + +void zfsctl_create(zfsvfs_t *); +void zfsctl_destroy(zfsvfs_t *); +vnode_t *zfsctl_root(znode_t *); +void zfsctl_init(void); +void zfsctl_fini(void); + +int zfsctl_rename_snapshot(const char *from, const char *to); +int zfsctl_destroy_snapshot(const char *snapname, int force); +int zfsctl_umount_snapshots(vfs_t *, int, cred_t *); + +int zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp, + int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, + int *direntflags, pathname_t *realpnp); + +int zfsctl_make_fid(zfsvfs_t *zfsvfsp, uint64_t object, uint32_t gen, + fid_t *fidp); +int zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp); + +#define ZFSCTL_INO_ROOT 0x1 +#define ZFSCTL_INO_SNAPDIR 0x2 + +#ifdef __cplusplus +} +#endif + +#endif /* _ZFS_CTLDIR_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_debug.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_debug.h new file mode 100644 index 000000000000..450ac1c81b42 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_debug.h @@ -0,0 +1,75 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_ZFS_DEBUG_H +#define _SYS_ZFS_DEBUG_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +/* + * ZFS debugging + */ + +#if defined(DEBUG) || !defined(_KERNEL) +#define ZFS_DEBUG +#endif + +extern int zfs_flags; + +#define ZFS_DEBUG_DPRINTF 0x0001 +#define ZFS_DEBUG_DBUF_VERIFY 0x0002 +#define ZFS_DEBUG_DNODE_VERIFY 0x0004 +#define ZFS_DEBUG_SNAPNAMES 0x0008 +#define ZFS_DEBUG_MODIFY 0x0010 + +#ifdef ZFS_DEBUG +extern void __dprintf(const char *file, const char *func, + int line, const char *fmt, ...); +#define dprintf(...) \ + if (zfs_flags & ZFS_DEBUG_DPRINTF) \ + __dprintf(__FILE__, __func__, __LINE__, __VA_ARGS__) +#else +#define dprintf(...) ((void)0) +#endif /* ZFS_DEBUG */ + +extern void zfs_panic_recover(const char *fmt, ...); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZFS_DEBUG_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_dir.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_dir.h new file mode 100644 index 000000000000..ebb66e8ae4e9 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_dir.h @@ -0,0 +1,76 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
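/*
 * Illustrative use of the dprintf() macro above (editorial sketch, not
 * from the imported sources).  Output appears only when the
 * ZFS_DEBUG_DPRINTF bit is set in zfs_flags.
 */
static void
dprintf_example(uint64_t obj, int err)
{
	dprintf("object %llu: error %d\n", (u_longlong_t)obj, err);
}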
+ */ + +#ifndef _SYS_FS_ZFS_DIR_H +#define _SYS_FS_ZFS_DIR_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* zfs_dirent_lock() flags */ +#define ZNEW 0x0001 /* entry should not exist */ +#define ZEXISTS 0x0002 /* entry should exist */ +#define ZSHARED 0x0004 /* shared access (zfs_dirlook()) */ +#define ZXATTR 0x0008 /* we want the xattr dir */ +#define ZRENAMING 0x0010 /* znode is being renamed */ +#define ZCILOOK 0x0020 /* case-insensitive lookup requested */ +#define ZCIEXACT 0x0040 /* c-i requires c-s match (rename) */ + +/* mknode flags */ +#define IS_ROOT_NODE 0x01 /* create a root node */ +#define IS_XATTR 0x02 /* create an extended attribute node */ +#define IS_REPLAY 0x04 /* we are replaying intent log */ + +extern int zfs_dirent_lock(zfs_dirlock_t **, znode_t *, char *, znode_t **, + int, int *, pathname_t *); +extern void zfs_dirent_unlock(zfs_dirlock_t *); +extern int zfs_link_create(zfs_dirlock_t *, znode_t *, dmu_tx_t *, int); +extern int zfs_link_destroy(zfs_dirlock_t *, znode_t *, dmu_tx_t *, int, + boolean_t *); +extern int zfs_dirlook(znode_t *, char *, vnode_t **, int, int *, + pathname_t *); +extern void zfs_mknode(znode_t *, vattr_t *, dmu_tx_t *, cred_t *, + uint_t, znode_t **, int, zfs_acl_t *, zfs_fuid_info_t **); +extern void zfs_rmnode(znode_t *); +extern void zfs_dl_name_switch(zfs_dirlock_t *dl, char *new, char **old); +extern boolean_t zfs_dirempty(znode_t *); +extern void zfs_unlinked_add(znode_t *, dmu_tx_t *); +extern void zfs_unlinked_drain(zfsvfs_t *zfsvfs); +extern int zfs_sticky_remove_access(znode_t *, znode_t *, cred_t *cr); +extern int zfs_get_xattrdir(znode_t *, vnode_t **, cred_t *, int); +extern int zfs_make_xattrdir(znode_t *, vattr_t *, vnode_t **, cred_t *); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_FS_ZFS_DIR_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_fuid.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_fuid.h new file mode 100644 index 000000000000..810ffc81a8cc --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_fuid.h @@ -0,0 +1,125 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_FS_ZFS_FUID_H +#define _SYS_FS_ZFS_FUID_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef _KERNEL +#include +#include +#include +#include +#endif +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + ZFS_OWNER, + ZFS_GROUP, + ZFS_ACE_USER, + ZFS_ACE_GROUP +} zfs_fuid_type_t; + +/* + * Estimate space needed for one more fuid table entry. 
+ * for now assume its current size + 1K + */ +#define FUID_SIZE_ESTIMATE(z) (z->z_fuid_size + (SPA_MINBLOCKSIZE << 1)) + +#define FUID_INDEX(x) (x >> 32) +#define FUID_RID(x) (x & 0xffffffff) +#define FUID_ENCODE(idx, rid) ((idx << 32) | rid) +/* + * FUIDs cause problems for the intent log + * we need to replay the creation of the FUID, + * but we can't count on the idmapper to be around + * and during replay the FUID index may be different than + * before. Also, if an ACL has 100 ACEs and 12 different + * domains we don't want to log 100 domain strings, but rather + * just the unique 12. + */ + +/* + * The FUIDs in the log will index into + * domain string table and the bottom half will be the rid. + * Used for mapping ephemeral uid/gid during ACL setting to FUIDs + */ +typedef struct zfs_fuid { + list_node_t z_next; + uint64_t z_id; /* uid/gid being converted to fuid */ + uint64_t z_domidx; /* index in AVL domain table */ + uint64_t z_logfuid; /* index for domain in log */ +} zfs_fuid_t; + +/* list of unique domains */ +typedef struct zfs_fuid_domain { + list_node_t z_next; + uint64_t z_domidx; /* AVL tree idx */ + const char *z_domain; /* domain string */ +} zfs_fuid_domain_t; + +/* + * FUID information necessary for logging create, setattr, and setacl. + */ +typedef struct zfs_fuid_info { + list_t z_fuids; + list_t z_domains; + uint64_t z_fuid_owner; + uint64_t z_fuid_group; + char **z_domain_table; /* Used during replay */ + uint32_t z_fuid_cnt; /* How many fuids in z_fuids */ + uint32_t z_domain_cnt; /* How many domains */ + size_t z_domain_str_sz; /* len of domain strings z_domain list */ +} zfs_fuid_info_t; + +#ifdef _KERNEL +struct znode; +extern uid_t zfs_fuid_map_id(zfsvfs_t *, uint64_t, cred_t *, zfs_fuid_type_t); +extern void zfs_fuid_destroy(zfsvfs_t *); +extern uint64_t zfs_fuid_create_cred(zfsvfs_t *, zfs_fuid_type_t, + dmu_tx_t *, cred_t *, zfs_fuid_info_t **); +extern uint64_t zfs_fuid_create(zfsvfs_t *, uint64_t, cred_t *, zfs_fuid_type_t, + dmu_tx_t *, zfs_fuid_info_t **); +extern void zfs_fuid_map_ids(struct znode *zp, cred_t *cr, uid_t *uid, + uid_t *gid); +extern zfs_fuid_info_t *zfs_fuid_info_alloc(void); +extern void zfs_fuid_info_free(); +extern boolean_t zfs_groupmember(zfsvfs_t *, uint64_t, cred_t *); +#endif + +char *zfs_fuid_idx_domain(avl_tree_t *, uint32_t); +uint64_t zfs_fuid_table_load(objset_t *, uint64_t, avl_tree_t *, avl_tree_t *); +void zfs_fuid_table_destroy(avl_tree_t *, avl_tree_t *); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_FS_ZFS_FUID_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_ioctl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_ioctl.h new file mode 100644 index 000000000000..1692608bb9ce --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_ioctl.h @@ -0,0 +1,196 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_ZFS_IOCTL_H +#define _SYS_ZFS_IOCTL_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include + +#ifdef _KERNEL +#include +#endif /* _KERNEL */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Property values for snapdir + */ +#define ZFS_SNAPDIR_HIDDEN 0 +#define ZFS_SNAPDIR_VISIBLE 1 + +#define DMU_BACKUP_STREAM_VERSION (1ULL) +#define DMU_BACKUP_HEADER_VERSION (2ULL) +#define DMU_BACKUP_MAGIC 0x2F5bacbacULL + +#define DRR_FLAG_CLONE (1<<0) +#define DRR_FLAG_CI_DATA (1<<1) + +/* + * zfs ioctl command structure + */ +typedef struct dmu_replay_record { + enum { + DRR_BEGIN, DRR_OBJECT, DRR_FREEOBJECTS, + DRR_WRITE, DRR_FREE, DRR_END, + } drr_type; + uint32_t drr_payloadlen; + union { + struct drr_begin { + uint64_t drr_magic; + uint64_t drr_version; + uint64_t drr_creation_time; + dmu_objset_type_t drr_type; + uint32_t drr_flags; + uint64_t drr_toguid; + uint64_t drr_fromguid; + char drr_toname[MAXNAMELEN]; + } drr_begin; + struct drr_end { + zio_cksum_t drr_checksum; + } drr_end; + struct drr_object { + uint64_t drr_object; + dmu_object_type_t drr_type; + dmu_object_type_t drr_bonustype; + uint32_t drr_blksz; + uint32_t drr_bonuslen; + uint8_t drr_checksum; + uint8_t drr_compress; + uint8_t drr_pad[6]; + /* bonus content follows */ + } drr_object; + struct drr_freeobjects { + uint64_t drr_firstobj; + uint64_t drr_numobjs; + } drr_freeobjects; + struct drr_write { + uint64_t drr_object; + dmu_object_type_t drr_type; + uint32_t drr_pad; + uint64_t drr_offset; + uint64_t drr_length; + /* content follows */ + } drr_write; + struct drr_free { + uint64_t drr_object; + uint64_t drr_offset; + uint64_t drr_length; + } drr_free; + } drr_u; +} dmu_replay_record_t; + +typedef struct zinject_record { + uint64_t zi_objset; + uint64_t zi_object; + uint64_t zi_start; + uint64_t zi_end; + uint64_t zi_guid; + uint32_t zi_level; + uint32_t zi_error; + uint64_t zi_type; + uint32_t zi_freq; + uint32_t zi_pad; /* pad out to 64 bit alignment */ +} zinject_record_t; + +#define ZINJECT_NULL 0x1 +#define ZINJECT_FLUSH_ARC 0x2 +#define ZINJECT_UNLOAD_SPA 0x4 + +typedef struct zfs_share { + uint64_t z_exportdata; + uint64_t z_sharedata; + uint64_t z_sharetype; /* 0 = share, 1 = unshare */ + uint64_t z_sharemax; /* max length of share string */ +} zfs_share_t; + +/* + * ZFS file systems may behave the usual, POSIX-compliant way, where + * name lookups are case-sensitive. They may also be set up so that + * all the name lookups are case-insensitive, or so that only some + * lookups, the ones that set an FIGNORECASE flag, are case-insensitive. 
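[Illustrative sketch, not part of the imported sources: the dmu_replay_record_t declared earlier in this header is the unit a "zfs send" stream is made of, and the first record is expected to be a DRR_BEGIN whose magic and version match the constants defined above. check_stream_begin() is a placeholder name, and the assumption that drr_version carries DMU_BACKUP_STREAM_VERSION for a plain stream is the author's reading of the send code, not stated in this header.]

	static int
	check_stream_begin(const dmu_replay_record_t *drr)
	{
		const struct drr_begin *drrb = &drr->drr_u.drr_begin;

		if (drr->drr_type != DRR_BEGIN)
			return (EINVAL);
		if (drrb->drr_magic != DMU_BACKUP_MAGIC)
			return (EINVAL);	/* not a backup stream (or byteswapped) */
		if (drrb->drr_version != DMU_BACKUP_STREAM_VERSION)
			return (ENOTSUP);	/* stream format we do not understand */
		return (0);
	}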
+ */ +typedef enum zfs_case { + ZFS_CASE_SENSITIVE, + ZFS_CASE_INSENSITIVE, + ZFS_CASE_MIXED +} zfs_case_t; + +typedef struct zfs_cmd { + char zc_name[MAXPATHLEN]; + char zc_value[MAXPATHLEN * 2]; + char zc_string[MAXNAMELEN]; + uint64_t zc_guid; + uint64_t zc_nvlist_conf; /* really (char *) */ + uint64_t zc_nvlist_conf_size; + uint64_t zc_nvlist_src; /* really (char *) */ + uint64_t zc_nvlist_src_size; + uint64_t zc_nvlist_dst; /* really (char *) */ + uint64_t zc_nvlist_dst_size; + uint64_t zc_cookie; + uint64_t zc_objset_type; + uint64_t zc_perm_action; + uint64_t zc_history; /* really (char *) */ + uint64_t zc_history_len; + uint64_t zc_history_offset; + uint64_t zc_obj; + zfs_share_t zc_share; + dmu_objset_stats_t zc_objset_stats; + struct drr_begin zc_begin_record; + zinject_record_t zc_inject_record; +} zfs_cmd_t; + +#define ZVOL_MAX_MINOR (1 << 16) +#define ZFS_MIN_MINOR (ZVOL_MAX_MINOR + 1) + +#ifdef _KERNEL + +typedef struct zfs_creat { + nvlist_t *zct_zplprops; + nvlist_t *zct_props; +} zfs_creat_t; + +extern dev_info_t *zfs_dip; + +extern int zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr); +extern int zfs_secpolicy_rename_perms(const char *from, + const char *to, cred_t *cr); +extern int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr); +extern int zfs_busy(void); +extern int zfs_unmount_snap(char *, void *); + +#endif /* _KERNEL */ + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZFS_IOCTL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_rlock.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_rlock.h new file mode 100644 index 000000000000..f302b663e22a --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_rlock.h @@ -0,0 +1,89 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
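[Hedged userland sketch, not part of the imported sources: how callers pass an nvlist through the zfs_cmd_t declared above. The dataset name, the props nvlist, the zfs_fd descriptor and ZFS_IOC_SOMETHING are placeholders; the key point is that the packed buffer's user address is stored in the uint64_t fields commented "really (char *)".]

	zfs_cmd_t zc = { 0 };
	char *packed = NULL;
	size_t size = 0;

	/* props: an nvlist_t built earlier by the caller */
	VERIFY(nvlist_pack(props, &packed, &size, NV_ENCODE_NATIVE, 0) == 0);
	(void) strlcpy(zc.zc_name, "tank/home", sizeof (zc.zc_name));
	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zc.zc_nvlist_src_size = size;
	/* (void) ioctl(zfs_fd, ZFS_IOC_SOMETHING, &zc); */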
+ */ + +#ifndef _SYS_FS_ZFS_RLOCK_H +#define _SYS_FS_ZFS_RLOCK_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _KERNEL + +#include + +typedef enum { + RL_READER, + RL_WRITER, + RL_APPEND +} rl_type_t; + +typedef struct rl { + znode_t *r_zp; /* znode this lock applies to */ + avl_node_t r_node; /* avl node link */ + uint64_t r_off; /* file range offset */ + uint64_t r_len; /* file range length */ + uint_t r_cnt; /* range reference count in tree */ + rl_type_t r_type; /* range type */ + kcondvar_t r_wr_cv; /* cv for waiting writers */ + kcondvar_t r_rd_cv; /* cv for waiting readers */ + uint8_t r_proxy; /* acting for original range */ + uint8_t r_write_wanted; /* writer wants to lock this range */ + uint8_t r_read_wanted; /* reader wants to lock this range */ +} rl_t; + +/* + * Lock a range (offset, length) as either shared (READER) + * or exclusive (WRITER or APPEND). APPEND is a special type that + * is converted to WRITER that specified to lock from the start of the + * end of file. zfs_range_lock() returns the range lock structure. + */ +rl_t *zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type); + +/* + * Unlock range and destroy range lock structure. + */ +void zfs_range_unlock(rl_t *rl); + +/* + * Reduce range locked as RW_WRITER from whole file to specified range. + * Asserts the whole file was previously locked. + */ +void zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len); + +/* + * AVL comparison function used to compare range locks + */ +int zfs_range_compare(const void *arg1, const void *arg2); + +#endif /* _KERNEL */ + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_FS_ZFS_RLOCK_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_vfsops.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_vfsops.h new file mode 100644 index 000000000000..87b75e6e75b5 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_vfsops.h @@ -0,0 +1,140 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
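[Minimal sketch, not part of the imported sources, of the locking pattern the comments above describe for a write; the function name is a placeholder. Only the byte range being modified is locked as RL_WRITER, and the lock structure is released (and freed) when the work is done.]

	static void
	example_write_range(znode_t *zp, uint64_t off, uint64_t len)
	{
		rl_t *rl = zfs_range_lock(zp, off, len, RL_WRITER);

		/* ... modify the data; update zp_size under this lock if the file grew ... */

		zfs_range_unlock(rl);
	}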
+ */ + +#ifndef _SYS_FS_ZFS_VFSOPS_H +#define _SYS_FS_ZFS_VFSOPS_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct zfsvfs zfsvfs_t; + +struct zfsvfs { + vfs_t *z_vfs; /* generic fs struct */ + zfsvfs_t *z_parent; /* parent fs */ + objset_t *z_os; /* objset reference */ + uint64_t z_root; /* id of root znode */ + uint64_t z_unlinkedobj; /* id of unlinked zapobj */ + uint64_t z_max_blksz; /* maximum block size for files */ + uint64_t z_assign; /* TXG_NOWAIT or set by zil_replay() */ + uint64_t z_fuid_obj; /* fuid table object number */ + uint64_t z_fuid_size; /* fuid table size */ + avl_tree_t z_fuid_idx; /* fuid tree keyed by index */ + avl_tree_t z_fuid_domain; /* fuid tree keyed by domain */ + krwlock_t z_fuid_lock; /* fuid lock */ + boolean_t z_fuid_loaded; /* fuid tables are loaded */ + struct zfs_fuid_info *z_fuid_replay; /* fuid info for replay */ + zilog_t *z_log; /* intent log pointer */ + uint_t z_acl_mode; /* acl chmod/mode behavior */ + uint_t z_acl_inherit; /* acl inheritance behavior */ + zfs_case_t z_case; /* case-sense */ + boolean_t z_utf8; /* utf8-only */ + int z_norm; /* normalization flags */ + boolean_t z_atime; /* enable atimes mount option */ + boolean_t z_unmounted; /* unmounted */ + rrwlock_t z_teardown_lock; + krwlock_t z_teardown_inactive_lock; + list_t z_all_znodes; /* all vnodes in the fs */ + kmutex_t z_znodes_lock; /* lock for z_all_znodes */ + vnode_t *z_ctldir; /* .zfs directory pointer */ + boolean_t z_show_ctldir; /* expose .zfs in the root dir */ + boolean_t z_issnap; /* true if this is a snapshot */ + boolean_t z_vscan; /* virus scan on/off */ + boolean_t z_use_fuids; /* version allows fuids */ + kmutex_t z_online_recv_lock; /* recv in prog grabs as WRITER */ + uint64_t z_version; /* ZPL version */ +#define ZFS_OBJ_MTX_SZ 64 + kmutex_t z_hold_mtx[ZFS_OBJ_MTX_SZ]; /* znode hold locks */ +}; + +/* + * Normal filesystems (those not under .zfs/snapshot) have a total + * file ID size limited to 12 bytes (including the length field) due to + * NFSv2 protocol's limitation of 32 bytes for a filehandle. For historical + * reasons, this same limit is being imposed by the Solaris NFSv3 implementation + * (although the NFSv3 protocol actually permits a maximum of 64 bytes). It + * is not possible to expand beyond 12 bytes without abandoning support + * of NFSv2. + * + * For normal filesystems, we partition up the available space as follows: + * 2 bytes fid length (required) + * 6 bytes object number (48 bits) + * 4 bytes generation number (32 bits) + * + * We reserve only 48 bits for the object number, as this is the limit + * currently defined and imposed by the DMU. + */ +typedef struct zfid_short { + uint16_t zf_len; + uint8_t zf_object[6]; /* obj[i] = obj >> (8 * i) */ + uint8_t zf_gen[4]; /* gen[i] = gen >> (8 * i) */ +} zfid_short_t; + +/* + * Filesystems under .zfs/snapshot have a total file ID size of 22 bytes + * (including the length field). This makes files under .zfs/snapshot + * accessible by NFSv3 and NFSv4, but not NFSv2. + * + * For files under .zfs/snapshot, we partition up the available space + * as follows: + * 2 bytes fid length (required) + * 6 bytes object number (48 bits) + * 4 bytes generation number (32 bits) + * 6 bytes objset id (48 bits) + * 4 bytes currently just zero (32 bits) + * + * We reserve only 48 bits for the object number and objset id, as these are + * the limits currently defined and imposed by the DMU. 
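[Sketch, not part of the imported sources, of the packing the comment above describes; zfid_short_t and SHORT_FID_LEN are declared immediately below. The 48-bit object number and 32-bit generation are stored a byte at a time, least-significant byte first, exactly as the "obj[i] = obj >> (8 * i)" annotations say.]

	static void
	zfid_short_pack(zfid_short_t *zfid, uint64_t object, uint32_t gen)
	{
		int i;

		zfid->zf_len = SHORT_FID_LEN;
		for (i = 0; i < (int)sizeof (zfid->zf_object); i++)
			zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
		for (i = 0; i < (int)sizeof (zfid->zf_gen); i++)
			zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
	}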
+ */ +typedef struct zfid_long { + zfid_short_t z_fid; + uint8_t zf_setid[6]; /* obj[i] = obj >> (8 * i) */ + uint8_t zf_setgen[4]; /* gen[i] = gen >> (8 * i) */ +} zfid_long_t; + +#define SHORT_FID_LEN (sizeof (zfid_short_t) - sizeof (uint16_t)) +#define LONG_FID_LEN (sizeof (zfid_long_t) - sizeof (uint16_t)) + +extern uint_t zfs_fsyncer_key; + +extern int zfs_suspend_fs(zfsvfs_t *zfsvfs, char *osname, int *mode); +extern int zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname, int mode); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_FS_ZFS_VFSOPS_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_znode.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_znode.h new file mode 100644 index 000000000000..a5416525c7a3 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zfs_znode.h @@ -0,0 +1,356 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_FS_ZFS_ZNODE_H +#define _SYS_FS_ZFS_ZNODE_H + +#ifdef _KERNEL +#include +#include +#include +#include +#include +#include +#include +#endif +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Additional file level attributes, that are stored + * in the upper half of zp_flags + */ +#define ZFS_READONLY 0x0000000100000000 +#define ZFS_HIDDEN 0x0000000200000000 +#define ZFS_SYSTEM 0x0000000400000000 +#define ZFS_ARCHIVE 0x0000000800000000 +#define ZFS_IMMUTABLE 0x0000001000000000 +#define ZFS_NOUNLINK 0x0000002000000000 +#define ZFS_APPENDONLY 0x0000004000000000 +#define ZFS_NODUMP 0x0000008000000000 +#define ZFS_OPAQUE 0x0000010000000000 +#define ZFS_AV_QUARANTINED 0x0000020000000000 +#define ZFS_AV_MODIFIED 0x0000040000000000 + +#define ZFS_ATTR_SET(zp, attr, value) \ +{ \ + if (value) \ + zp->z_phys->zp_flags |= attr; \ + else \ + zp->z_phys->zp_flags &= ~attr; \ +} + +/* + * Define special zfs pflags + */ +#define ZFS_XATTR 0x1 /* is an extended attribute */ +#define ZFS_INHERIT_ACE 0x2 /* ace has inheritable ACEs */ +#define ZFS_ACL_TRIVIAL 0x4 /* files ACL is trivial */ +#define ZFS_ACL_OBJ_ACE 0x8 /* ACL has CMPLX Object ACE */ +#define ZFS_ACL_PROTECTED 0x10 /* ACL protected */ +#define ZFS_ACL_DEFAULTED 0x20 /* ACL should be defaulted */ +#define ZFS_ACL_AUTO_INHERIT 0x40 /* ACL should be inherited */ +#define ZFS_BONUS_SCANSTAMP 0x80 /* Scanstamp in bonus area */ + +/* + * Is ID ephemeral? + */ +#define IS_EPHEMERAL(x) (x > MAXUID) + +/* + * Should we use FUIDs? + */ +#define USE_FUIDS(version, os) (version >= ZPL_VERSION_FUID &&\ + spa_version(dmu_objset_spa(os)) >= SPA_VERSION_FUID) + +#define MASTER_NODE_OBJ 1 + +/* + * Special attributes for master node. 
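[Illustrative use, not part of the imported sources, of the ZFS_ATTR_SET macro defined earlier in this header; the helper name and arguments are placeholders. It simply sets or clears one of the upper-half zp_flags attribute bits on the persistent znode.]

	static void
	example_set_immutable(znode_t *zp, boolean_t value)
	{
		ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, value);	/* set or clear the bit */
	}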
+ */ +#define ZFS_FSID "FSID" +#define ZFS_UNLINKED_SET "DELETE_QUEUE" +#define ZFS_ROOT_OBJ "ROOT" +#define ZPL_VERSION_STR "VERSION" +#define ZFS_FUID_TABLES "FUID" + +#define ZFS_MAX_BLOCKSIZE (SPA_MAXBLOCKSIZE) + +/* Path component length */ +/* + * The generic fs code uses MAXNAMELEN to represent + * what the largest component length is. Unfortunately, + * this length includes the terminating NULL. ZFS needs + * to tell the users via pathconf() and statvfs() what the + * true maximum length of a component is, excluding the NULL. + */ +#define ZFS_MAXNAMELEN (MAXNAMELEN - 1) + +/* + * Convert mode bits (zp_mode) to BSD-style DT_* values for storing in + * the directory entries. + */ +#define IFTODT(mode) (((mode) & S_IFMT) >> 12) + +/* + * The directory entry has the type (currently unused on Solaris) in the + * top 4 bits, and the object number in the low 48 bits. The "middle" + * 12 bits are unused. + */ +#define ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4) +#define ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48) + +/* + * This is the persistent portion of the znode. It is stored + * in the "bonus buffer" of the file. Short symbolic links + * are also stored in the bonus buffer. + */ +typedef struct znode_phys { + uint64_t zp_atime[2]; /* 0 - last file access time */ + uint64_t zp_mtime[2]; /* 16 - last file modification time */ + uint64_t zp_ctime[2]; /* 32 - last file change time */ + uint64_t zp_crtime[2]; /* 48 - creation time */ + uint64_t zp_gen; /* 64 - generation (txg of creation) */ + uint64_t zp_mode; /* 72 - file mode bits */ + uint64_t zp_size; /* 80 - size of file */ + uint64_t zp_parent; /* 88 - directory parent (`..') */ + uint64_t zp_links; /* 96 - number of links to file */ + uint64_t zp_xattr; /* 104 - DMU object for xattrs */ + uint64_t zp_rdev; /* 112 - dev_t for VBLK & VCHR files */ + uint64_t zp_flags; /* 120 - persistent flags */ + uint64_t zp_uid; /* 128 - file owner */ + uint64_t zp_gid; /* 136 - owning group */ + uint64_t zp_zap; /* 144 - extra attributes */ + uint64_t zp_pad[3]; /* 152 - future */ + zfs_acl_phys_t zp_acl; /* 176 - 263 ACL */ + /* + * Data may pad out any remaining bytes in the znode buffer, eg: + * + * |<---------------------- dnode_phys (512) ------------------------>| + * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->| + * |<---- znode (264) ---->|<---- data (56) ---->| + * + * At present, we use this space for the following: + * - symbolic links + * - 32-byte anti-virus scanstamp (regular files only) + */ +} znode_phys_t; + +/* + * Directory entry locks control access to directory entries. + * They are used to protect creates, deletes, and renames. + * Each directory znode has a mutex and a list of locked names. 
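[Short sketch, not part of the imported sources, of decoding a directory ZAP entry with the macros defined above; the helper name is a placeholder. The object number lives in the low 48 bits and a DT_* type, derived with IFTODT() at creation time, in the top 4.]

	static void
	example_dirent_decode(uint64_t de, uint64_t *objp, uint64_t *dtp)
	{
		*objp = ZFS_DIRENT_OBJ(de);	/* 48-bit object number */
		*dtp = ZFS_DIRENT_TYPE(de);	/* DT_* value (currently unused on Solaris) */
	}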
+ */ +#ifdef _KERNEL +typedef struct zfs_dirlock { + char *dl_name; /* directory entry being locked */ + uint32_t dl_sharecnt; /* 0 if exclusive, > 0 if shared */ + uint16_t dl_namesize; /* set if dl_name was allocated */ + kcondvar_t dl_cv; /* wait for entry to be unlocked */ + struct znode *dl_dzp; /* directory znode */ + struct zfs_dirlock *dl_next; /* next in z_dirlocks list */ +} zfs_dirlock_t; + +typedef struct znode { + struct zfsvfs *z_zfsvfs; + vnode_t *z_vnode; + uint64_t z_id; /* object ID for this znode */ + kmutex_t z_lock; /* znode modification lock */ + krwlock_t z_map_lock; /* page map lock */ + krwlock_t z_parent_lock; /* parent lock for directories */ + krwlock_t z_name_lock; /* "master" lock for dirent locks */ + zfs_dirlock_t *z_dirlocks; /* directory entry lock list */ + kmutex_t z_range_lock; /* protects changes to z_range_avl */ + avl_tree_t z_range_avl; /* avl tree of file range locks */ + uint8_t z_unlinked; /* file has been unlinked */ + uint8_t z_atime_dirty; /* atime needs to be synced */ + uint8_t z_zn_prefetch; /* Prefetch znodes? */ + uint_t z_blksz; /* block size in bytes */ + uint_t z_seq; /* modification sequence number */ + uint64_t z_mapcnt; /* number of pages mapped to file */ + uint64_t z_last_itx; /* last ZIL itx on this znode */ + uint64_t z_gen; /* generation (same as zp_gen) */ + uint32_t z_sync_cnt; /* synchronous open count */ + kmutex_t z_acl_lock; /* acl data lock */ + list_node_t z_link_node; /* all znodes in fs link */ + /* + * These are dmu managed fields. + */ + znode_phys_t *z_phys; /* pointer to persistent znode */ + dmu_buf_t *z_dbuf; /* buffer containing the z_phys */ +} znode_t; + + +/* + * Range locking rules + * -------------------- + * 1. When truncating a file (zfs_create, zfs_setattr, zfs_space) the whole + * file range needs to be locked as RL_WRITER. Only then can the pages be + * freed etc and zp_size reset. zp_size must be set within range lock. + * 2. For writes and punching holes (zfs_write & zfs_space) just the range + * being written or freed needs to be locked as RL_WRITER. + * Multiple writes at the end of the file must coordinate zp_size updates + * to ensure data isn't lost. A compare and swap loop is currently used + * to ensure the file size is at least the offset last written. + * 3. For reads (zfs_read, zfs_get_data & zfs_putapage) just the range being + * read needs to be locked as RL_READER. A check against zp_size can then + * be made for reading beyond end of file. + */ + +/* + * Convert between znode pointers and vnode pointers + */ +#define ZTOV(ZP) ((ZP)->z_vnode) +#define VTOZ(VP) ((znode_t *)(VP)->v_data) + +/* + * ZFS_ENTER() is called on entry to each ZFS vnode and vfs operation. + * ZFS_EXIT() must be called before exitting the vop. + * ZFS_VERIFY_ZP() verifies the znode is valid. 
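[Sketch, not part of the imported sources, of the calling convention just described; zfs_example_op is a placeholder, not a real vnode op, and the macros themselves are defined immediately below. Every ZPL entry point brackets its work with ZFS_ENTER/ZFS_EXIT and re-validates the znode with ZFS_VERIFY_ZP.]

	static int
	zfs_example_op(vnode_t *vp, cred_t *cr)
	{
		znode_t *zp = VTOZ(vp);
		zfsvfs_t *zfsvfs = zp->z_zfsvfs;

		ZFS_ENTER(zfsvfs);	/* returns EIO if the fs has been torn down */
		ZFS_VERIFY_ZP(zp);	/* returns EIO if the znode lost its dbuf */

		/* ... the actual operation goes here ... */

		ZFS_EXIT(zfsvfs);
		return (0);
	}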
+ */ +#define ZFS_ENTER(zfsvfs) \ + { \ + rrw_enter(&(zfsvfs)->z_teardown_lock, RW_READER, FTAG); \ + if ((zfsvfs)->z_unmounted) { \ + ZFS_EXIT(zfsvfs); \ + return (EIO); \ + } \ + } + +#define ZFS_EXIT(zfsvfs) rrw_exit(&(zfsvfs)->z_teardown_lock, FTAG) + +#define ZFS_VERIFY_ZP(zp) \ + if ((zp)->z_dbuf == NULL) { \ + ZFS_EXIT((zp)->z_zfsvfs); \ + return (EIO); \ + } \ + +/* + * Macros for dealing with dmu_buf_hold + */ +#define ZFS_OBJ_HASH(obj_num) ((obj_num) & (ZFS_OBJ_MTX_SZ - 1)) +#define ZFS_OBJ_MUTEX(zfsvfs, obj_num) \ + (&(zfsvfs)->z_hold_mtx[ZFS_OBJ_HASH(obj_num)]) +#define ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num) \ + mutex_enter(ZFS_OBJ_MUTEX((zfsvfs), (obj_num))) +#define ZFS_OBJ_HOLD_TRYENTER(zfsvfs, obj_num) \ + mutex_tryenter(ZFS_OBJ_MUTEX((zfsvfs), (obj_num))) +#define ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num) \ + mutex_exit(ZFS_OBJ_MUTEX((zfsvfs), (obj_num))) + +/* + * Macros to encode/decode ZFS stored time values from/to struct timespec + */ +#define ZFS_TIME_ENCODE(tp, stmp) \ +{ \ + (stmp)[0] = (uint64_t)(tp)->tv_sec; \ + (stmp)[1] = (uint64_t)(tp)->tv_nsec; \ +} + +#define ZFS_TIME_DECODE(tp, stmp) \ +{ \ + (tp)->tv_sec = (time_t)(stmp)[0]; \ + (tp)->tv_nsec = (long)(stmp)[1]; \ +} + +/* + * Timestamp defines + */ +#define ACCESSED (AT_ATIME) +#define STATE_CHANGED (AT_CTIME) +#define CONTENT_MODIFIED (AT_MTIME | AT_CTIME) + +#define ZFS_ACCESSTIME_STAMP(zfsvfs, zp) \ + if ((zfsvfs)->z_atime && !((zfsvfs)->z_vfs->vfs_flag & VFS_RDONLY)) \ + zfs_time_stamper(zp, ACCESSED, NULL) + +extern int zfs_init_fs(zfsvfs_t *, znode_t **); +extern void zfs_set_dataprop(objset_t *); +extern void zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *, + dmu_tx_t *tx); +extern void zfs_time_stamper(znode_t *, uint_t, dmu_tx_t *); +extern void zfs_time_stamper_locked(znode_t *, uint_t, dmu_tx_t *); +extern void zfs_grow_blocksize(znode_t *, uint64_t, dmu_tx_t *); +extern int zfs_freesp(znode_t *, uint64_t, uint64_t, int, boolean_t); +extern void zfs_znode_init(void); +extern void zfs_znode_fini(void); +extern int zfs_zget(zfsvfs_t *, uint64_t, znode_t **); +extern int zfs_rezget(znode_t *); +extern void zfs_zinactive(znode_t *); +extern void zfs_znode_delete(znode_t *, dmu_tx_t *); +extern void zfs_znode_free(znode_t *); +extern void zfs_remove_op_tables(); +extern int zfs_create_op_tables(); +extern int zfs_sync(vfs_t *vfsp, short flag, cred_t *cr); +extern dev_t zfs_cmpldev(uint64_t); +extern int zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value); +extern int zfs_set_version(const char *name, uint64_t newvers); +extern int zfs_get_stats(objset_t *os, nvlist_t *nv); +extern void zfs_znode_dmu_fini(znode_t *); + +extern void zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *dzp, znode_t *zp, char *name, vsecattr_t *, zfs_fuid_info_t *, + vattr_t *vap); +extern int zfs_log_create_txtype(zil_create_t, vsecattr_t *vsecp, + vattr_t *vap); +extern void zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *dzp, char *name); +extern void zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *dzp, znode_t *zp, char *name); +extern void zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *dzp, znode_t *zp, char *name, char *link); +extern void zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *sdzp, char *sname, znode_t *tdzp, char *dname, znode_t *szp); +extern void zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype, + znode_t *zp, offset_t off, ssize_t len, int ioflag); +extern void 
zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype, + znode_t *zp, uint64_t off, uint64_t len); +extern void zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype, + znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp); +extern void zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp, + vsecattr_t *vsecp, zfs_fuid_info_t *fuidp); +extern void zfs_xvattr_set(znode_t *zp, xvattr_t *xvap); +extern void zfs_upgrade(zfsvfs_t *zfsvfs, dmu_tx_t *tx); + +extern caddr_t zfs_map_page(page_t *, enum seg_rw); +extern void zfs_unmap_page(page_t *, caddr_t); + +extern zil_get_data_t zfs_get_data; +extern zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE]; +extern int zfsfstype; + +#endif /* _KERNEL */ + +extern int zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_FS_ZFS_ZNODE_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zil.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zil.h new file mode 100644 index 000000000000..4d02d14f7075 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zil.h @@ -0,0 +1,382 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_ZIL_H +#define _SYS_ZIL_H + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Intent log format: + * + * Each objset has its own intent log. The log header (zil_header_t) + * for objset N's intent log is kept in the Nth object of the SPA's + * intent_log objset. The log header points to a chain of log blocks, + * each of which contains log records (i.e., transactions) followed by + * a log block trailer (zil_trailer_t). The format of a log record + * depends on the record (or transaction) type, but all records begin + * with a common structure that defines the type, length, and txg. + */ + +/* + * Intent log header - this on disk structure holds fields to manage + * the log. All fields are 64 bit to easily handle cross architectures. + */ +typedef struct zil_header { + uint64_t zh_claim_txg; /* txg in which log blocks were claimed */ + uint64_t zh_replay_seq; /* highest replayed sequence number */ + blkptr_t zh_log; /* log chain */ + uint64_t zh_claim_seq; /* highest claimed sequence number */ + uint64_t zh_pad[5]; +} zil_header_t; + +/* + * Log block trailer - structure at the end of the header and each log block + * + * The zit_bt contains a zbt_cksum which for the intent log is + * the sequence number of this log block. A seq of 0 is invalid. 
+ * The zbt_cksum is checked by the SPA against the sequence + * number passed in the blk_cksum field of the blkptr_t + */ +typedef struct zil_trailer { + uint64_t zit_pad; + blkptr_t zit_next_blk; /* next block in chain */ + uint64_t zit_nused; /* bytes in log block used */ + zio_block_tail_t zit_bt; /* block trailer */ +} zil_trailer_t; + +#define ZIL_MIN_BLKSZ 4096ULL +#define ZIL_MAX_BLKSZ SPA_MAXBLOCKSIZE +#define ZIL_BLK_DATA_SZ(lwb) ((lwb)->lwb_sz - sizeof (zil_trailer_t)) + +/* + * The words of a log block checksum. + */ +#define ZIL_ZC_GUID_0 0 +#define ZIL_ZC_GUID_1 1 +#define ZIL_ZC_OBJSET 2 +#define ZIL_ZC_SEQ 3 + +typedef enum zil_create { + Z_FILE, + Z_DIR, + Z_XATTRDIR, +} zil_create_t; + +/* + * size of xvattr log section. + * its composed of lr_attr_t + xvattr bitmap + 2 64 bit timestamps + * for create time and a single 64 bit integer for all of the attributes, + * and 4 64 bit integers (32 bytes) for the scanstamp. + * + */ + +#define ZIL_XVAT_SIZE(mapsize) \ + sizeof (lr_attr_t) + (sizeof (uint32_t) * (mapsize - 1)) + \ + (sizeof (uint64_t) * 7) + +/* + * Size of ACL in log. The ACE data is padded out to properly align + * on 8 byte boundary. + */ + +#define ZIL_ACE_LENGTH(x) (roundup(x, sizeof (uint64_t))) + +/* + * Intent log transaction types and record structures + */ +#define TX_CREATE 1 /* Create file */ +#define TX_MKDIR 2 /* Make directory */ +#define TX_MKXATTR 3 /* Make XATTR directory */ +#define TX_SYMLINK 4 /* Create symbolic link to a file */ +#define TX_REMOVE 5 /* Remove file */ +#define TX_RMDIR 6 /* Remove directory */ +#define TX_LINK 7 /* Create hard link to a file */ +#define TX_RENAME 8 /* Rename a file */ +#define TX_WRITE 9 /* File write */ +#define TX_TRUNCATE 10 /* Truncate a file */ +#define TX_SETATTR 11 /* Set file attributes */ +#define TX_ACL_V0 12 /* Set old formatted ACL */ +#define TX_ACL 13 /* Set ACL */ +#define TX_CREATE_ACL 14 /* create with ACL */ +#define TX_CREATE_ATTR 15 /* create + attrs */ +#define TX_CREATE_ACL_ATTR 16 /* create with ACL + attrs */ +#define TX_MKDIR_ACL 17 /* mkdir with ACL */ +#define TX_MKDIR_ATTR 18 /* mkdir with attr */ +#define TX_MKDIR_ACL_ATTR 19 /* mkdir with ACL + attrs */ +#define TX_MAX_TYPE 20 /* Max transaction type */ + +/* + * The transactions for mkdir, symlink, remove, rmdir, link, and rename + * may have the following bit set, indicating the original request + * specified case-insensitive handling of names. + */ +#define TX_CI ((uint64_t)0x1 << 63) /* case-insensitive behavior requested */ + +/* + * Format of log records. + * The fields are carefully defined to allow them to be aligned + * and sized the same on sparc & intel architectures. + * Each log record has a common structure at the beginning. + * + * Note, lrc_seq holds two different sequence numbers. Whilst in memory + * it contains the transaction sequence number. The log record on + * disk holds the sequence number of all log records which is used to + * ensure we don't replay the same record. The two sequence numbers are + * different because the transactions can now be pushed out of order. + */ +typedef struct { /* common log record header */ + uint64_t lrc_txtype; /* intent log transaction type */ + uint64_t lrc_reclen; /* transaction record length */ + uint64_t lrc_txg; /* dmu transaction group number */ + uint64_t lrc_seq; /* see comment above */ +} lr_t; + +/* + * Handle option extended vattr attributes. 
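[Sketch, not part of the imported sources, of the masking the TX_CI definition above implies; replay_txtype is a placeholder name and the real handling lives in the replay code. The flag bit must be stripped before the type is used as an index into the TX_MAX_TYPE-sized replay vector.]

	static uint64_t
	replay_txtype(const lr_t *lr, boolean_t *cip)
	{
		*cip = (lr->lrc_txtype & TX_CI) != 0;	/* case-insensitive name op? */
		return (lr->lrc_txtype & ~TX_CI);	/* plain TX_* value */
	}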
+ * + * Whenever new attributes are added the version number + * will need to be updated as will code in + * zfs_log.c and zfs_replay.c + */ +typedef struct { + uint32_t lr_attr_masksize; /* number of elements in array */ + uint32_t lr_attr_bitmap; /* First entry of array */ + /* remainder of array and any additional fields */ +} lr_attr_t; + +/* + * log record for creates without optional ACL. + * This log record does support optional xvattr_t attributes. + */ +typedef struct { + lr_t lr_common; /* common portion of log record */ + uint64_t lr_doid; /* object id of directory */ + uint64_t lr_foid; /* object id of created file object */ + uint64_t lr_mode; /* mode of object */ + uint64_t lr_uid; /* uid of object */ + uint64_t lr_gid; /* gid of object */ + uint64_t lr_gen; /* generation (txg of creation) */ + uint64_t lr_crtime[2]; /* creation time */ + uint64_t lr_rdev; /* rdev of object to create */ + /* name of object to create follows this */ + /* for symlinks, link content follows name */ + /* for creates with xvattr data, the name follows the xvattr info */ +} lr_create_t; + +/* + * FUID ACL record will be an array of ACEs from the original ACL. + * If this array includes ephemeral IDs, the record will also include + * an array of log-specific FUIDs to replace the ephemeral IDs. + * Only one copy of each unique domain will be present, so the log-specific + * FUIDs will use an index into a compressed domain table. On replay this + * information will be used to construct real FUIDs (and bypass idmap, + * since it may not be available). + */ + +/* + * Log record for creates with optional ACL + * This log record is also used for recording any FUID + * information needed for replaying the create. If the + * file doesn't have any actual ACEs then the lr_aclcnt + * would be zero. + */ +typedef struct { + lr_create_t lr_create; /* common create portion */ + uint64_t lr_aclcnt; /* number of ACEs in ACL */ + uint64_t lr_domcnt; /* number of unique domains */ + uint64_t lr_fuidcnt; /* number of real fuids */ + uint64_t lr_acl_bytes; /* number of bytes in ACL */ + uint64_t lr_acl_flags; /* ACL flags */ + /* lr_acl_bytes number of variable sized ace's follows */ + /* if create is also setting xvattr's, then acl data follows xvattr */ + /* if ACE FUIDs are needed then they will follow the xvattr_t */ + /* Following the FUIDs will be the domain table information. */ + /* The FUIDs for the owner and group will be in the lr_create */ + /* portion of the record. 
*/ + /* name follows ACL data */ +} lr_acl_create_t; + +typedef struct { + lr_t lr_common; /* common portion of log record */ + uint64_t lr_doid; /* obj id of directory */ + /* name of object to remove follows this */ +} lr_remove_t; + +typedef struct { + lr_t lr_common; /* common portion of log record */ + uint64_t lr_doid; /* obj id of directory */ + uint64_t lr_link_obj; /* obj id of link */ + /* name of object to link follows this */ +} lr_link_t; + +typedef struct { + lr_t lr_common; /* common portion of log record */ + uint64_t lr_sdoid; /* obj id of source directory */ + uint64_t lr_tdoid; /* obj id of target directory */ + /* 2 strings: names of source and destination follow this */ +} lr_rename_t; + +typedef struct { + lr_t lr_common; /* common portion of log record */ + uint64_t lr_foid; /* file object to write */ + uint64_t lr_offset; /* offset to write to */ + uint64_t lr_length; /* user data length to write */ + uint64_t lr_blkoff; /* offset represented by lr_blkptr */ + blkptr_t lr_blkptr; /* spa block pointer for replay */ + /* write data will follow for small writes */ +} lr_write_t; + +typedef struct { + lr_t lr_common; /* common portion of log record */ + uint64_t lr_foid; /* object id of file to truncate */ + uint64_t lr_offset; /* offset to truncate from */ + uint64_t lr_length; /* length to truncate */ +} lr_truncate_t; + +typedef struct { + lr_t lr_common; /* common portion of log record */ + uint64_t lr_foid; /* file object to change attributes */ + uint64_t lr_mask; /* mask of attributes to set */ + uint64_t lr_mode; /* mode to set */ + uint64_t lr_uid; /* uid to set */ + uint64_t lr_gid; /* gid to set */ + uint64_t lr_size; /* size to set */ + uint64_t lr_atime[2]; /* access time */ + uint64_t lr_mtime[2]; /* modification time */ + /* optional attribute lr_attr_t may be here */ +} lr_setattr_t; + +typedef struct { + lr_t lr_common; /* common portion of log record */ + uint64_t lr_foid; /* obj id of file */ + uint64_t lr_aclcnt; /* number of acl entries */ + /* lr_aclcnt number of ace_t entries follow this */ +} lr_acl_v0_t; + +typedef struct { + lr_t lr_common; /* common portion of log record */ + uint64_t lr_foid; /* obj id of file */ + uint64_t lr_aclcnt; /* number of ACEs in ACL */ + uint64_t lr_domcnt; /* number of unique domains */ + uint64_t lr_fuidcnt; /* number of real fuids */ + uint64_t lr_acl_bytes; /* number of bytes in ACL */ + uint64_t lr_acl_flags; /* ACL flags */ + /* lr_acl_bytes number of variable sized ace's follows */ +} lr_acl_t; + +/* + * ZIL structure definitions, interface function prototype and globals. 
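[Illustrative sketch, not part of the imported sources, of how a small write is recorded; the helper name and arguments are placeholders, itx_t, zil_itx_create() and zil_itx_assign() are declared further down in the ZIL declarations, and the real zfs_log_write() additionally handles WR_INDIRECT/WR_NEED_COPY cases. For a WR_COPIED record the file data sits directly after the fixed lr_write_t, so the record length is the header plus the data.]

	static void
	example_log_small_write(zilog_t *zilog, znode_t *zp, uint64_t off,
	    uint64_t len, const void *buf, dmu_tx_t *tx)
	{
		itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t) + len);
		lr_write_t *lr = (lr_write_t *)&itx->itx_lr;

		lr->lr_foid = zp->z_id;
		lr->lr_offset = off;
		lr->lr_length = len;
		bcopy(buf, lr + 1, len);	/* data immediately follows the header */
		itx->itx_wr_state = WR_COPIED;

		(void) zil_itx_assign(zilog, itx, tx);
	}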
+ */ + +/* + * ZFS intent log transaction structure + */ +typedef enum { + WR_INDIRECT, /* indirect - a large write (dmu_sync() data */ + /* and put blkptr in log, rather than actual data) */ + WR_COPIED, /* immediate - data is copied into lr_write_t */ + WR_NEED_COPY, /* immediate - data needs to be copied if pushed */ +} itx_wr_state_t; + +typedef struct itx { + list_node_t itx_node; /* linkage on zl_itx_list */ + void *itx_private; /* type-specific opaque data */ + itx_wr_state_t itx_wr_state; /* write state */ + uint8_t itx_sync; /* synchronous transaction */ + uint64_t itx_sod; /* record size on disk */ + lr_t itx_lr; /* common part of log record */ + /* followed by type-specific part of lr_xx_t and its immediate data */ +} itx_t; + + +/* + * zgd_t is passed through dmu_sync() to the callback routine zfs_get_done() + * to handle the cleanup of the dmu_sync() buffer write + */ +typedef struct { + zilog_t *zgd_zilog; /* zilog */ + blkptr_t *zgd_bp; /* block pointer */ + struct rl *zgd_rl; /* range lock */ +} zgd_t; + + +typedef void zil_parse_blk_func_t(zilog_t *zilog, blkptr_t *bp, void *arg, + uint64_t txg); +typedef void zil_parse_lr_func_t(zilog_t *zilog, lr_t *lr, void *arg, + uint64_t txg); +typedef int zil_replay_func_t(); +typedef void zil_replay_cleaner_t(); +typedef int zil_get_data_t(void *arg, lr_write_t *lr, char *dbuf, zio_t *zio); + +extern uint64_t zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, + zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg); + +extern void zil_init(void); +extern void zil_fini(void); + +extern zilog_t *zil_alloc(objset_t *os, zil_header_t *zh_phys); +extern void zil_free(zilog_t *zilog); + +extern zilog_t *zil_open(objset_t *os, zil_get_data_t *get_data); +extern void zil_close(zilog_t *zilog); + +extern void zil_replay(objset_t *os, void *arg, uint64_t *txgp, + zil_replay_func_t *replay_func[TX_MAX_TYPE], + zil_replay_cleaner_t *replay_cleaner); +extern void zil_destroy(zilog_t *zilog, boolean_t keep_first); +extern void zil_rollback_destroy(zilog_t *zilog, dmu_tx_t *tx); + +extern itx_t *zil_itx_create(uint64_t txtype, size_t lrsize); +extern uint64_t zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx); + +extern void zil_commit(zilog_t *zilog, uint64_t seq, uint64_t oid); + +extern int zil_claim(char *osname, void *txarg); +extern int zil_check_log_chain(char *osname, void *txarg); +extern int zil_clear_log_chain(char *osname, void *txarg); +extern void zil_sync(zilog_t *zilog, dmu_tx_t *tx); +extern void zil_clean(zilog_t *zilog); +extern int zil_is_committed(zilog_t *zilog); + +extern int zil_suspend(zilog_t *zilog); +extern void zil_resume(zilog_t *zilog); + +extern void zil_add_block(zilog_t *zilog, blkptr_t *bp); + +extern int zil_disable; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZIL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zil_impl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zil_impl.h new file mode 100644 index 000000000000..0fc800b96dea --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zil_impl.h @@ -0,0 +1,109 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. 
+ * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_ZIL_IMPL_H +#define _SYS_ZIL_IMPL_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Log write buffer. + */ +typedef struct lwb { + zilog_t *lwb_zilog; /* back pointer to log struct */ + blkptr_t lwb_blk; /* on disk address of this log blk */ + int lwb_nused; /* # used bytes in buffer */ + int lwb_sz; /* size of block and buffer */ + char *lwb_buf; /* log write buffer */ + zio_t *lwb_zio; /* zio for this buffer */ + uint64_t lwb_max_txg; /* highest txg in this lwb */ + txg_handle_t lwb_txgh; /* txg handle for txg_exit() */ + list_node_t lwb_node; /* zilog->zl_lwb_list linkage */ +} lwb_t; + +/* + * Vdev flushing: during a zil_commit(), we build up an AVL tree of the vdevs + * we've touched so we know which ones need a write cache flush at the end. + */ +typedef struct zil_vdev_node { + uint64_t zv_vdev; /* vdev to be flushed */ + avl_node_t zv_node; /* AVL tree linkage */ +} zil_vdev_node_t; + +/* + * Stable storage intent log management structure. One per dataset. + */ +struct zilog { + kmutex_t zl_lock; /* protects most zilog_t fields */ + struct dsl_pool *zl_dmu_pool; /* DSL pool */ + spa_t *zl_spa; /* handle for read/write log */ + const zil_header_t *zl_header; /* log header buffer */ + objset_t *zl_os; /* object set we're logging */ + zil_get_data_t *zl_get_data; /* callback to get object content */ + zio_t *zl_root_zio; /* log writer root zio */ + uint64_t zl_itx_seq; /* next itx sequence number */ + uint64_t zl_commit_seq; /* committed upto this number */ + uint64_t zl_lr_seq; /* log record sequence number */ + uint64_t zl_destroy_txg; /* txg of last zil_destroy() */ + uint64_t zl_replay_seq[TXG_SIZE]; /* seq of last replayed rec */ + uint32_t zl_suspend; /* log suspend count */ + kcondvar_t zl_cv_writer; /* log writer thread completion */ + kcondvar_t zl_cv_suspend; /* log suspend completion */ + uint8_t zl_suspending; /* log is currently suspending */ + uint8_t zl_keep_first; /* keep first log block in destroy */ + uint8_t zl_stop_replay; /* don't replay any further */ + uint8_t zl_stop_sync; /* for debugging */ + uint8_t zl_writer; /* boolean: write setup in progress */ + uint8_t zl_log_error; /* boolean: log write error */ + list_t zl_itx_list; /* in-memory itx list */ + uint64_t zl_itx_list_sz; /* total size of records on list */ + uint64_t zl_cur_used; /* current commit log size used */ + uint64_t zl_prev_used; /* previous commit log size used */ + list_t zl_lwb_list; /* in-flight log write list */ + kmutex_t zl_vdev_lock; /* protects zl_vdev_tree */ + avl_tree_t zl_vdev_tree; /* vdevs to flush in zil_commit() */ + taskq_t *zl_clean_taskq; /* runs lwb and itx clean tasks */ + avl_tree_t zl_dva_tree; /* track DVAs during log parse */ + clock_t zl_replay_time; /* lbolt of when replay started */ + uint64_t zl_replay_blks; /* number of log blocks replayed */ +}; + +typedef struct zil_dva_node { + dva_t zn_dva; + 
avl_node_t zn_node; +} zil_dva_node_t; + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZIL_IMPL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio.h new file mode 100644 index 000000000000..4de78dfee014 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio.h @@ -0,0 +1,424 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _ZIO_H +#define _ZIO_H + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZBT_MAGIC 0x210da7ab10c7a11ULL /* zio data bloc tail */ + +typedef struct zio_block_tail { + uint64_t zbt_magic; /* for validation, endianness */ + zio_cksum_t zbt_cksum; /* 256-bit checksum */ +} zio_block_tail_t; + +/* + * Gang block headers are self-checksumming and contain an array + * of block pointers. 
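[Worked example, not part of the imported sources: evaluating the gang-header macros defined immediately below with the on-disk sizes this code base assumes (512-byte SPA_MINBLOCKSIZE, 128-byte blkptr_t, 40-byte zio_block_tail_t), the header holds three block pointers plus filler so that it fills its 512-byte gang block exactly.]

	SPA_GBH_NBLKPTRS        = (512 - 40) / 128             = 3
	SPA_GBH_FILLER          = (512 - 40 - 3 * 128) / 8     = 11
	sizeof (zio_gbh_phys_t) = 3 * 128 + 11 * 8 + 40        = 512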
+ */ +#define SPA_GANGBLOCKSIZE SPA_MINBLOCKSIZE +#define SPA_GBH_NBLKPTRS ((SPA_GANGBLOCKSIZE - \ + sizeof (zio_block_tail_t)) / sizeof (blkptr_t)) +#define SPA_GBH_FILLER ((SPA_GANGBLOCKSIZE - \ + sizeof (zio_block_tail_t) - \ + (SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\ + sizeof (uint64_t)) + +typedef struct zio_gbh { + blkptr_t zg_blkptr[SPA_GBH_NBLKPTRS]; + uint64_t zg_filler[SPA_GBH_FILLER]; + zio_block_tail_t zg_tail; +} zio_gbh_phys_t; + +enum zio_checksum { + ZIO_CHECKSUM_INHERIT = 0, + ZIO_CHECKSUM_ON, + ZIO_CHECKSUM_OFF, + ZIO_CHECKSUM_LABEL, + ZIO_CHECKSUM_GANG_HEADER, + ZIO_CHECKSUM_ZILOG, + ZIO_CHECKSUM_FLETCHER_2, + ZIO_CHECKSUM_FLETCHER_4, + ZIO_CHECKSUM_SHA256, + ZIO_CHECKSUM_FUNCTIONS +}; + +#define ZIO_CHECKSUM_ON_VALUE ZIO_CHECKSUM_FLETCHER_2 +#define ZIO_CHECKSUM_DEFAULT ZIO_CHECKSUM_ON + +enum zio_compress { + ZIO_COMPRESS_INHERIT = 0, + ZIO_COMPRESS_ON, + ZIO_COMPRESS_OFF, + ZIO_COMPRESS_LZJB, + ZIO_COMPRESS_EMPTY, + ZIO_COMPRESS_GZIP_1, + ZIO_COMPRESS_GZIP_2, + ZIO_COMPRESS_GZIP_3, + ZIO_COMPRESS_GZIP_4, + ZIO_COMPRESS_GZIP_5, + ZIO_COMPRESS_GZIP_6, + ZIO_COMPRESS_GZIP_7, + ZIO_COMPRESS_GZIP_8, + ZIO_COMPRESS_GZIP_9, + ZIO_COMPRESS_FUNCTIONS +}; + +#define ZIO_COMPRESS_ON_VALUE ZIO_COMPRESS_LZJB +#define ZIO_COMPRESS_DEFAULT ZIO_COMPRESS_OFF + +#define ZIO_FAILURE_MODE_WAIT 0 +#define ZIO_FAILURE_MODE_CONTINUE 1 +#define ZIO_FAILURE_MODE_PANIC 2 + +#define ZIO_PRIORITY_NOW (zio_priority_table[0]) +#define ZIO_PRIORITY_SYNC_READ (zio_priority_table[1]) +#define ZIO_PRIORITY_SYNC_WRITE (zio_priority_table[2]) +#define ZIO_PRIORITY_ASYNC_READ (zio_priority_table[3]) +#define ZIO_PRIORITY_ASYNC_WRITE (zio_priority_table[4]) +#define ZIO_PRIORITY_FREE (zio_priority_table[5]) +#define ZIO_PRIORITY_CACHE_FILL (zio_priority_table[6]) +#define ZIO_PRIORITY_LOG_WRITE (zio_priority_table[7]) +#define ZIO_PRIORITY_RESILVER (zio_priority_table[8]) +#define ZIO_PRIORITY_SCRUB (zio_priority_table[9]) +#define ZIO_PRIORITY_TABLE_SIZE 10 + +#define ZIO_FLAG_MUSTSUCCEED 0x00000 +#define ZIO_FLAG_CANFAIL 0x00001 +#define ZIO_FLAG_SPECULATIVE 0x00002 +#define ZIO_FLAG_CONFIG_WRITER 0x00004 +#define ZIO_FLAG_DONT_RETRY 0x00008 + +#define ZIO_FLAG_DONT_CACHE 0x00010 +#define ZIO_FLAG_DONT_QUEUE 0x00020 +#define ZIO_FLAG_DONT_AGGREGATE 0x00040 +#define ZIO_FLAG_DONT_PROPAGATE 0x00080 + +#define ZIO_FLAG_IO_BYPASS 0x00100 +#define ZIO_FLAG_IO_REPAIR 0x00200 +#define ZIO_FLAG_IO_RETRY 0x00400 +#define ZIO_FLAG_IO_REWRITE 0x00800 + +#define ZIO_FLAG_PROBE 0x01000 +#define ZIO_FLAG_RESILVER 0x02000 +#define ZIO_FLAG_SCRUB 0x04000 +#define ZIO_FLAG_SCRUB_THREAD 0x08000 + +#define ZIO_FLAG_GANG_CHILD 0x10000 + +#define ZIO_FLAG_GANG_INHERIT \ + (ZIO_FLAG_CANFAIL | \ + ZIO_FLAG_SPECULATIVE | \ + ZIO_FLAG_CONFIG_WRITER | \ + ZIO_FLAG_DONT_RETRY | \ + ZIO_FLAG_DONT_CACHE | \ + ZIO_FLAG_DONT_AGGREGATE | \ + ZIO_FLAG_RESILVER | \ + ZIO_FLAG_SCRUB | \ + ZIO_FLAG_SCRUB_THREAD) + +#define ZIO_FLAG_VDEV_INHERIT \ + (ZIO_FLAG_GANG_INHERIT | \ + ZIO_FLAG_IO_REPAIR | \ + ZIO_FLAG_IO_RETRY | \ + ZIO_FLAG_PROBE) + +#define ZIO_PIPELINE_CONTINUE 0x100 +#define ZIO_PIPELINE_STOP 0x101 + +#define ZIO_GANG_CHILD_FLAGS(zio) \ + (((zio)->io_flags & ZIO_FLAG_GANG_INHERIT) | \ + ZIO_FLAG_GANG_CHILD | ZIO_FLAG_CANFAIL) + +enum zio_child { + ZIO_CHILD_VDEV = 0, + ZIO_CHILD_GANG, + ZIO_CHILD_LOGICAL, + ZIO_CHILD_TYPES +}; + +enum zio_wait_type { + ZIO_WAIT_READY = 0, + ZIO_WAIT_DONE, + ZIO_WAIT_TYPES +}; + +/* + * We'll take the unused errnos, 'EBADE' and 'EBADR' (from the Convergent + * graveyard) to indicate checksum 
errors and fragmentation. + */ +#define ECKSUM EBADE +#define EFRAGS EBADR + +typedef struct zio zio_t; +typedef void zio_done_func_t(zio_t *zio); + +extern uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE]; +extern char *zio_type_name[ZIO_TYPES]; + +/* + * A bookmark is a four-tuple that uniquely + * identifies any block in the pool. By convention, the meta-objset (MOS) + * is objset 0, the meta-dnode is object 0, the root block (osphys_t) is + * level -1 of the meta-dnode, and intent log blocks (which are chained + * off the root block) have blkid == sequence number. In summary: + * + * mos is objset 0 + * meta-dnode is object 0 + * root block is + * intent log is + * + * Note: this structure is called a bookmark because its first purpose was + * to remember where to resume a pool-wide traverse. The absolute ordering + * for block visitation during traversal is defined in compare_bookmark(). + * + * Note: this structure is passed between userland and the kernel. + * Therefore it must not change size or alignment between 32/64 bit + * compilation options. + */ +typedef struct zbookmark { + uint64_t zb_objset; + uint64_t zb_object; + int64_t zb_level; + uint64_t zb_blkid; +} zbookmark_t; + +typedef struct zio_prop { + enum zio_checksum zp_checksum; + enum zio_compress zp_compress; + dmu_object_type_t zp_type; + uint8_t zp_level; + uint8_t zp_ndvas; +} zio_prop_t; + +typedef struct zio_gang_node { + zio_gbh_phys_t *gn_gbh; + struct zio_gang_node *gn_child[SPA_GBH_NBLKPTRS]; +} zio_gang_node_t; + +typedef zio_t *zio_gang_issue_func_t(zio_t *zio, blkptr_t *bp, + zio_gang_node_t *gn, void *data); + +typedef void zio_transform_func_t(zio_t *zio, void *data, uint64_t size); + +typedef struct zio_transform { + void *zt_orig_data; + uint64_t zt_orig_size; + uint64_t zt_bufsize; + zio_transform_func_t *zt_transform; + struct zio_transform *zt_next; +} zio_transform_t; + +typedef int zio_pipe_stage_t(zio_t *zio); + +/* + * The io_reexecute flags are distinct from io_flags because the child must + * be able to propagate them to the parent. The normal io_flags are local + * to the zio, not protected by any lock, and not modifiable by children; + * the reexecute flags are protected by io_lock, modifiable by children, + * and always propagated -- even when ZIO_FLAG_DONT_PROPAGATE is set. 
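[Short sketch, not part of the imported sources, following the convention spelled out above; the helper name is a placeholder. The bookmark naming objset 0's root block is objset 0 (the MOS), object 0 (its meta-dnode), level -1, blkid 0.]

	static void
	example_mos_root_bookmark(zbookmark_t *zb)
	{
		zb->zb_objset = 0;	/* the meta-objset */
		zb->zb_object = 0;	/* its meta-dnode */
		zb->zb_level = -1;	/* objset root block */
		zb->zb_blkid = 0;
	}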
+ */ +#define ZIO_REEXECUTE_NOW 0x01 +#define ZIO_REEXECUTE_SUSPEND 0x02 + +struct zio { + /* Core information about this I/O */ + zbookmark_t io_bookmark; + zio_prop_t io_prop; + zio_type_t io_type; + enum zio_child io_child_type; + int io_cmd; + uint8_t io_priority; + uint8_t io_reexecute; + uint8_t io_async_root; + uint64_t io_txg; + spa_t *io_spa; + blkptr_t *io_bp; + blkptr_t io_bp_copy; + zio_t *io_parent; + zio_t *io_child; + zio_t *io_sibling_prev; + zio_t *io_sibling_next; + zio_t *io_logical; + zio_transform_t *io_transform_stack; + + /* Callback info */ + zio_done_func_t *io_ready; + zio_done_func_t *io_done; + void *io_private; + blkptr_t io_bp_orig; + + /* Data represented by this I/O */ + void *io_data; + uint64_t io_size; + + /* Stuff for the vdev stack */ + vdev_t *io_vd; + void *io_vsd; + zio_done_func_t *io_vsd_free; + uint64_t io_offset; + uint64_t io_deadline; + avl_node_t io_offset_node; + avl_node_t io_deadline_node; + avl_tree_t *io_vdev_tree; + zio_t *io_delegate_list; + zio_t *io_delegate_next; + + /* Internal pipeline state */ + int io_flags; + zio_stage_t io_stage; + uint32_t io_pipeline; + int io_orig_flags; + zio_stage_t io_orig_stage; + uint32_t io_orig_pipeline; + int io_error; + int io_child_error[ZIO_CHILD_TYPES]; + uint64_t io_children[ZIO_CHILD_TYPES][ZIO_WAIT_TYPES]; + uint64_t *io_stall; + zio_gang_node_t *io_gang_tree; + void *io_executor; + void *io_waiter; + kmutex_t io_lock; + kcondvar_t io_cv; + + /* FMA state */ + uint64_t io_ena; +}; + +extern zio_t *zio_null(zio_t *pio, spa_t *spa, + zio_done_func_t *done, void *private, int flags); + +extern zio_t *zio_root(spa_t *spa, + zio_done_func_t *done, void *private, int flags); + +extern zio_t *zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, void *data, + uint64_t size, zio_done_func_t *done, void *private, + int priority, int flags, const zbookmark_t *zb); + +extern zio_t *zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, + void *data, uint64_t size, zio_prop_t *zp, + zio_done_func_t *ready, zio_done_func_t *done, void *private, + int priority, int flags, const zbookmark_t *zb); + +extern zio_t *zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, + void *data, uint64_t size, zio_done_func_t *done, void *private, + int priority, int flags, zbookmark_t *zb); + +extern void zio_skip_write(zio_t *zio); + +extern zio_t *zio_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, + zio_done_func_t *done, void *private, int flags); + +extern zio_t *zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, + zio_done_func_t *done, void *private, int flags); + +extern zio_t *zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, + zio_done_func_t *done, void *private, int priority, int flags); + +extern zio_t *zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, + uint64_t size, void *data, int checksum, + zio_done_func_t *done, void *private, int priority, int flags, + boolean_t labels); + +extern zio_t *zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, + uint64_t size, void *data, int checksum, + zio_done_func_t *done, void *private, int priority, int flags, + boolean_t labels); + +extern int zio_alloc_blk(spa_t *spa, uint64_t size, blkptr_t *new_bp, + blkptr_t *old_bp, uint64_t txg); +extern void zio_free_blk(spa_t *spa, blkptr_t *bp, uint64_t txg); +extern void zio_flush(zio_t *zio, vdev_t *vd); + +extern int zio_wait(zio_t *zio); +extern void zio_nowait(zio_t *zio); +extern void zio_execute(zio_t *zio); +extern void zio_interrupt(zio_t *zio); + +extern void 
*zio_buf_alloc(size_t size); +extern void zio_buf_free(void *buf, size_t size); +extern void *zio_data_buf_alloc(size_t size); +extern void zio_data_buf_free(void *buf, size_t size); + +extern void zio_resubmit_stage_async(void *); + +extern zio_t *zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd, + uint64_t offset, void *data, uint64_t size, int type, int priority, + int flags, zio_done_func_t *done, void *private); + +extern zio_t *zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, + void *data, uint64_t size, int type, int priority, + int flags, zio_done_func_t *done, void *private); + +extern void zio_vdev_io_bypass(zio_t *zio); +extern void zio_vdev_io_reissue(zio_t *zio); +extern void zio_vdev_io_redone(zio_t *zio); + +extern void zio_checksum_verified(zio_t *zio); +extern int zio_worst_error(int e1, int e2); + +extern uint8_t zio_checksum_select(uint8_t child, uint8_t parent); +extern uint8_t zio_compress_select(uint8_t child, uint8_t parent); + +extern void zio_suspend(spa_t *spa, zio_t *zio); +extern void zio_resume(spa_t *spa); +extern void zio_resume_wait(spa_t *spa); + +/* + * Initial setup and teardown. + */ +extern void zio_init(void); +extern void zio_fini(void); + +/* + * Fault injection + */ +struct zinject_record; +extern uint32_t zio_injection_enabled; +extern int zio_inject_fault(char *name, int flags, int *id, + struct zinject_record *record); +extern int zio_inject_list_next(int *id, char *name, size_t buflen, + struct zinject_record *record); +extern int zio_clear_fault(int id); +extern int zio_handle_fault_injection(zio_t *zio, int error); +extern int zio_handle_device_injection(vdev_t *vd, int error); +extern int zio_handle_label_injection(zio_t *zio, int error); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZIO_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio_checksum.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio_checksum.h new file mode 100644 index 000000000000..da407399da06 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio_checksum.h @@ -0,0 +1,73 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_ZIO_CHECKSUM_H +#define _SYS_ZIO_CHECKSUM_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Signature for checksum functions. + */ +typedef void zio_checksum_t(const void *data, uint64_t size, zio_cksum_t *zcp); + +/* + * Information about each checksum function. + */ +typedef struct zio_checksum_info { + zio_checksum_t *ci_func[2]; /* checksum function for each byteorder */ + int ci_correctable; /* number of correctable bits */ + int ci_zbt; /* uses zio block tail? 
*/ + char *ci_name; /* descriptive name */ +} zio_checksum_info_t; + +extern zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS]; + +/* + * Checksum routines. + */ +extern zio_checksum_t fletcher_2_native; +extern zio_checksum_t fletcher_4_native; +extern zio_checksum_t fletcher_4_incremental_native; + +extern zio_checksum_t fletcher_2_byteswap; +extern zio_checksum_t fletcher_4_byteswap; +extern zio_checksum_t fletcher_4_incremental_byteswap; + +extern zio_checksum_t zio_checksum_SHA256; + +extern void zio_checksum_compute(zio_t *zio, enum zio_checksum checksum, + void *data, uint64_t size); +extern int zio_checksum_error(zio_t *zio); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZIO_CHECKSUM_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio_compress.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio_compress.h new file mode 100644 index 000000000000..66ee8d45b3b6 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio_compress.h @@ -0,0 +1,82 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _SYS_ZIO_COMPRESS_H +#define _SYS_ZIO_COMPRESS_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Common signature for all zio compress/decompress functions. + */ +typedef size_t zio_compress_func_t(void *src, void *dst, + size_t s_len, size_t d_len, int); +typedef int zio_decompress_func_t(void *src, void *dst, + size_t s_len, size_t d_len, int); + +/* + * Information about each compression function. + */ +typedef struct zio_compress_info { + zio_compress_func_t *ci_compress; /* compression function */ + zio_decompress_func_t *ci_decompress; /* decompression function */ + int ci_level; /* level parameter */ + char *ci_name; /* algorithm name */ +} zio_compress_info_t; + +extern zio_compress_info_t zio_compress_table[ZIO_COMPRESS_FUNCTIONS]; + +/* + * Compression routines. + */ +extern size_t lzjb_compress(void *src, void *dst, size_t s_len, size_t d_len, + int level); +extern int lzjb_decompress(void *src, void *dst, size_t s_len, size_t d_len, + int level); +extern size_t gzip_compress(void *src, void *dst, size_t s_len, size_t d_len, + int level); +extern int gzip_decompress(void *src, void *dst, size_t s_len, size_t d_len, + int level); + +/* + * Compress and decompress data if necessary. 
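+ * + * zio_compress_data() hands back a buffer it allocates itself (via destp, + * destsizep and destbufsizep) and returns nonzero only when compression + * saved enough space to be worth storing the compressed copy; otherwise the + * caller keeps using the original data.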
+ */ +extern int zio_compress_data(int cpfunc, void *src, uint64_t srcsize, + void **destp, uint64_t *destsizep, uint64_t *destbufsizep); +extern int zio_decompress_data(int cpfunc, void *src, uint64_t srcsize, + void *dest, uint64_t destsize); + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZIO_COMPRESS_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio_impl.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio_impl.h new file mode 100644 index 000000000000..e7503b733cc0 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zio_impl.h @@ -0,0 +1,143 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#ifndef _ZIO_IMPL_H +#define _ZIO_IMPL_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * I/O Groups: pipeline stage definitions. + */ +typedef enum zio_stage { + ZIO_STAGE_OPEN = 0, /* RWFCI */ + + ZIO_STAGE_ISSUE_ASYNC, /* -W--- */ + + ZIO_STAGE_READ_BP_INIT, /* R---- */ + ZIO_STAGE_WRITE_BP_INIT, /* -W--- */ + + ZIO_STAGE_CHECKSUM_GENERATE, /* -W--- */ + + ZIO_STAGE_GANG_ASSEMBLE, /* RWFC- */ + ZIO_STAGE_GANG_ISSUE, /* RWFC- */ + + ZIO_STAGE_DVA_ALLOCATE, /* -W--- */ + ZIO_STAGE_DVA_FREE, /* --F-- */ + ZIO_STAGE_DVA_CLAIM, /* ---C- */ + + ZIO_STAGE_READY, /* RWFCI */ + + ZIO_STAGE_VDEV_IO_START, /* RW--I */ + ZIO_STAGE_VDEV_IO_DONE, /* RW--I */ + ZIO_STAGE_VDEV_IO_ASSESS, /* RW--I */ + + ZIO_STAGE_CHECKSUM_VERIFY, /* R---- */ + + ZIO_STAGE_DONE, /* RWFCI */ + ZIO_STAGES +} zio_stage_t; + +#define ZIO_INTERLOCK_STAGES \ + ((1U << ZIO_STAGE_READY) | \ + (1U << ZIO_STAGE_DONE)) + +#define ZIO_INTERLOCK_PIPELINE \ + ZIO_INTERLOCK_STAGES + +#define ZIO_VDEV_IO_STAGES \ + ((1U << ZIO_STAGE_VDEV_IO_START) | \ + (1U << ZIO_STAGE_VDEV_IO_DONE) | \ + (1U << ZIO_STAGE_VDEV_IO_ASSESS)) + +#define ZIO_VDEV_CHILD_PIPELINE \ + (ZIO_VDEV_IO_STAGES | \ + (1U << ZIO_STAGE_DONE)) + +#define ZIO_READ_COMMON_STAGES \ + (ZIO_INTERLOCK_STAGES | \ + ZIO_VDEV_IO_STAGES | \ + (1U << ZIO_STAGE_CHECKSUM_VERIFY)) + +#define ZIO_READ_PHYS_PIPELINE \ + ZIO_READ_COMMON_STAGES + +#define ZIO_READ_PIPELINE \ + (ZIO_READ_COMMON_STAGES | \ + (1U << ZIO_STAGE_READ_BP_INIT)) + +#define ZIO_WRITE_COMMON_STAGES \ + (ZIO_INTERLOCK_STAGES | \ + ZIO_VDEV_IO_STAGES | \ + (1U << ZIO_STAGE_ISSUE_ASYNC) | \ + (1U << ZIO_STAGE_CHECKSUM_GENERATE)) + +#define ZIO_WRITE_PHYS_PIPELINE \ + ZIO_WRITE_COMMON_STAGES + +#define ZIO_REWRITE_PIPELINE \ + (ZIO_WRITE_COMMON_STAGES | \ + (1U << ZIO_STAGE_WRITE_BP_INIT)) + +#define ZIO_WRITE_PIPELINE \ + (ZIO_WRITE_COMMON_STAGES | \ + (1U << ZIO_STAGE_WRITE_BP_INIT) | \ + (1U << ZIO_STAGE_DVA_ALLOCATE)) + +#define ZIO_GANG_STAGES \ + ((1U << 
ZIO_STAGE_GANG_ASSEMBLE) | \ + (1U << ZIO_STAGE_GANG_ISSUE)) + +#define ZIO_FREE_PIPELINE \ + (ZIO_INTERLOCK_STAGES | \ + (1U << ZIO_STAGE_DVA_FREE)) + +#define ZIO_CLAIM_PIPELINE \ + (ZIO_INTERLOCK_STAGES | \ + (1U << ZIO_STAGE_DVA_CLAIM)) + +#define ZIO_IOCTL_PIPELINE \ + (ZIO_INTERLOCK_STAGES | \ + (1U << ZIO_STAGE_VDEV_IO_START) | \ + (1U << ZIO_STAGE_VDEV_IO_ASSESS)) + +#define ZIO_CONFIG_LOCK_BLOCKING_STAGES \ + ((1U << ZIO_STAGE_VDEV_IO_START) | \ + (1U << ZIO_STAGE_DVA_ALLOCATE) | \ + (1U << ZIO_STAGE_DVA_CLAIM)) + +extern void zio_inject_init(void); +extern void zio_inject_fini(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZIO_IMPL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zvol.h b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zvol.h new file mode 100644 index 000000000000..06adc667e124 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/sys/zvol.h @@ -0,0 +1,70 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#ifndef _SYS_ZVOL_H +#define _SYS_ZVOL_H + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZVOL_OBJ 1ULL +#define ZVOL_ZAP_OBJ 2ULL + +#ifdef _KERNEL +extern int zvol_check_volsize(uint64_t volsize, uint64_t blocksize); +extern int zvol_check_volblocksize(uint64_t volblocksize); +extern int zvol_get_stats(objset_t *os, nvlist_t *nv); +extern void zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx); +extern int zvol_create_minor(const char *, major_t); +extern int zvol_remove_minor(const char *); +extern int zvol_set_volsize(const char *, major_t, uint64_t); +extern int zvol_set_volblocksize(const char *, uint64_t); + +extern int zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr); +extern int zvol_dump(dev_t dev, caddr_t addr, daddr_t offset, int nblocks); +extern int zvol_close(dev_t dev, int flag, int otyp, cred_t *cr); +extern int zvol_strategy(buf_t *bp); +extern int zvol_read(dev_t dev, uio_t *uiop, cred_t *cr); +extern int zvol_write(dev_t dev, uio_t *uiop, cred_t *cr); +extern int zvol_aread(dev_t dev, struct aio_req *aio, cred_t *cr); +extern int zvol_awrite(dev_t dev, struct aio_req *aio, cred_t *cr); +extern int zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, + int *rvalp); +extern int zvol_busy(void); +extern void zvol_init(void); +extern void zvol_fini(void); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_ZVOL_H */ diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/txg.c b/external/cddl/osnet/dist/uts/common/fs/zfs/txg.c new file mode 100644 index 000000000000..2bbf2f086c15 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/txg.c @@ -0,0 +1,627 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include + +/* + * Pool-wide transaction groups. + */ + +static void txg_sync_thread(dsl_pool_t *dp); +static void txg_quiesce_thread(dsl_pool_t *dp); + +int zfs_txg_timeout = 30; /* max seconds worth of delta per txg */ + +/* + * Prepare the txg subsystem. 
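+ * + * txg_init() allocates one tx_cpu_t per CPU; each slot of the TXG_SIZE ring + * gets its own counter and condition variable there, so threads entering the + * open txg via txg_hold_open() only touch their own CPU's lock.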
+ */ +void +txg_init(dsl_pool_t *dp, uint64_t txg) +{ + tx_state_t *tx = &dp->dp_tx; + int c; + bzero(tx, sizeof (tx_state_t)); + + tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP); + + for (c = 0; c < max_ncpus; c++) { + int i; + + mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL); + for (i = 0; i < TXG_SIZE; i++) { + cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT, + NULL); + } + } + + rw_init(&tx->tx_suspend, NULL, RW_DEFAULT, NULL); + mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL); + + tx->tx_open_txg = txg; +} + +/* + * Close down the txg subsystem. + */ +void +txg_fini(dsl_pool_t *dp) +{ + tx_state_t *tx = &dp->dp_tx; + int c; + + ASSERT(tx->tx_threads == 0); + + rw_destroy(&tx->tx_suspend); + mutex_destroy(&tx->tx_sync_lock); + + for (c = 0; c < max_ncpus; c++) { + int i; + + mutex_destroy(&tx->tx_cpu[c].tc_lock); + for (i = 0; i < TXG_SIZE; i++) + cv_destroy(&tx->tx_cpu[c].tc_cv[i]); + } + + kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t)); + + bzero(tx, sizeof (tx_state_t)); +} + +/* + * Start syncing transaction groups. + */ +void +txg_sync_start(dsl_pool_t *dp) +{ + tx_state_t *tx = &dp->dp_tx; + + mutex_enter(&tx->tx_sync_lock); + + dprintf("pool %p\n", dp); + + ASSERT(tx->tx_threads == 0); + + tx->tx_threads = 2; + + tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread, + dp, 0, &p0, TS_RUN, minclsyspri); + + /* + * The sync thread can need a larger-than-default stack size on + * 32-bit x86. This is due in part to nested pools and + * scrub_visitbp() recursion. + */ + tx->tx_sync_thread = thread_create(NULL, 12<<10, txg_sync_thread, + dp, 0, &p0, TS_RUN, minclsyspri); + + mutex_exit(&tx->tx_sync_lock); +} + +static void +txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr) +{ + CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG); + mutex_enter(&tx->tx_sync_lock); +} + +static void +txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp) +{ + ASSERT(*tpp != NULL); + *tpp = NULL; + tx->tx_threads--; + cv_broadcast(&tx->tx_exit_cv); + CALLB_CPR_EXIT(cpr); /* drops &tx->tx_sync_lock */ + thread_exit(); +} + +static void +txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time) +{ + CALLB_CPR_SAFE_BEGIN(cpr); + + if (time) + (void) cv_timedwait(cv, &tx->tx_sync_lock, lbolt + time); + else + cv_wait(cv, &tx->tx_sync_lock); + + CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock); +} + +/* + * Stop syncing transaction groups. + */ +void +txg_sync_stop(dsl_pool_t *dp) +{ + tx_state_t *tx = &dp->dp_tx; + + dprintf("pool %p\n", dp); + /* + * Finish off any work in progress. + */ + ASSERT(tx->tx_threads == 2); + txg_wait_synced(dp, 0); + + /* + * Wake all sync threads and wait for them to die. 
+ */ + mutex_enter(&tx->tx_sync_lock); + + ASSERT(tx->tx_threads == 2); + + tx->tx_exiting = 1; + + cv_broadcast(&tx->tx_quiesce_more_cv); + cv_broadcast(&tx->tx_quiesce_done_cv); + cv_broadcast(&tx->tx_sync_more_cv); + + while (tx->tx_threads != 0) + cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock); + + tx->tx_exiting = 0; + + mutex_exit(&tx->tx_sync_lock); +} + +uint64_t +txg_hold_open(dsl_pool_t *dp, txg_handle_t *th) +{ + tx_state_t *tx = &dp->dp_tx; + tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID]; + uint64_t txg; + + mutex_enter(&tc->tc_lock); + + txg = tx->tx_open_txg; + tc->tc_count[txg & TXG_MASK]++; + + th->th_cpu = tc; + th->th_txg = txg; + + return (txg); +} + +void +txg_rele_to_quiesce(txg_handle_t *th) +{ + tx_cpu_t *tc = th->th_cpu; + + mutex_exit(&tc->tc_lock); +} + +void +txg_rele_to_sync(txg_handle_t *th) +{ + tx_cpu_t *tc = th->th_cpu; + int g = th->th_txg & TXG_MASK; + + mutex_enter(&tc->tc_lock); + ASSERT(tc->tc_count[g] != 0); + if (--tc->tc_count[g] == 0) + cv_broadcast(&tc->tc_cv[g]); + mutex_exit(&tc->tc_lock); + + th->th_cpu = NULL; /* defensive */ +} + +static void +txg_quiesce(dsl_pool_t *dp, uint64_t txg) +{ + tx_state_t *tx = &dp->dp_tx; + int g = txg & TXG_MASK; + int c; + + /* + * Grab all tx_cpu locks so nobody else can get into this txg. + */ + for (c = 0; c < max_ncpus; c++) + mutex_enter(&tx->tx_cpu[c].tc_lock); + + ASSERT(txg == tx->tx_open_txg); + tx->tx_open_txg++; + + /* + * Now that we've incremented tx_open_txg, we can let threads + * enter the next transaction group. + */ + for (c = 0; c < max_ncpus; c++) + mutex_exit(&tx->tx_cpu[c].tc_lock); + + /* + * Quiesce the transaction group by waiting for everyone to txg_exit(). + */ + for (c = 0; c < max_ncpus; c++) { + tx_cpu_t *tc = &tx->tx_cpu[c]; + mutex_enter(&tc->tc_lock); + while (tc->tc_count[g] != 0) + cv_wait(&tc->tc_cv[g], &tc->tc_lock); + mutex_exit(&tc->tc_lock); + } +} + +static void +txg_sync_thread(dsl_pool_t *dp) +{ + tx_state_t *tx = &dp->dp_tx; + callb_cpr_t cpr; + uint64_t start, delta; + + txg_thread_enter(tx, &cpr); + + start = delta = 0; + for (;;) { + uint64_t timer, timeout = zfs_txg_timeout * hz; + uint64_t txg; + + /* + * We sync when we're scrubbing, there's someone waiting + * on us, or the quiesce thread has handed off a txg to + * us, or we have reached our timeout. + */ + timer = (delta >= timeout ? 0 : timeout - delta); + while ((dp->dp_scrub_func == SCRUB_FUNC_NONE || + spa_shutting_down(dp->dp_spa)) && + !tx->tx_exiting && timer > 0 && + tx->tx_synced_txg >= tx->tx_sync_txg_waiting && + tx->tx_quiesced_txg == 0) { + dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n", + tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp); + txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer); + delta = lbolt - start; + timer = (delta > timeout ? 0 : timeout - delta); + } + + /* + * Wait until the quiesce thread hands off a txg to us, + * prompting it to do so if necessary. + */ + while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) { + if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1) + tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1; + cv_broadcast(&tx->tx_quiesce_more_cv); + txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0); + } + + if (tx->tx_exiting) + txg_thread_exit(tx, &cpr, &tx->tx_sync_thread); + + rw_enter(&tx->tx_suspend, RW_WRITER); + + /* + * Consume the quiesced txg which has been handed off to + * us. This may cause the quiescing thread to now be + * able to quiesce another txg, so we must signal it. 
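+ * + * Only one txg can sit in the quiesced state at a time, which is why the + * quiesce thread blocks until tx_quiesced_txg drops back to zero.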
+ */ + txg = tx->tx_quiesced_txg; + tx->tx_quiesced_txg = 0; + tx->tx_syncing_txg = txg; + cv_broadcast(&tx->tx_quiesce_more_cv); + rw_exit(&tx->tx_suspend); + + dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n", + txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting); + mutex_exit(&tx->tx_sync_lock); + + start = lbolt; + spa_sync(dp->dp_spa, txg); + delta = lbolt - start; + + mutex_enter(&tx->tx_sync_lock); + rw_enter(&tx->tx_suspend, RW_WRITER); + tx->tx_synced_txg = txg; + tx->tx_syncing_txg = 0; + rw_exit(&tx->tx_suspend); + cv_broadcast(&tx->tx_sync_done_cv); + } +} + +static void +txg_quiesce_thread(dsl_pool_t *dp) +{ + tx_state_t *tx = &dp->dp_tx; + callb_cpr_t cpr; + + txg_thread_enter(tx, &cpr); + + for (;;) { + uint64_t txg; + + /* + * We quiesce when there's someone waiting on us. + * However, we can only have one txg in "quiescing" or + * "quiesced, waiting to sync" state. So we wait until + * the "quiesced, waiting to sync" txg has been consumed + * by the sync thread. + */ + while (!tx->tx_exiting && + (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting || + tx->tx_quiesced_txg != 0)) + txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0); + + if (tx->tx_exiting) + txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread); + + txg = tx->tx_open_txg; + dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n", + txg, tx->tx_quiesce_txg_waiting, + tx->tx_sync_txg_waiting); + mutex_exit(&tx->tx_sync_lock); + txg_quiesce(dp, txg); + mutex_enter(&tx->tx_sync_lock); + + /* + * Hand this txg off to the sync thread. + */ + dprintf("quiesce done, handing off txg %llu\n", txg); + tx->tx_quiesced_txg = txg; + cv_broadcast(&tx->tx_sync_more_cv); + cv_broadcast(&tx->tx_quiesce_done_cv); + } +} + +/* + * Delay this thread by 'ticks' if we are still in the open transaction + * group and there is already a waiting txg quiesing or quiesced. Abort + * the delay if this txg stalls or enters the quiesing state. 
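+ * + * This is the hook used to throttle writers: e.g. a caller that has dirtied + * a large amount of data in the open txg may call txg_delay(dp, tx->tx_txg, 1) + * to back off briefly rather than stalling the pool outright.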
+ */ +void +txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks) +{ + tx_state_t *tx = &dp->dp_tx; + int timeout = lbolt + ticks; + + /* don't delay if this txg could transition to quiesing immediately */ + if (tx->tx_open_txg > txg || + tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1) + return; + + mutex_enter(&tx->tx_sync_lock); + if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) { + mutex_exit(&tx->tx_sync_lock); + return; + } + + while (lbolt < timeout && + tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) + (void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock, + timeout); + + mutex_exit(&tx->tx_sync_lock); +} + +void +txg_wait_synced(dsl_pool_t *dp, uint64_t txg) +{ + tx_state_t *tx = &dp->dp_tx; + + mutex_enter(&tx->tx_sync_lock); + ASSERT(tx->tx_threads == 2); + if (txg == 0) + txg = tx->tx_open_txg; + if (tx->tx_sync_txg_waiting < txg) + tx->tx_sync_txg_waiting = txg; + dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n", + txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting); + while (tx->tx_synced_txg < txg) { + dprintf("broadcasting sync more " + "tx_synced=%llu waiting=%llu dp=%p\n", + tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp); + cv_broadcast(&tx->tx_sync_more_cv); + cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock); + } + mutex_exit(&tx->tx_sync_lock); +} + +void +txg_wait_open(dsl_pool_t *dp, uint64_t txg) +{ + tx_state_t *tx = &dp->dp_tx; + + mutex_enter(&tx->tx_sync_lock); + ASSERT(tx->tx_threads == 2); + if (txg == 0) + txg = tx->tx_open_txg + 1; + if (tx->tx_quiesce_txg_waiting < txg) + tx->tx_quiesce_txg_waiting = txg; + dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n", + txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting); + while (tx->tx_open_txg < txg) { + cv_broadcast(&tx->tx_quiesce_more_cv); + cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock); + } + mutex_exit(&tx->tx_sync_lock); +} + +boolean_t +txg_stalled(dsl_pool_t *dp) +{ + tx_state_t *tx = &dp->dp_tx; + return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg); +} + +boolean_t +txg_sync_waiting(dsl_pool_t *dp) +{ + tx_state_t *tx = &dp->dp_tx; + + return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting || + tx->tx_quiesced_txg != 0); +} + +void +txg_suspend(dsl_pool_t *dp) +{ + tx_state_t *tx = &dp->dp_tx; + /* XXX some code paths suspend when they are already suspended! */ + rw_enter(&tx->tx_suspend, RW_READER); +} + +void +txg_resume(dsl_pool_t *dp) +{ + tx_state_t *tx = &dp->dp_tx; + rw_exit(&tx->tx_suspend); +} + +/* + * Per-txg object lists. + */ +void +txg_list_create(txg_list_t *tl, size_t offset) +{ + int t; + + mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL); + + tl->tl_offset = offset; + + for (t = 0; t < TXG_SIZE; t++) + tl->tl_head[t] = NULL; +} + +void +txg_list_destroy(txg_list_t *tl) +{ + int t; + + for (t = 0; t < TXG_SIZE; t++) + ASSERT(txg_list_empty(tl, t)); + + mutex_destroy(&tl->tl_lock); +} + +int +txg_list_empty(txg_list_t *tl, uint64_t txg) +{ + return (tl->tl_head[txg & TXG_MASK] == NULL); +} + +/* + * Add an entry to the list. + * Returns 0 if it's a new entry, 1 if it's already there. 
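+ * + * Callers embed a txg_node_t in their own structure and hand its offset to + * txg_list_create(); a sketch with a hypothetical struct foo would be + * txg_list_create(&tl, offsetof(struct foo, foo_txg_node)); + * (void) txg_list_add(&tl, foo, txg);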
+ */ +int +txg_list_add(txg_list_t *tl, void *p, uint64_t txg) +{ + int t = txg & TXG_MASK; + txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset); + int already_on_list; + + mutex_enter(&tl->tl_lock); + already_on_list = tn->tn_member[t]; + if (!already_on_list) { + tn->tn_member[t] = 1; + tn->tn_next[t] = tl->tl_head[t]; + tl->tl_head[t] = tn; + } + mutex_exit(&tl->tl_lock); + + return (already_on_list); +} + +/* + * Remove the head of the list and return it. + */ +void * +txg_list_remove(txg_list_t *tl, uint64_t txg) +{ + int t = txg & TXG_MASK; + txg_node_t *tn; + void *p = NULL; + + mutex_enter(&tl->tl_lock); + if ((tn = tl->tl_head[t]) != NULL) { + p = (char *)tn - tl->tl_offset; + tl->tl_head[t] = tn->tn_next[t]; + tn->tn_next[t] = NULL; + tn->tn_member[t] = 0; + } + mutex_exit(&tl->tl_lock); + + return (p); +} + +/* + * Remove a specific item from the list and return it. + */ +void * +txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg) +{ + int t = txg & TXG_MASK; + txg_node_t *tn, **tp; + + mutex_enter(&tl->tl_lock); + + for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) { + if ((char *)tn - tl->tl_offset == p) { + *tp = tn->tn_next[t]; + tn->tn_next[t] = NULL; + tn->tn_member[t] = 0; + mutex_exit(&tl->tl_lock); + return (p); + } + } + + mutex_exit(&tl->tl_lock); + + return (NULL); +} + +int +txg_list_member(txg_list_t *tl, void *p, uint64_t txg) +{ + int t = txg & TXG_MASK; + txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset); + + return (tn->tn_member[t]); +} + +/* + * Walk a txg list -- only safe if you know it's not changing. + */ +void * +txg_list_head(txg_list_t *tl, uint64_t txg) +{ + int t = txg & TXG_MASK; + txg_node_t *tn = tl->tl_head[t]; + + return (tn == NULL ? NULL : (char *)tn - tl->tl_offset); +} + +void * +txg_list_next(txg_list_t *tl, void *p, uint64_t txg) +{ + int t = txg & TXG_MASK; + txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset); + + tn = tn->tn_next[t]; + + return (tn == NULL ? NULL : (char *)tn - tl->tl_offset); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/uberblock.c b/external/cddl/osnet/dist/uts/common/fs/zfs/uberblock.c new file mode 100644 index 000000000000..34d7e0c3ac74 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/uberblock.c @@ -0,0 +1,63 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2006 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +int +uberblock_verify(uberblock_t *ub) +{ + if (ub->ub_magic == BSWAP_64((uint64_t)UBERBLOCK_MAGIC)) + byteswap_uint64_array(ub, sizeof (uberblock_t)); + + if (ub->ub_magic != UBERBLOCK_MAGIC) + return (EINVAL); + + return (0); +} + +/* + * Update the uberblock and return a boolean value indicating whether + * anything changed in this transaction group. + */ +int +uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg) +{ + ASSERT(ub->ub_txg < txg); + + /* + * We explicitly do not set ub_version here, so that older versions + * continue to be written with the previous uberblock version. + */ + ub->ub_magic = UBERBLOCK_MAGIC; + ub->ub_txg = txg; + ub->ub_guid_sum = rvd->vdev_guid_sum; + ub->ub_timestamp = gethrestime_sec(); + + return (ub->ub_rootbp.blk_birth == txg); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/unique.c b/external/cddl/osnet/dist/uts/common/fs/zfs/unique.c new file mode 100644 index 000000000000..fbe7b619a29a --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/unique.c @@ -0,0 +1,116 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include + +static avl_tree_t unique_avl; +static kmutex_t unique_mtx; + +typedef struct unique { + avl_node_t un_link; + uint64_t un_value; +} unique_t; + +#define UNIQUE_MASK ((1ULL << UNIQUE_BITS) - 1) + +static int +unique_compare(const void *a, const void *b) +{ + const unique_t *una = a; + const unique_t *unb = b; + + if (una->un_value < unb->un_value) + return (-1); + if (una->un_value > unb->un_value) + return (+1); + return (0); +} + +void +unique_init(void) +{ + avl_create(&unique_avl, unique_compare, + sizeof (unique_t), offsetof(unique_t, un_link)); + mutex_init(&unique_mtx, NULL, MUTEX_DEFAULT, NULL); +} + +void +unique_fini(void) +{ + avl_destroy(&unique_avl); + mutex_destroy(&unique_mtx); +} + +uint64_t +unique_create(void) +{ + uint64_t value = unique_insert(0); + unique_remove(value); + return (value); +} + +uint64_t +unique_insert(uint64_t value) +{ + avl_index_t idx; + unique_t *un = kmem_alloc(sizeof (unique_t), KM_SLEEP); + + un->un_value = value; + + mutex_enter(&unique_mtx); + while (un->un_value == 0 || un->un_value & ~UNIQUE_MASK || + avl_find(&unique_avl, un, &idx)) { + mutex_exit(&unique_mtx); + (void) random_get_pseudo_bytes((void*)&un->un_value, + sizeof (un->un_value)); + un->un_value &= UNIQUE_MASK; + mutex_enter(&unique_mtx); + } + + avl_insert(&unique_avl, un, idx); + mutex_exit(&unique_mtx); + + return (un->un_value); +} + +void +unique_remove(uint64_t value) +{ + unique_t un_tofind; + unique_t *un; + + un_tofind.un_value = value; + mutex_enter(&unique_mtx); + un = avl_find(&unique_avl, &un_tofind, NULL); + if (un != NULL) { + avl_remove(&unique_avl, un); + kmem_free(un, sizeof (unique_t)); + } + mutex_exit(&unique_mtx); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev.c new file mode 100644 index 000000000000..16a27e514a41 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev.c @@ -0,0 +1,2425 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Virtual device management. + */ + +static vdev_ops_t *vdev_ops_table[] = { + &vdev_root_ops, + &vdev_raidz_ops, + &vdev_mirror_ops, + &vdev_replacing_ops, + &vdev_spare_ops, + &vdev_disk_ops, + &vdev_file_ops, + &vdev_missing_ops, + NULL +}; + +/* maximum scrub/resilver I/O queue per leaf vdev */ +int zfs_scrub_limit = 10; + +/* + * Given a vdev type, return the appropriate ops vector. 
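+ * + * e.g. vdev_getops("mirror") returns &vdev_mirror_ops; an unrecognized type + * string falls off the end of vdev_ops_table and yields NULL, which callers + * such as vdev_alloc() treat as EINVAL.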
+ */ +static vdev_ops_t * +vdev_getops(const char *type) +{ + vdev_ops_t *ops, **opspp; + + for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++) + if (strcmp(ops->vdev_op_type, type) == 0) + break; + + return (ops); +} + +/* + * Default asize function: return the MAX of psize with the asize of + * all children. This is what's used by anything other than RAID-Z. + */ +uint64_t +vdev_default_asize(vdev_t *vd, uint64_t psize) +{ + uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift); + uint64_t csize; + uint64_t c; + + for (c = 0; c < vd->vdev_children; c++) { + csize = vdev_psize_to_asize(vd->vdev_child[c], psize); + asize = MAX(asize, csize); + } + + return (asize); +} + +/* + * Get the replaceable or attachable device size. + * If the parent is a mirror or raidz, the replaceable size is the minimum + * psize of all its children. For the rest, just return our own psize. + * + * e.g. + * psize rsize + * root - - + * mirror/raidz - - + * disk1 20g 20g + * disk2 40g 20g + * disk3 80g 80g + */ +uint64_t +vdev_get_rsize(vdev_t *vd) +{ + vdev_t *pvd, *cvd; + uint64_t c, rsize; + + pvd = vd->vdev_parent; + + /* + * If our parent is NULL or the root, just return our own psize. + */ + if (pvd == NULL || pvd->vdev_parent == NULL) + return (vd->vdev_psize); + + rsize = 0; + + for (c = 0; c < pvd->vdev_children; c++) { + cvd = pvd->vdev_child[c]; + rsize = MIN(rsize - 1, cvd->vdev_psize - 1) + 1; + } + + return (rsize); +} + +vdev_t * +vdev_lookup_top(spa_t *spa, uint64_t vdev) +{ + vdev_t *rvd = spa->spa_root_vdev; + + ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); + + if (vdev < rvd->vdev_children) { + ASSERT(rvd->vdev_child[vdev] != NULL); + return (rvd->vdev_child[vdev]); + } + + return (NULL); +} + +vdev_t * +vdev_lookup_by_guid(vdev_t *vd, uint64_t guid) +{ + int c; + vdev_t *mvd; + + if (vd->vdev_guid == guid) + return (vd); + + for (c = 0; c < vd->vdev_children; c++) + if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) != + NULL) + return (mvd); + + return (NULL); +} + +void +vdev_add_child(vdev_t *pvd, vdev_t *cvd) +{ + size_t oldsize, newsize; + uint64_t id = cvd->vdev_id; + vdev_t **newchild; + + ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); + ASSERT(cvd->vdev_parent == NULL); + + cvd->vdev_parent = pvd; + + if (pvd == NULL) + return; + + ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL); + + oldsize = pvd->vdev_children * sizeof (vdev_t *); + pvd->vdev_children = MAX(pvd->vdev_children, id + 1); + newsize = pvd->vdev_children * sizeof (vdev_t *); + + newchild = kmem_zalloc(newsize, KM_SLEEP); + if (pvd->vdev_child != NULL) { + bcopy(pvd->vdev_child, newchild, oldsize); + kmem_free(pvd->vdev_child, oldsize); + } + + pvd->vdev_child = newchild; + pvd->vdev_child[id] = cvd; + + cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd); + ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL); + + /* + * Walk up all ancestors to update guid sum. 
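+ * + * The running guid sum is what uberblock_update() later records as + * ub_guid_sum, so it has to track children as they are added and removed.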
+ */ + for (; pvd != NULL; pvd = pvd->vdev_parent) + pvd->vdev_guid_sum += cvd->vdev_guid_sum; + + if (cvd->vdev_ops->vdev_op_leaf) + cvd->vdev_spa->spa_scrub_maxinflight += zfs_scrub_limit; +} + +void +vdev_remove_child(vdev_t *pvd, vdev_t *cvd) +{ + int c; + uint_t id = cvd->vdev_id; + + ASSERT(cvd->vdev_parent == pvd); + + if (pvd == NULL) + return; + + ASSERT(id < pvd->vdev_children); + ASSERT(pvd->vdev_child[id] == cvd); + + pvd->vdev_child[id] = NULL; + cvd->vdev_parent = NULL; + + for (c = 0; c < pvd->vdev_children; c++) + if (pvd->vdev_child[c]) + break; + + if (c == pvd->vdev_children) { + kmem_free(pvd->vdev_child, c * sizeof (vdev_t *)); + pvd->vdev_child = NULL; + pvd->vdev_children = 0; + } + + /* + * Walk up all ancestors to update guid sum. + */ + for (; pvd != NULL; pvd = pvd->vdev_parent) + pvd->vdev_guid_sum -= cvd->vdev_guid_sum; + + if (cvd->vdev_ops->vdev_op_leaf) + cvd->vdev_spa->spa_scrub_maxinflight -= zfs_scrub_limit; +} + +/* + * Remove any holes in the child array. + */ +void +vdev_compact_children(vdev_t *pvd) +{ + vdev_t **newchild, *cvd; + int oldc = pvd->vdev_children; + int newc, c; + + ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); + + for (c = newc = 0; c < oldc; c++) + if (pvd->vdev_child[c]) + newc++; + + newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP); + + for (c = newc = 0; c < oldc; c++) { + if ((cvd = pvd->vdev_child[c]) != NULL) { + newchild[newc] = cvd; + cvd->vdev_id = newc++; + } + } + + kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *)); + pvd->vdev_child = newchild; + pvd->vdev_children = newc; +} + +/* + * Allocate and minimally initialize a vdev_t. + */ +static vdev_t * +vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops) +{ + vdev_t *vd; + + vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP); + + if (spa->spa_root_vdev == NULL) { + ASSERT(ops == &vdev_root_ops); + spa->spa_root_vdev = vd; + } + + if (guid == 0) { + if (spa->spa_root_vdev == vd) { + /* + * The root vdev's guid will also be the pool guid, + * which must be unique among all pools. + */ + while (guid == 0 || spa_guid_exists(guid, 0)) + guid = spa_get_random(-1ULL); + } else { + /* + * Any other vdev's guid must be unique within the pool. + */ + while (guid == 0 || + spa_guid_exists(spa_guid(spa), guid)) + guid = spa_get_random(-1ULL); + } + ASSERT(!spa_guid_exists(spa_guid(spa), guid)); + } + + vd->vdev_spa = spa; + vd->vdev_id = id; + vd->vdev_guid = guid; + vd->vdev_guid_sum = guid; + vd->vdev_ops = ops; + vd->vdev_state = VDEV_STATE_CLOSED; + + mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL); + space_map_create(&vd->vdev_dtl_map, 0, -1ULL, 0, &vd->vdev_dtl_lock); + space_map_create(&vd->vdev_dtl_scrub, 0, -1ULL, 0, &vd->vdev_dtl_lock); + txg_list_create(&vd->vdev_ms_list, + offsetof(struct metaslab, ms_txg_node)); + txg_list_create(&vd->vdev_dtl_list, + offsetof(struct vdev, vdev_dtl_node)); + vd->vdev_stat.vs_timestamp = gethrtime(); + vdev_queue_init(vd); + vdev_cache_init(vd); + + return (vd); +} + +/* + * Allocate a new vdev. The 'alloctype' is used to control whether we are + * creating a new vdev or loading an existing one - the behavior is slightly + * different for each case. 
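+ * + * For VDEV_ALLOC_LOAD the id and guid must match what the label nvlist + * recorded; spares and l2cache devices take only the guid from the nvlist; + * a newly created vdev gets a fresh guid from vdev_alloc_common().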
+ */ +int +vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id, + int alloctype) +{ + vdev_ops_t *ops; + char *type; + uint64_t guid = 0, islog, nparity; + vdev_t *vd; + + ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); + + if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) + return (EINVAL); + + if ((ops = vdev_getops(type)) == NULL) + return (EINVAL); + + /* + * If this is a load, get the vdev guid from the nvlist. + * Otherwise, vdev_alloc_common() will generate one for us. + */ + if (alloctype == VDEV_ALLOC_LOAD) { + uint64_t label_id; + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) || + label_id != id) + return (EINVAL); + + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) + return (EINVAL); + } else if (alloctype == VDEV_ALLOC_SPARE) { + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) + return (EINVAL); + } else if (alloctype == VDEV_ALLOC_L2CACHE) { + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) + return (EINVAL); + } + + /* + * The first allocated vdev must be of type 'root'. + */ + if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL) + return (EINVAL); + + /* + * Determine whether we're a log vdev. + */ + islog = 0; + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog); + if (islog && spa_version(spa) < SPA_VERSION_SLOGS) + return (ENOTSUP); + + /* + * Set the nparity property for RAID-Z vdevs. + */ + nparity = -1ULL; + if (ops == &vdev_raidz_ops) { + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, + &nparity) == 0) { + /* + * Currently, we can only support 2 parity devices. + */ + if (nparity == 0 || nparity > 2) + return (EINVAL); + /* + * Older versions can only support 1 parity device. + */ + if (nparity == 2 && + spa_version(spa) < SPA_VERSION_RAID6) + return (ENOTSUP); + } else { + /* + * We require the parity to be specified for SPAs that + * support multiple parity levels. + */ + if (spa_version(spa) >= SPA_VERSION_RAID6) + return (EINVAL); + /* + * Otherwise, we default to 1 parity device for RAID-Z. + */ + nparity = 1; + } + } else { + nparity = 0; + } + ASSERT(nparity != -1ULL); + + vd = vdev_alloc_common(spa, id, guid, ops); + + vd->vdev_islog = islog; + vd->vdev_nparity = nparity; + + if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0) + vd->vdev_path = spa_strdup(vd->vdev_path); + if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0) + vd->vdev_devid = spa_strdup(vd->vdev_devid); + if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, + &vd->vdev_physpath) == 0) + vd->vdev_physpath = spa_strdup(vd->vdev_physpath); + + /* + * Set the whole_disk property. If it's not specified, leave the value + * as -1. + */ + if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, + &vd->vdev_wholedisk) != 0) + vd->vdev_wholedisk = -1ULL; + + /* + * Look for the 'not present' flag. This will only be set if the device + * was not present at the time of import. + */ + if (!spa->spa_import_faulted) + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, + &vd->vdev_not_present); + + /* + * Get the alignment requirement. + */ + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift); + + /* + * If we're a top-level vdev, try to load the allocation parameters. 
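+ * + * ms_array, ms_shift and asize are read straight from the label nvlist here; + * vdev_metaslab_init() later turns them into the in-core metaslab array.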
+ */ + if (parent && !parent->vdev_parent && alloctype == VDEV_ALLOC_LOAD) { + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY, + &vd->vdev_ms_array); + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT, + &vd->vdev_ms_shift); + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE, + &vd->vdev_asize); + } + + /* + * If we're a leaf vdev, try to load the DTL object and other state. + */ + if (vd->vdev_ops->vdev_op_leaf && + (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE)) { + if (alloctype == VDEV_ALLOC_LOAD) { + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL, + &vd->vdev_dtl.smo_object); + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE, + &vd->vdev_unspare); + } + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, + &vd->vdev_offline); + + /* + * When importing a pool, we want to ignore the persistent fault + * state, as the diagnosis made on another system may not be + * valid in the current context. + */ + if (spa->spa_load_state == SPA_LOAD_OPEN) { + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, + &vd->vdev_faulted); + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED, + &vd->vdev_degraded); + (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, + &vd->vdev_removed); + } + } + + /* + * Add ourselves to the parent's list of children. + */ + vdev_add_child(parent, vd); + + *vdp = vd; + + return (0); +} + +void +vdev_free(vdev_t *vd) +{ + int c; + spa_t *spa = vd->vdev_spa; + + /* + * vdev_free() implies closing the vdev first. This is simpler than + * trying to ensure complicated semantics for all callers. + */ + vdev_close(vd); + + ASSERT(!list_link_active(&vd->vdev_config_dirty_node)); + + /* + * Free all children. + */ + for (c = 0; c < vd->vdev_children; c++) + vdev_free(vd->vdev_child[c]); + + ASSERT(vd->vdev_child == NULL); + ASSERT(vd->vdev_guid_sum == vd->vdev_guid); + + /* + * Discard allocation state. + */ + if (vd == vd->vdev_top) + vdev_metaslab_fini(vd); + + ASSERT3U(vd->vdev_stat.vs_space, ==, 0); + ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0); + ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0); + + /* + * Remove this vdev from its parent's child list. + */ + vdev_remove_child(vd->vdev_parent, vd); + + ASSERT(vd->vdev_parent == NULL); + + /* + * Clean up vdev structure. + */ + vdev_queue_fini(vd); + vdev_cache_fini(vd); + + if (vd->vdev_path) + spa_strfree(vd->vdev_path); + if (vd->vdev_devid) + spa_strfree(vd->vdev_devid); + if (vd->vdev_physpath) + spa_strfree(vd->vdev_physpath); + + if (vd->vdev_isspare) + spa_spare_remove(vd); + if (vd->vdev_isl2cache) + spa_l2cache_remove(vd); + + txg_list_destroy(&vd->vdev_ms_list); + txg_list_destroy(&vd->vdev_dtl_list); + mutex_enter(&vd->vdev_dtl_lock); + space_map_unload(&vd->vdev_dtl_map); + space_map_destroy(&vd->vdev_dtl_map); + space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL); + space_map_destroy(&vd->vdev_dtl_scrub); + mutex_exit(&vd->vdev_dtl_lock); + mutex_destroy(&vd->vdev_dtl_lock); + mutex_destroy(&vd->vdev_stat_lock); + mutex_destroy(&vd->vdev_probe_lock); + + if (vd == spa->spa_root_vdev) + spa->spa_root_vdev = NULL; + + kmem_free(vd, sizeof (vdev_t)); +} + +/* + * Transfer top-level vdev state from svd to tvd. 
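+ * + * This runs when the tree is reshaped in place, e.g. when vdev_add_parent() + * inserts a mirror/replacing vdev above a top-level vdev or + * vdev_remove_parent() collapses a 1-way mirror, so the metaslab state and + * per-txg dirty lists follow whichever vdev is now the top-level one.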
+ */ +static void +vdev_top_transfer(vdev_t *svd, vdev_t *tvd) +{ + spa_t *spa = svd->vdev_spa; + metaslab_t *msp; + vdev_t *vd; + int t; + + ASSERT(tvd == tvd->vdev_top); + + tvd->vdev_ms_array = svd->vdev_ms_array; + tvd->vdev_ms_shift = svd->vdev_ms_shift; + tvd->vdev_ms_count = svd->vdev_ms_count; + + svd->vdev_ms_array = 0; + svd->vdev_ms_shift = 0; + svd->vdev_ms_count = 0; + + tvd->vdev_mg = svd->vdev_mg; + tvd->vdev_ms = svd->vdev_ms; + + svd->vdev_mg = NULL; + svd->vdev_ms = NULL; + + if (tvd->vdev_mg != NULL) + tvd->vdev_mg->mg_vd = tvd; + + tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc; + tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space; + tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace; + + svd->vdev_stat.vs_alloc = 0; + svd->vdev_stat.vs_space = 0; + svd->vdev_stat.vs_dspace = 0; + + for (t = 0; t < TXG_SIZE; t++) { + while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL) + (void) txg_list_add(&tvd->vdev_ms_list, msp, t); + while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL) + (void) txg_list_add(&tvd->vdev_dtl_list, vd, t); + if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t)) + (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t); + } + + if (list_link_active(&svd->vdev_config_dirty_node)) { + vdev_config_clean(svd); + vdev_config_dirty(tvd); + } + + if (list_link_active(&svd->vdev_state_dirty_node)) { + vdev_state_clean(svd); + vdev_state_dirty(tvd); + } + + tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio; + svd->vdev_deflate_ratio = 0; + + tvd->vdev_islog = svd->vdev_islog; + svd->vdev_islog = 0; +} + +static void +vdev_top_update(vdev_t *tvd, vdev_t *vd) +{ + int c; + + if (vd == NULL) + return; + + vd->vdev_top = tvd; + + for (c = 0; c < vd->vdev_children; c++) + vdev_top_update(tvd, vd->vdev_child[c]); +} + +/* + * Add a mirror/replacing vdev above an existing vdev. + */ +vdev_t * +vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops) +{ + spa_t *spa = cvd->vdev_spa; + vdev_t *pvd = cvd->vdev_parent; + vdev_t *mvd; + + ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); + + mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops); + + mvd->vdev_asize = cvd->vdev_asize; + mvd->vdev_ashift = cvd->vdev_ashift; + mvd->vdev_state = cvd->vdev_state; + + vdev_remove_child(pvd, cvd); + vdev_add_child(pvd, mvd); + cvd->vdev_id = mvd->vdev_children; + vdev_add_child(mvd, cvd); + vdev_top_update(cvd->vdev_top, cvd->vdev_top); + + if (mvd == mvd->vdev_top) + vdev_top_transfer(cvd, mvd); + + return (mvd); +} + +/* + * Remove a 1-way mirror/replacing vdev from the tree. + */ +void +vdev_remove_parent(vdev_t *cvd) +{ + vdev_t *mvd = cvd->vdev_parent; + vdev_t *pvd = mvd->vdev_parent; + + ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); + + ASSERT(mvd->vdev_children == 1); + ASSERT(mvd->vdev_ops == &vdev_mirror_ops || + mvd->vdev_ops == &vdev_replacing_ops || + mvd->vdev_ops == &vdev_spare_ops); + cvd->vdev_ashift = mvd->vdev_ashift; + + vdev_remove_child(mvd, cvd); + vdev_remove_child(pvd, mvd); + /* + * If cvd will replace mvd as a top-level vdev, preserve mvd's guid. + * Otherwise, we could have detached an offline device, and when we + * go to import the pool we'll think we have two top-level vdevs, + * instead of a different version of the same top-level vdev. 
+ */ + if (mvd->vdev_top == mvd) + cvd->vdev_guid = cvd->vdev_guid_sum = mvd->vdev_guid; + cvd->vdev_id = mvd->vdev_id; + vdev_add_child(pvd, cvd); + vdev_top_update(cvd->vdev_top, cvd->vdev_top); + + if (cvd == cvd->vdev_top) + vdev_top_transfer(mvd, cvd); + + ASSERT(mvd->vdev_children == 0); + vdev_free(mvd); +} + +int +vdev_metaslab_init(vdev_t *vd, uint64_t txg) +{ + spa_t *spa = vd->vdev_spa; + objset_t *mos = spa->spa_meta_objset; + metaslab_class_t *mc; + uint64_t m; + uint64_t oldc = vd->vdev_ms_count; + uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift; + metaslab_t **mspp; + int error; + + if (vd->vdev_ms_shift == 0) /* not being allocated from yet */ + return (0); + + ASSERT(oldc <= newc); + + if (vd->vdev_islog) + mc = spa->spa_log_class; + else + mc = spa->spa_normal_class; + + if (vd->vdev_mg == NULL) + vd->vdev_mg = metaslab_group_create(mc, vd); + + mspp = kmem_zalloc(newc * sizeof (*mspp), KM_SLEEP); + + if (oldc != 0) { + bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp)); + kmem_free(vd->vdev_ms, oldc * sizeof (*mspp)); + } + + vd->vdev_ms = mspp; + vd->vdev_ms_count = newc; + + for (m = oldc; m < newc; m++) { + space_map_obj_t smo = { 0, 0, 0 }; + if (txg == 0) { + uint64_t object = 0; + error = dmu_read(mos, vd->vdev_ms_array, + m * sizeof (uint64_t), sizeof (uint64_t), &object); + if (error) + return (error); + if (object != 0) { + dmu_buf_t *db; + error = dmu_bonus_hold(mos, object, FTAG, &db); + if (error) + return (error); + ASSERT3U(db->db_size, >=, sizeof (smo)); + bcopy(db->db_data, &smo, sizeof (smo)); + ASSERT3U(smo.smo_object, ==, object); + dmu_buf_rele(db, FTAG); + } + } + vd->vdev_ms[m] = metaslab_init(vd->vdev_mg, &smo, + m << vd->vdev_ms_shift, 1ULL << vd->vdev_ms_shift, txg); + } + + return (0); +} + +void +vdev_metaslab_fini(vdev_t *vd) +{ + uint64_t m; + uint64_t count = vd->vdev_ms_count; + + if (vd->vdev_ms != NULL) { + for (m = 0; m < count; m++) + if (vd->vdev_ms[m] != NULL) + metaslab_fini(vd->vdev_ms[m]); + kmem_free(vd->vdev_ms, count * sizeof (metaslab_t *)); + vd->vdev_ms = NULL; + } +} + +typedef struct vdev_probe_stats { + boolean_t vps_readable; + boolean_t vps_writeable; + int vps_flags; + zio_t *vps_root; + vdev_t *vps_vd; +} vdev_probe_stats_t; + +static void +vdev_probe_done(zio_t *zio) +{ + vdev_probe_stats_t *vps = zio->io_private; + vdev_t *vd = vps->vps_vd; + + if (zio->io_type == ZIO_TYPE_READ) { + ASSERT(zio->io_vd == vd); + if (zio->io_error == 0) + vps->vps_readable = 1; + if (zio->io_error == 0 && (spa_mode & FWRITE)) { + zio_nowait(zio_write_phys(vps->vps_root, vd, + zio->io_offset, zio->io_size, zio->io_data, + ZIO_CHECKSUM_OFF, vdev_probe_done, vps, + ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE)); + } else { + zio_buf_free(zio->io_data, zio->io_size); + } + } else if (zio->io_type == ZIO_TYPE_WRITE) { + ASSERT(zio->io_vd == vd); + if (zio->io_error == 0) + vps->vps_writeable = 1; + zio_buf_free(zio->io_data, zio->io_size); + } else if (zio->io_type == ZIO_TYPE_NULL) { + ASSERT(zio->io_vd == NULL); + ASSERT(zio == vps->vps_root); + + vd->vdev_cant_read |= !vps->vps_readable; + vd->vdev_cant_write |= !vps->vps_writeable; + + if (vdev_readable(vd) && + (vdev_writeable(vd) || !(spa_mode & FWRITE))) { + zio->io_error = 0; + } else { + ASSERT(zio->io_error != 0); + zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE, + zio->io_spa, vd, NULL, 0, 0); + zio->io_error = ENXIO; + } + kmem_free(vps, sizeof (*vps)); + } +} + +/* + * Determine whether this device is accessible by reading and writing + * to several known locations: the pad 
regions of each vdev label + * but the first (which we leave alone in case it contains a VTOC). + */ +zio_t * +vdev_probe(vdev_t *vd, zio_t *pio) +{ + spa_t *spa = vd->vdev_spa; + vdev_probe_stats_t *vps; + zio_t *zio; + + vps = kmem_zalloc(sizeof (*vps), KM_SLEEP); + + vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE | + ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_DONT_RETRY; + + if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) { + /* + * vdev_cant_read and vdev_cant_write can only transition + * from TRUE to FALSE when we have the SCL_ZIO lock as writer; + * otherwise they can only transition from FALSE to TRUE. + * This ensures that any zio looking at these values can + * assume that failures persist for the life of the I/O. + * That's important because when a device has intermittent + * connectivity problems, we want to ensure that they're + * ascribed to the device (ENXIO) and not the zio (EIO). + * + * Since we hold SCL_ZIO as writer here, clear both values + * so the probe can reevaluate from first principles. + */ + vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER; + vd->vdev_cant_read = B_FALSE; + vd->vdev_cant_write = B_FALSE; + } + + ASSERT(vd->vdev_ops->vdev_op_leaf); + + zio = zio_null(pio, spa, vdev_probe_done, vps, vps->vps_flags); + + vps->vps_root = zio; + vps->vps_vd = vd; + + for (int l = 1; l < VDEV_LABELS; l++) { + zio_nowait(zio_read_phys(zio, vd, + vdev_label_offset(vd->vdev_psize, l, + offsetof(vdev_label_t, vl_pad)), + VDEV_SKIP_SIZE, zio_buf_alloc(VDEV_SKIP_SIZE), + ZIO_CHECKSUM_OFF, vdev_probe_done, vps, + ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE)); + } + + return (zio); +} + +/* + * Prepare a virtual device for access. + */ +int +vdev_open(vdev_t *vd) +{ + int error; + int c; + uint64_t osize = 0; + uint64_t asize, psize; + uint64_t ashift = 0; + + ASSERT(vd->vdev_state == VDEV_STATE_CLOSED || + vd->vdev_state == VDEV_STATE_CANT_OPEN || + vd->vdev_state == VDEV_STATE_OFFLINE); + + vd->vdev_stat.vs_aux = VDEV_AUX_NONE; + + if (!vd->vdev_removed && vd->vdev_faulted) { + ASSERT(vd->vdev_children == 0); + vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, + VDEV_AUX_ERR_EXCEEDED); + return (ENXIO); + } else if (vd->vdev_offline) { + ASSERT(vd->vdev_children == 0); + vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE); + return (ENXIO); + } + + error = vd->vdev_ops->vdev_op_open(vd, &osize, &ashift); + + if (zio_injection_enabled && error == 0) + error = zio_handle_device_injection(vd, ENXIO); + + if (error) { + if (vd->vdev_removed && + vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED) + vd->vdev_removed = B_FALSE; + + vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, + vd->vdev_stat.vs_aux); + return (error); + } + + vd->vdev_removed = B_FALSE; + + if (vd->vdev_degraded) { + ASSERT(vd->vdev_children == 0); + vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, + VDEV_AUX_ERR_EXCEEDED); + } else { + vd->vdev_state = VDEV_STATE_HEALTHY; + } + + for (c = 0; c < vd->vdev_children; c++) + if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) { + vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, + VDEV_AUX_NONE); + break; + } + + osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t)); + + if (vd->vdev_children == 0) { + if (osize < SPA_MINDEVSIZE) { + vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_TOO_SMALL); + return (EOVERFLOW); + } + psize = osize; + asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE); + } else { + if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE - + (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) { + 
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_TOO_SMALL); + return (EOVERFLOW); + } + psize = 0; + asize = osize; + } + + vd->vdev_psize = psize; + + if (vd->vdev_asize == 0) { + /* + * This is the first-ever open, so use the computed values. + * For testing purposes, a higher ashift can be requested. + */ + vd->vdev_asize = asize; + vd->vdev_ashift = MAX(ashift, vd->vdev_ashift); + } else { + /* + * Make sure the alignment requirement hasn't increased. + */ + if (ashift > vd->vdev_top->vdev_ashift) { + vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_BAD_LABEL); + return (EINVAL); + } + + /* + * Make sure the device hasn't shrunk. + */ + if (asize < vd->vdev_asize) { + vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_BAD_LABEL); + return (EINVAL); + } + + /* + * If all children are healthy and the asize has increased, + * then we've experienced dynamic LUN growth. + */ + if (vd->vdev_state == VDEV_STATE_HEALTHY && + asize > vd->vdev_asize) { + vd->vdev_asize = asize; + } + } + + /* + * Ensure we can issue some IO before declaring the + * vdev open for business. + */ + if (vd->vdev_ops->vdev_op_leaf && + (error = zio_wait(vdev_probe(vd, NULL))) != 0) { + vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_IO_FAILURE); + return (error); + } + + /* + * If this is a top-level vdev, compute the raidz-deflation + * ratio. Note, we hard-code in 128k (1<<17) because it is the + * current "typical" blocksize. Even if SPA_MAXBLOCKSIZE + * changes, this algorithm must never change, or we will + * inconsistently account for existing bp's. + */ + if (vd->vdev_top == vd) { + vd->vdev_deflate_ratio = (1<<17) / + (vdev_psize_to_asize(vd, 1<<17) >> SPA_MINBLOCKSHIFT); + } + + /* + * If a leaf vdev has a DTL, and seems healthy, then kick off a + * resilver. But don't do this if we are doing a reopen for a + * scrub, since this would just restart the scrub we are already + * doing. + */ + if (vd->vdev_children == 0 && !vd->vdev_spa->spa_scrub_reopen) { + mutex_enter(&vd->vdev_dtl_lock); + if (vd->vdev_dtl_map.sm_space != 0 && vdev_writeable(vd)) + spa_async_request(vd->vdev_spa, SPA_ASYNC_RESILVER); + mutex_exit(&vd->vdev_dtl_lock); + } + + return (0); +} + +/* + * Called once the vdevs are all opened, this routine validates the label + * contents. This needs to be done before vdev_load() so that we don't + * inadvertently do repair I/Os to the wrong device. + * + * This function will only return failure if one of the vdevs indicates that it + * has since been destroyed or exported. This is only possible if + * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state + * will be updated but the function will return 0. + */ +int +vdev_validate(vdev_t *vd) +{ + spa_t *spa = vd->vdev_spa; + int c; + nvlist_t *label; + uint64_t guid, top_guid; + uint64_t state; + + for (c = 0; c < vd->vdev_children; c++) + if (vdev_validate(vd->vdev_child[c]) != 0) + return (EBADF); + + /* + * If the device has already failed, or was marked offline, don't do + * any further validation. Otherwise, label I/O will fail and we will + * overwrite the previous state. 
+ */ + if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) { + + if ((label = vdev_label_read_config(vd)) == NULL) { + vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_BAD_LABEL); + return (0); + } + + if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, + &guid) != 0 || guid != spa_guid(spa)) { + vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + nvlist_free(label); + return (0); + } + + /* + * If this vdev just became a top-level vdev because its + * sibling was detached, it will have adopted the parent's + * vdev guid -- but the label may or may not be on disk yet. + * Fortunately, either version of the label will have the + * same top guid, so if we're a top-level vdev, we can + * safely compare to that instead. + */ + if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, + &guid) != 0 || + nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, + &top_guid) != 0 || + (vd->vdev_guid != guid && + (vd->vdev_guid != top_guid || vd != vd->vdev_top))) { + vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + nvlist_free(label); + return (0); + } + + if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, + &state) != 0) { + vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + nvlist_free(label); + return (0); + } + + nvlist_free(label); + + if (spa->spa_load_state == SPA_LOAD_OPEN && + state != POOL_STATE_ACTIVE) + return (EBADF); + + /* + * If we were able to open and validate a vdev that was + * previously marked permanently unavailable, clear that state + * now. + */ + if (vd->vdev_not_present) + vd->vdev_not_present = 0; + } + + return (0); +} + +/* + * Close a virtual device. + */ +void +vdev_close(vdev_t *vd) +{ + vd->vdev_ops->vdev_op_close(vd); + + vdev_cache_purge(vd); + + /* + * We record the previous state before we close it, so that if we are + * doing a reopen(), we don't generate FMA ereports if we notice that + * it's still faulted. + */ + vd->vdev_prevstate = vd->vdev_state; + + if (vd->vdev_offline) + vd->vdev_state = VDEV_STATE_OFFLINE; + else + vd->vdev_state = VDEV_STATE_CLOSED; + vd->vdev_stat.vs_aux = VDEV_AUX_NONE; +} + +void +vdev_reopen(vdev_t *vd) +{ + spa_t *spa = vd->vdev_spa; + + ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); + + vdev_close(vd); + (void) vdev_open(vd); + + /* + * Call vdev_validate() here to make sure we have the same device. + * Otherwise, a device with an invalid label could be successfully + * opened in response to vdev_reopen(). + */ + if (vd->vdev_aux) { + (void) vdev_validate_aux(vd); + if (vdev_readable(vd) && vdev_writeable(vd) && + !l2arc_vdev_present(vd)) { + uint64_t size = vdev_get_rsize(vd); + l2arc_add_vdev(spa, vd, + VDEV_LABEL_START_SIZE, + size - VDEV_LABEL_START_SIZE); + } + } else { + (void) vdev_validate(vd); + } + + /* + * Reassess parent vdev's health. + */ + vdev_propagate_state(vd); +} + +int +vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) +{ + int error; + + /* + * Normally, partial opens (e.g. of a mirror) are allowed. + * For a create, however, we want to fail the request if + * there are any components we can't open. + */ + error = vdev_open(vd); + + if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { + vdev_close(vd); + return (error ? error : ENXIO); + } + + /* + * Recursively initialize all labels. + */ + if ((error = vdev_label_init(vd, txg, isreplacing ? 
+ VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { + vdev_close(vd); + return (error); + } + + return (0); +} + +/* + * This is the latter half of vdev_create(). It is distinct because it + * involves initiating transactions in order to do metaslab creation. + * For creation, we want to try to create all vdevs at once and then undo it + * if anything fails; this is much harder if we have pending transactions. + */ +void +vdev_init(vdev_t *vd, uint64_t txg) +{ + /* + * Aim for roughly 200 metaslabs per vdev. + */ + vd->vdev_ms_shift = highbit(vd->vdev_asize / 200); + vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT); + + /* + * Initialize the vdev's metaslabs. This can't fail because + * there's nothing to read when creating all new metaslabs. + */ + VERIFY(vdev_metaslab_init(vd, txg) == 0); +} + +void +vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) +{ + ASSERT(vd == vd->vdev_top); + ASSERT(ISP2(flags)); + + if (flags & VDD_METASLAB) + (void) txg_list_add(&vd->vdev_ms_list, arg, txg); + + if (flags & VDD_DTL) + (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); + + (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); +} + +void +vdev_dtl_dirty(space_map_t *sm, uint64_t txg, uint64_t size) +{ + mutex_enter(sm->sm_lock); + if (!space_map_contains(sm, txg, size)) + space_map_add(sm, txg, size); + mutex_exit(sm->sm_lock); +} + +int +vdev_dtl_contains(space_map_t *sm, uint64_t txg, uint64_t size) +{ + int dirty; + + /* + * Quick test without the lock -- covers the common case that + * there are no dirty time segments. + */ + if (sm->sm_space == 0) + return (0); + + mutex_enter(sm->sm_lock); + dirty = space_map_contains(sm, txg, size); + mutex_exit(sm->sm_lock); + + return (dirty); +} + +/* + * Reassess DTLs after a config change or scrub completion. + */ +void +vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) +{ + spa_t *spa = vd->vdev_spa; + int c; + + ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); + + if (vd->vdev_children == 0) { + mutex_enter(&vd->vdev_dtl_lock); + if (scrub_txg != 0 && + (spa->spa_scrub_started || spa->spa_scrub_errors == 0)) { + /* XXX should check scrub_done? */ + /* + * We completed a scrub up to scrub_txg. If we + * did it without rebooting, then the scrub dtl + * will be valid, so excise the old region and + * fold in the scrub dtl. Otherwise, leave the + * dtl as-is if there was an error. + */ + space_map_excise(&vd->vdev_dtl_map, 0, scrub_txg); + space_map_union(&vd->vdev_dtl_map, &vd->vdev_dtl_scrub); + } + if (scrub_done) + space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL); + mutex_exit(&vd->vdev_dtl_lock); + + if (txg != 0) + vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); + return; + } + + /* + * Make sure the DTLs are always correct under the scrub lock.
+ */ + if (vd == spa->spa_root_vdev) + mutex_enter(&spa->spa_scrub_lock); + + mutex_enter(&vd->vdev_dtl_lock); + space_map_vacate(&vd->vdev_dtl_map, NULL, NULL); + space_map_vacate(&vd->vdev_dtl_scrub, NULL, NULL); + mutex_exit(&vd->vdev_dtl_lock); + + for (c = 0; c < vd->vdev_children; c++) { + vdev_t *cvd = vd->vdev_child[c]; + vdev_dtl_reassess(cvd, txg, scrub_txg, scrub_done); + mutex_enter(&vd->vdev_dtl_lock); + space_map_union(&vd->vdev_dtl_map, &cvd->vdev_dtl_map); + space_map_union(&vd->vdev_dtl_scrub, &cvd->vdev_dtl_scrub); + mutex_exit(&vd->vdev_dtl_lock); + } + + if (vd == spa->spa_root_vdev) + mutex_exit(&spa->spa_scrub_lock); +} + +static int +vdev_dtl_load(vdev_t *vd) +{ + spa_t *spa = vd->vdev_spa; + space_map_obj_t *smo = &vd->vdev_dtl; + objset_t *mos = spa->spa_meta_objset; + dmu_buf_t *db; + int error; + + ASSERT(vd->vdev_children == 0); + + if (smo->smo_object == 0) + return (0); + + if ((error = dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)) != 0) + return (error); + + ASSERT3U(db->db_size, >=, sizeof (*smo)); + bcopy(db->db_data, smo, sizeof (*smo)); + dmu_buf_rele(db, FTAG); + + mutex_enter(&vd->vdev_dtl_lock); + error = space_map_load(&vd->vdev_dtl_map, NULL, SM_ALLOC, smo, mos); + mutex_exit(&vd->vdev_dtl_lock); + + return (error); +} + +void +vdev_dtl_sync(vdev_t *vd, uint64_t txg) +{ + spa_t *spa = vd->vdev_spa; + space_map_obj_t *smo = &vd->vdev_dtl; + space_map_t *sm = &vd->vdev_dtl_map; + objset_t *mos = spa->spa_meta_objset; + space_map_t smsync; + kmutex_t smlock; + dmu_buf_t *db; + dmu_tx_t *tx; + + tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); + + if (vd->vdev_detached) { + if (smo->smo_object != 0) { + int err = dmu_object_free(mos, smo->smo_object, tx); + ASSERT3U(err, ==, 0); + smo->smo_object = 0; + } + dmu_tx_commit(tx); + return; + } + + if (smo->smo_object == 0) { + ASSERT(smo->smo_objsize == 0); + ASSERT(smo->smo_alloc == 0); + smo->smo_object = dmu_object_alloc(mos, + DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT, + DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx); + ASSERT(smo->smo_object != 0); + vdev_config_dirty(vd->vdev_top); + } + + mutex_init(&smlock, NULL, MUTEX_DEFAULT, NULL); + + space_map_create(&smsync, sm->sm_start, sm->sm_size, sm->sm_shift, + &smlock); + + mutex_enter(&smlock); + + mutex_enter(&vd->vdev_dtl_lock); + space_map_walk(sm, space_map_add, &smsync); + mutex_exit(&vd->vdev_dtl_lock); + + space_map_truncate(smo, mos, tx); + space_map_sync(&smsync, SM_ALLOC, smo, mos, tx); + + space_map_destroy(&smsync); + + mutex_exit(&smlock); + mutex_destroy(&smlock); + + VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db)); + dmu_buf_will_dirty(db, tx); + ASSERT3U(db->db_size, >=, sizeof (*smo)); + bcopy(smo, db->db_data, sizeof (*smo)); + dmu_buf_rele(db, FTAG); + + dmu_tx_commit(tx); +} + +/* + * Determine if resilver is needed, and if so the txg range. 
+ */ +boolean_t +vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) +{ + boolean_t needed = B_FALSE; + uint64_t thismin = UINT64_MAX; + uint64_t thismax = 0; + + if (vd->vdev_children == 0) { + mutex_enter(&vd->vdev_dtl_lock); + if (vd->vdev_dtl_map.sm_space != 0 && vdev_writeable(vd)) { + space_seg_t *ss; + + ss = avl_first(&vd->vdev_dtl_map.sm_root); + thismin = ss->ss_start - 1; + ss = avl_last(&vd->vdev_dtl_map.sm_root); + thismax = ss->ss_end; + needed = B_TRUE; + } + mutex_exit(&vd->vdev_dtl_lock); + } else { + int c; + for (c = 0; c < vd->vdev_children; c++) { + vdev_t *cvd = vd->vdev_child[c]; + uint64_t cmin, cmax; + + if (vdev_resilver_needed(cvd, &cmin, &cmax)) { + thismin = MIN(thismin, cmin); + thismax = MAX(thismax, cmax); + needed = B_TRUE; + } + } + } + + if (needed && minp) { + *minp = thismin; + *maxp = thismax; + } + return (needed); +} + +void +vdev_load(vdev_t *vd) +{ + int c; + + /* + * Recursively load all children. + */ + for (c = 0; c < vd->vdev_children; c++) + vdev_load(vd->vdev_child[c]); + + /* + * If this is a top-level vdev, initialize its metaslabs. + */ + if (vd == vd->vdev_top && + (vd->vdev_ashift == 0 || vd->vdev_asize == 0 || + vdev_metaslab_init(vd, 0) != 0)) + vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + + /* + * If this is a leaf vdev, load its DTL. + */ + if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0) + vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); +} + +/* + * The special vdev case is used for hot spares and l2cache devices. Its + * sole purpose is to set the vdev state for the associated vdev. To do this, + * we make sure that we can open the underlying device, then try to read the + * label, and make sure that the label is sane and that it hasn't been + * repurposed to another pool. + */ +int +vdev_validate_aux(vdev_t *vd) +{ + nvlist_t *label; + uint64_t guid, version; + uint64_t state; + + if (!vdev_readable(vd)) + return (0); + + if ((label = vdev_label_read_config(vd)) == NULL) { + vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + return (-1); + } + + if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || + version > SPA_VERSION || + nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || + guid != vd->vdev_guid || + nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { + vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + nvlist_free(label); + return (-1); + } + + /* + * We don't actually check the pool state here. If it's in fact in + * use by another pool, we update this fact on the fly when requested.
+ */ + nvlist_free(label); + return (0); +} + +void +vdev_sync_done(vdev_t *vd, uint64_t txg) +{ + metaslab_t *msp; + + while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) + metaslab_sync_done(msp, txg); +} + +void +vdev_sync(vdev_t *vd, uint64_t txg) +{ + spa_t *spa = vd->vdev_spa; + vdev_t *lvd; + metaslab_t *msp; + dmu_tx_t *tx; + + if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) { + ASSERT(vd == vd->vdev_top); + tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); + vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, + DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); + ASSERT(vd->vdev_ms_array != 0); + vdev_config_dirty(vd); + dmu_tx_commit(tx); + } + + while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { + metaslab_sync(msp, txg); + (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); + } + + while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) + vdev_dtl_sync(lvd, txg); + + (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); +} + +uint64_t +vdev_psize_to_asize(vdev_t *vd, uint64_t psize) +{ + return (vd->vdev_ops->vdev_op_asize(vd, psize)); +} + +/* + * Mark the given vdev faulted. A faulted vdev behaves as if the device could + * not be opened, and no I/O is attempted. + */ +int +vdev_fault(spa_t *spa, uint64_t guid) +{ + vdev_t *vd; + + spa_vdev_state_enter(spa); + + if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) + return (spa_vdev_state_exit(spa, NULL, ENODEV)); + + if (!vd->vdev_ops->vdev_op_leaf) + return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); + + /* + * Faulted state takes precedence over degraded. + */ + vd->vdev_faulted = 1ULL; + vd->vdev_degraded = 0ULL; + vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, VDEV_AUX_ERR_EXCEEDED); + + /* + * If marking the vdev as faulted cause the top-level vdev to become + * unavailable, then back off and simply mark the vdev as degraded + * instead. + */ + if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) { + vd->vdev_degraded = 1ULL; + vd->vdev_faulted = 0ULL; + + /* + * If we reopen the device and it's not dead, only then do we + * mark it degraded. + */ + vdev_reopen(vd); + + if (vdev_readable(vd)) { + vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, + VDEV_AUX_ERR_EXCEEDED); + } + } + + return (spa_vdev_state_exit(spa, vd, 0)); +} + +/* + * Mark the given vdev degraded. A degraded vdev is purely an indication to the + * user that something is wrong. The vdev continues to operate as normal as far + * as I/O is concerned. + */ +int +vdev_degrade(spa_t *spa, uint64_t guid) +{ + vdev_t *vd; + + spa_vdev_state_enter(spa); + + if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) + return (spa_vdev_state_exit(spa, NULL, ENODEV)); + + if (!vd->vdev_ops->vdev_op_leaf) + return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); + + /* + * If the vdev is already faulted, then don't do anything. + */ + if (vd->vdev_faulted || vd->vdev_degraded) + return (spa_vdev_state_exit(spa, NULL, 0)); + + vd->vdev_degraded = 1ULL; + if (!vdev_is_dead(vd)) + vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, + VDEV_AUX_ERR_EXCEEDED); + + return (spa_vdev_state_exit(spa, vd, 0)); +} + +/* + * Online the given vdev. If 'unspare' is set, it implies two things. First, + * any attached spare device should be detached when the device finishes + * resilvering. Second, the online should be treated like a 'test' online case, + * so no FMA events are generated if the device fails to open. 
+ */ +int +vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) +{ + vdev_t *vd; + + spa_vdev_state_enter(spa); + + if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) + return (spa_vdev_state_exit(spa, NULL, ENODEV)); + + if (!vd->vdev_ops->vdev_op_leaf) + return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); + + vd->vdev_offline = B_FALSE; + vd->vdev_tmpoffline = B_FALSE; + vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); + vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); + vdev_reopen(vd->vdev_top); + vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; + + if (newstate) + *newstate = vd->vdev_state; + if ((flags & ZFS_ONLINE_UNSPARE) && + !vdev_is_dead(vd) && vd->vdev_parent && + vd->vdev_parent->vdev_ops == &vdev_spare_ops && + vd->vdev_parent->vdev_child[0] == vd) + vd->vdev_unspare = B_TRUE; + + (void) spa_vdev_state_exit(spa, vd, 0); + + VERIFY3U(spa_scrub(spa, POOL_SCRUB_RESILVER), ==, 0); + + return (0); +} + +int +vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) +{ + vdev_t *vd; + + spa_vdev_state_enter(spa); + + if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) + return (spa_vdev_state_exit(spa, NULL, ENODEV)); + + if (!vd->vdev_ops->vdev_op_leaf) + return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); + + /* + * If the device isn't already offline, try to offline it. + */ + if (!vd->vdev_offline) { + /* + * If this device's top-level vdev has a non-empty DTL, + * don't allow the device to be offlined. + * + * XXX -- make this more precise by allowing the offline + * as long as the remaining devices don't have any DTL holes. + */ + if (vd->vdev_top->vdev_dtl_map.sm_space != 0) + return (spa_vdev_state_exit(spa, NULL, EBUSY)); + + /* + * Offline this device and reopen its top-level vdev. + * If this action results in the top-level vdev becoming + * unusable, undo it and fail the request. + */ + vd->vdev_offline = B_TRUE; + vdev_reopen(vd->vdev_top); + if (vdev_is_dead(vd->vdev_top) && vd->vdev_aux == NULL) { + vd->vdev_offline = B_FALSE; + vdev_reopen(vd->vdev_top); + return (spa_vdev_state_exit(spa, NULL, EBUSY)); + } + } + + vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); + + return (spa_vdev_state_exit(spa, vd, 0)); +} + +/* + * Clear the error counts associated with this vdev. Unlike vdev_online() and + * vdev_offline(), we assume the spa config is locked. We also clear all + * children. If 'vd' is NULL, then the user wants to clear all vdevs. + */ +void +vdev_clear(spa_t *spa, vdev_t *vd) +{ + vdev_t *rvd = spa->spa_root_vdev; + + ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); + + if (vd == NULL) + vd = rvd; + + vd->vdev_stat.vs_read_errors = 0; + vd->vdev_stat.vs_write_errors = 0; + vd->vdev_stat.vs_checksum_errors = 0; + + for (int c = 0; c < vd->vdev_children; c++) + vdev_clear(spa, vd->vdev_child[c]); + + /* + * If we're in the FAULTED state or have experienced failed I/O, then + * clear the persistent state and attempt to reopen the device. We + * also mark the vdev config dirty, so that the new faulted state is + * written out to disk. 
+ */ + if (vd->vdev_faulted || vd->vdev_degraded || + !vdev_readable(vd) || !vdev_writeable(vd)) { + + vd->vdev_faulted = vd->vdev_degraded = 0; + vd->vdev_cant_read = B_FALSE; + vd->vdev_cant_write = B_FALSE; + + vdev_reopen(vd); + + if (vd != rvd) + vdev_state_dirty(vd->vdev_top); + + if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) + spa_async_request(spa, SPA_ASYNC_RESILVER); + + spa_event_notify(spa, vd, ESC_ZFS_VDEV_CLEAR); + } +} + +boolean_t +vdev_is_dead(vdev_t *vd) +{ + return (vd->vdev_state < VDEV_STATE_DEGRADED); +} + +boolean_t +vdev_readable(vdev_t *vd) +{ + return (!vdev_is_dead(vd) && !vd->vdev_cant_read); +} + +boolean_t +vdev_writeable(vdev_t *vd) +{ + return (!vdev_is_dead(vd) && !vd->vdev_cant_write); +} + +boolean_t +vdev_allocatable(vdev_t *vd) +{ + /* + * We currently allow allocations from vdevs which maybe in the + * process of reopening (i.e. VDEV_STATE_CLOSED). If the device + * fails to reopen then we'll catch it later when we're holding + * the proper locks. + */ + return (!(vdev_is_dead(vd) && vd->vdev_state != VDEV_STATE_CLOSED) && + !vd->vdev_cant_write); +} + +boolean_t +vdev_accessible(vdev_t *vd, zio_t *zio) +{ + ASSERT(zio->io_vd == vd); + + if (vdev_is_dead(vd) || vd->vdev_remove_wanted) + return (B_FALSE); + + if (zio->io_type == ZIO_TYPE_READ) + return (!vd->vdev_cant_read); + + if (zio->io_type == ZIO_TYPE_WRITE) + return (!vd->vdev_cant_write); + + return (B_TRUE); +} + +/* + * Get statistics for the given vdev. + */ +void +vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) +{ + vdev_t *rvd = vd->vdev_spa->spa_root_vdev; + + mutex_enter(&vd->vdev_stat_lock); + bcopy(&vd->vdev_stat, vs, sizeof (*vs)); + vs->vs_scrub_errors = vd->vdev_spa->spa_scrub_errors; + vs->vs_timestamp = gethrtime() - vs->vs_timestamp; + vs->vs_state = vd->vdev_state; + vs->vs_rsize = vdev_get_rsize(vd); + mutex_exit(&vd->vdev_stat_lock); + + /* + * If we're getting stats on the root vdev, aggregate the I/O counts + * over all top-level vdevs (i.e. the direct children of the root). + */ + if (vd == rvd) { + for (int c = 0; c < rvd->vdev_children; c++) { + vdev_t *cvd = rvd->vdev_child[c]; + vdev_stat_t *cvs = &cvd->vdev_stat; + + mutex_enter(&vd->vdev_stat_lock); + for (int t = 0; t < ZIO_TYPES; t++) { + vs->vs_ops[t] += cvs->vs_ops[t]; + vs->vs_bytes[t] += cvs->vs_bytes[t]; + } + vs->vs_scrub_examined += cvs->vs_scrub_examined; + mutex_exit(&vd->vdev_stat_lock); + } + } +} + +void +vdev_clear_stats(vdev_t *vd) +{ + mutex_enter(&vd->vdev_stat_lock); + vd->vdev_stat.vs_space = 0; + vd->vdev_stat.vs_dspace = 0; + vd->vdev_stat.vs_alloc = 0; + mutex_exit(&vd->vdev_stat_lock); +} + +void +vdev_stat_update(zio_t *zio, uint64_t psize) +{ + vdev_t *rvd = zio->io_spa->spa_root_vdev; + vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; + vdev_t *pvd; + uint64_t txg = zio->io_txg; + vdev_stat_t *vs = &vd->vdev_stat; + zio_type_t type = zio->io_type; + int flags = zio->io_flags; + + /* + * If this i/o is a gang leader, it didn't do any actual work. + */ + if (zio->io_gang_tree) + return; + + if (zio->io_error == 0) { + /* + * If this is a root i/o, don't count it -- we've already + * counted the top-level vdevs, and vdev_get_stats() will + * aggregate them when asked. This reduces contention on + * the root vdev_stat_lock and implicitly handles blocks + * that compress away to holes, for which there is no i/o. + * (Holes never create vdev children, so all the counters + * remain zero, which is what we want.) 
+ * + * Note: this only applies to successful i/o (io_error == 0) + * because unlike i/o counts, errors are not additive. + * When reading a ditto block, for example, failure of + * one top-level vdev does not imply a root-level error. + */ + if (vd == rvd) + return; + + ASSERT(vd == zio->io_vd); + if (!(flags & ZIO_FLAG_IO_BYPASS)) { + mutex_enter(&vd->vdev_stat_lock); + vs->vs_ops[type]++; + vs->vs_bytes[type] += psize; + mutex_exit(&vd->vdev_stat_lock); + } + if (flags & ZIO_FLAG_IO_REPAIR) { + ASSERT(zio->io_delegate_list == NULL); + mutex_enter(&vd->vdev_stat_lock); + if (flags & ZIO_FLAG_SCRUB_THREAD) + vs->vs_scrub_repaired += psize; + else + vs->vs_self_healed += psize; + mutex_exit(&vd->vdev_stat_lock); + } + return; + } + + if (flags & ZIO_FLAG_SPECULATIVE) + return; + + mutex_enter(&vd->vdev_stat_lock); + if (type == ZIO_TYPE_READ) { + if (zio->io_error == ECKSUM) + vs->vs_checksum_errors++; + else + vs->vs_read_errors++; + } + if (type == ZIO_TYPE_WRITE) + vs->vs_write_errors++; + mutex_exit(&vd->vdev_stat_lock); + + if (type == ZIO_TYPE_WRITE && txg != 0 && vd->vdev_children == 0) { + if (flags & ZIO_FLAG_SCRUB_THREAD) { + ASSERT(flags & ZIO_FLAG_IO_REPAIR); + for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) + vdev_dtl_dirty(&pvd->vdev_dtl_scrub, txg, 1); + } + if (!(flags & ZIO_FLAG_IO_REPAIR)) { + if (vdev_dtl_contains(&vd->vdev_dtl_map, txg, 1)) + return; + vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); + for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) + vdev_dtl_dirty(&pvd->vdev_dtl_map, txg, 1); + } + } +} + +void +vdev_scrub_stat_update(vdev_t *vd, pool_scrub_type_t type, boolean_t complete) +{ + int c; + vdev_stat_t *vs = &vd->vdev_stat; + + for (c = 0; c < vd->vdev_children; c++) + vdev_scrub_stat_update(vd->vdev_child[c], type, complete); + + mutex_enter(&vd->vdev_stat_lock); + + if (type == POOL_SCRUB_NONE) { + /* + * Update completion and end time. Leave everything else alone + * so we can report what happened during the previous scrub. + */ + vs->vs_scrub_complete = complete; + vs->vs_scrub_end = gethrestime_sec(); + } else { + vs->vs_scrub_type = type; + vs->vs_scrub_complete = 0; + vs->vs_scrub_examined = 0; + vs->vs_scrub_repaired = 0; + vs->vs_scrub_start = gethrestime_sec(); + vs->vs_scrub_end = 0; + } + + mutex_exit(&vd->vdev_stat_lock); +} + +/* + * Update the in-core space usage stats for this vdev and the root vdev. + */ +void +vdev_space_update(vdev_t *vd, int64_t space_delta, int64_t alloc_delta, + boolean_t update_root) +{ + int64_t dspace_delta = space_delta; + spa_t *spa = vd->vdev_spa; + vdev_t *rvd = spa->spa_root_vdev; + + ASSERT(vd == vd->vdev_top); + + /* + * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion + * factor. We must calculate this here and not at the root vdev + * because the root vdev's psize-to-asize is simply the max of its + * childrens', thus not accurate enough for us. + */ + ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0); + dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) * + vd->vdev_deflate_ratio; + + mutex_enter(&vd->vdev_stat_lock); + vd->vdev_stat.vs_space += space_delta; + vd->vdev_stat.vs_alloc += alloc_delta; + vd->vdev_stat.vs_dspace += dspace_delta; + mutex_exit(&vd->vdev_stat_lock); + + if (update_root) { + ASSERT(rvd == vd->vdev_parent); + ASSERT(vd->vdev_ms_count != 0); + + /* + * Don't count non-normal (e.g. intent log) space as part of + * the pool's capacity. 
+ */ + if (vd->vdev_mg->mg_class != spa->spa_normal_class) + return; + + mutex_enter(&rvd->vdev_stat_lock); + rvd->vdev_stat.vs_space += space_delta; + rvd->vdev_stat.vs_alloc += alloc_delta; + rvd->vdev_stat.vs_dspace += dspace_delta; + mutex_exit(&rvd->vdev_stat_lock); + } +} + +/* + * Mark a top-level vdev's config as dirty, placing it on the dirty list + * so that it will be written out next time the vdev configuration is synced. + * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. + */ +void +vdev_config_dirty(vdev_t *vd) +{ + spa_t *spa = vd->vdev_spa; + vdev_t *rvd = spa->spa_root_vdev; + int c; + + /* + * If this is an aux vdev (as with l2cache devices), then we update the + * vdev config manually and set the sync flag. + */ + if (vd->vdev_aux != NULL) { + spa_aux_vdev_t *sav = vd->vdev_aux; + nvlist_t **aux; + uint_t naux; + + for (c = 0; c < sav->sav_count; c++) { + if (sav->sav_vdevs[c] == vd) + break; + } + + if (c == sav->sav_count) { + /* + * We're being removed. There's nothing more to do. + */ + ASSERT(sav->sav_sync == B_TRUE); + return; + } + + sav->sav_sync = B_TRUE; + + VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, + ZPOOL_CONFIG_L2CACHE, &aux, &naux) == 0); + + ASSERT(c < naux); + + /* + * Setting the nvlist in the middle of the array is a little + * sketchy, but it will work. + */ + nvlist_free(aux[c]); + aux[c] = vdev_config_generate(spa, vd, B_TRUE, B_FALSE, B_TRUE); + + return; + } + + /* + * The dirty list is protected by the SCL_CONFIG lock. The caller + * must either hold SCL_CONFIG as writer, or must be the sync thread + * (which holds SCL_CONFIG as reader). There's only one sync thread, + * so this is sufficient to ensure mutual exclusion. + */ + ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || + (dsl_pool_sync_context(spa_get_dsl(spa)) && + spa_config_held(spa, SCL_CONFIG, RW_READER))); + + if (vd == rvd) { + for (c = 0; c < rvd->vdev_children; c++) + vdev_config_dirty(rvd->vdev_child[c]); + } else { + ASSERT(vd == vd->vdev_top); + + if (!list_link_active(&vd->vdev_config_dirty_node)) + list_insert_head(&spa->spa_config_dirty_list, vd); + } +} + +void +vdev_config_clean(vdev_t *vd) +{ + spa_t *spa = vd->vdev_spa; + + ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || + (dsl_pool_sync_context(spa_get_dsl(spa)) && + spa_config_held(spa, SCL_CONFIG, RW_READER))); + + ASSERT(list_link_active(&vd->vdev_config_dirty_node)); + list_remove(&spa->spa_config_dirty_list, vd); +} + +/* + * Mark a top-level vdev's state as dirty, so that the next pass of + * spa_sync() can convert this into vdev_config_dirty(). We distinguish + * the state changes from larger config changes because they require + * much less locking, and are often needed for administrative actions. + */ +void +vdev_state_dirty(vdev_t *vd) +{ + spa_t *spa = vd->vdev_spa; + + ASSERT(vd == vd->vdev_top); + + /* + * The state list is protected by the SCL_STATE lock. The caller + * must either hold SCL_STATE as writer, or must be the sync thread + * (which holds SCL_STATE as reader). There's only one sync thread, + * so this is sufficient to ensure mutual exclusion.
+ */ + ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || + (dsl_pool_sync_context(spa_get_dsl(spa)) && + spa_config_held(spa, SCL_STATE, RW_READER))); + + if (!list_link_active(&vd->vdev_state_dirty_node)) + list_insert_head(&spa->spa_state_dirty_list, vd); +} + +void +vdev_state_clean(vdev_t *vd) +{ + spa_t *spa = vd->vdev_spa; + + ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || + (dsl_pool_sync_context(spa_get_dsl(spa)) && + spa_config_held(spa, SCL_STATE, RW_READER))); + + ASSERT(list_link_active(&vd->vdev_state_dirty_node)); + list_remove(&spa->spa_state_dirty_list, vd); +} + +/* + * Propagate vdev state up from children to parent. + */ +void +vdev_propagate_state(vdev_t *vd) +{ + vdev_t *rvd = vd->vdev_spa->spa_root_vdev; + int degraded = 0, faulted = 0; + int corrupted = 0; + int c; + vdev_t *child; + + if (vd->vdev_children > 0) { + for (c = 0; c < vd->vdev_children; c++) { + child = vd->vdev_child[c]; + + if (!vdev_readable(child) || + (!vdev_writeable(child) && (spa_mode & FWRITE))) { + /* + * Root special: if there is a top-level log + * device, treat the root vdev as if it were + * degraded. + */ + if (child->vdev_islog && vd == rvd) + degraded++; + else + faulted++; + } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { + degraded++; + } + + if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) + corrupted++; + } + + vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); + + /* + * Root special: if there is a top-level vdev that cannot be + * opened due to corrupted metadata, then propagate the root + * vdev's aux state as 'corrupt' rather than 'insufficient + * replicas'. + */ + if (corrupted && vd == rvd && + rvd->vdev_state == VDEV_STATE_CANT_OPEN) + vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_CORRUPT_DATA); + } + + if (vd->vdev_parent) + vdev_propagate_state(vd->vdev_parent); +} + +/* + * Set a vdev's state. If this is during an open, we don't update the parent + * state, because we're in the process of opening children depth-first. + * Otherwise, we propagate the change to the parent. + * + * If this routine places a device in a faulted state, an appropriate ereport is + * generated. + */ +void +vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) +{ + uint64_t save_state; + spa_t *spa = vd->vdev_spa; + + if (state == vd->vdev_state) { + vd->vdev_stat.vs_aux = aux; + return; + } + + save_state = vd->vdev_state; + + vd->vdev_state = state; + vd->vdev_stat.vs_aux = aux; + + /* + * If we are setting the vdev state to anything but an open state, then + * always close the underlying device. Otherwise, we keep accessible + * but invalid devices open forever. We don't call vdev_close() itself, + * because that implies some extra checks (offline, etc) that we don't + * want here. This is limited to leaf devices, because otherwise + * closing the device will affect other children. + */ + if (vdev_is_dead(vd) && vd->vdev_ops->vdev_op_leaf) + vd->vdev_ops->vdev_op_close(vd); + + if (vd->vdev_removed && + state == VDEV_STATE_CANT_OPEN && + (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { + /* + * If the previous state is set to VDEV_STATE_REMOVED, then this + * device was previously marked removed and someone attempted to + * reopen it. If this failed due to a nonexistent device, then + * keep the device in the REMOVED state. We also let this be if + * it is one of our special test online cases, which is only + * attempting to online the device and shouldn't generate an FMA + * fault. 
+ */ + vd->vdev_state = VDEV_STATE_REMOVED; + vd->vdev_stat.vs_aux = VDEV_AUX_NONE; + } else if (state == VDEV_STATE_REMOVED) { + /* + * Indicate to the ZFS DE that this device has been removed, and + * any recent errors should be ignored. + */ + zfs_post_remove(spa, vd); + vd->vdev_removed = B_TRUE; + } else if (state == VDEV_STATE_CANT_OPEN) { + /* + * If we fail to open a vdev during an import, we mark it as + * "not available", which signifies that it was never there to + * begin with. Failure to open such a device is not considered + * an error. + */ + if (spa->spa_load_state == SPA_LOAD_IMPORT && + !spa->spa_import_faulted && + vd->vdev_ops->vdev_op_leaf) + vd->vdev_not_present = 1; + + /* + * Post the appropriate ereport. If the 'prevstate' field is + * set to something other than VDEV_STATE_UNKNOWN, it indicates + * that this is part of a vdev_reopen(). In this case, we don't + * want to post the ereport if the device was already in the + * CANT_OPEN state beforehand. + * + * If the 'checkremove' flag is set, then this is an attempt to + * online the device in response to an insertion event. If we + * hit this case, then we have detected an insertion event for a + * faulted or offline device that wasn't in the removed state. + * In this scenario, we don't post an ereport because we are + * about to replace the device, or attempt an online with + * vdev_forcefault, which will generate the fault for us. + */ + if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && + !vd->vdev_not_present && !vd->vdev_checkremove && + vd != spa->spa_root_vdev) { + const char *class; + + switch (aux) { + case VDEV_AUX_OPEN_FAILED: + class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; + break; + case VDEV_AUX_CORRUPT_DATA: + class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; + break; + case VDEV_AUX_NO_REPLICAS: + class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; + break; + case VDEV_AUX_BAD_GUID_SUM: + class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; + break; + case VDEV_AUX_TOO_SMALL: + class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; + break; + case VDEV_AUX_BAD_LABEL: + class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; + break; + case VDEV_AUX_IO_FAILURE: + class = FM_EREPORT_ZFS_IO_FAILURE; + break; + default: + class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; + } + + zfs_ereport_post(class, spa, vd, NULL, save_state, 0); + } + + /* Erase any notion of persistent removed state */ + vd->vdev_removed = B_FALSE; + } else { + vd->vdev_removed = B_FALSE; + } + + if (!isopen) + vdev_propagate_state(vd); +} + +/* + * Check the vdev configuration to ensure that it's capable of supporting + * a root pool. Currently, we do not support RAID-Z or partial configuration. + * In addition, only a single top-level vdev is allowed and none of the leaves + * can be wholedisks. 
+ */ +boolean_t +vdev_is_bootable(vdev_t *vd) +{ + int c; + + if (!vd->vdev_ops->vdev_op_leaf) { + char *vdev_type = vd->vdev_ops->vdev_op_type; + + if (strcmp(vdev_type, VDEV_TYPE_ROOT) == 0 && + vd->vdev_children > 1) { + return (B_FALSE); + } else if (strcmp(vdev_type, VDEV_TYPE_RAIDZ) == 0 || + strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) { + return (B_FALSE); + } + } else if (vd->vdev_wholedisk == 1) { + return (B_FALSE); + } + + for (c = 0; c < vd->vdev_children; c++) { + if (!vdev_is_bootable(vd->vdev_child[c])) + return (B_FALSE); + } + return (B_TRUE); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_cache.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_cache.c new file mode 100644 index 000000000000..5a7b59f6ed84 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_cache.c @@ -0,0 +1,425 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include + +/* + * Virtual device read-ahead caching. + * + * This file implements a simple LRU read-ahead cache. When the DMU reads + * a given block, it will often want other, nearby blocks soon thereafter. + * We take advantage of this by reading a larger disk region and caching + * the result. In the best case, this can turn 128 back-to-back 512-byte + * reads into a single 64k read followed by 127 cache hits; this reduces + * latency dramatically. In the worst case, it can turn an isolated 512-byte + * read into a 64k read, which doesn't affect latency all that much but is + * terribly wasteful of bandwidth. A more intelligent version of the cache + * could keep track of access patterns and not do read-ahead unless it sees + * at least two temporally close I/Os to the same region. Currently, only + * metadata I/O is inflated. A further enhancement could take advantage of + * more semantic information about the I/O. And it could use something + * faster than an AVL tree; that was chosen solely for convenience. + * + * There are five cache operations: allocate, fill, read, write, evict. + * + * (1) Allocate. This reserves a cache entry for the specified region. + * We separate the allocate and fill operations so that multiple threads + * don't generate I/O for the same cache miss. + * + * (2) Fill. When the I/O for a cache miss completes, the fill routine + * places the data in the previously allocated cache entry. + * + * (3) Read. Read data from the cache. + * + * (4) Write. Update cache contents after write completion. + * + * (5) Evict. When allocating a new entry, we evict the oldest (LRU) entry + * if the total cache size exceeds zfs_vdev_cache_size.
+ */ + +/* + * These tunables are for performance analysis. + */ +/* + * All i/os smaller than zfs_vdev_cache_max will be turned into + * 1<ve_offset < ve2->ve_offset) + return (-1); + if (ve1->ve_offset > ve2->ve_offset) + return (1); + return (0); +} + +static int +vdev_cache_lastused_compare(const void *a1, const void *a2) +{ + const vdev_cache_entry_t *ve1 = a1; + const vdev_cache_entry_t *ve2 = a2; + + if (ve1->ve_lastused < ve2->ve_lastused) + return (-1); + if (ve1->ve_lastused > ve2->ve_lastused) + return (1); + + /* + * Among equally old entries, sort by offset to ensure uniqueness. + */ + return (vdev_cache_offset_compare(a1, a2)); +} + +/* + * Evict the specified entry from the cache. + */ +static void +vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve) +{ + ASSERT(MUTEX_HELD(&vc->vc_lock)); + ASSERT(ve->ve_fill_io == NULL); + ASSERT(ve->ve_data != NULL); + + avl_remove(&vc->vc_lastused_tree, ve); + avl_remove(&vc->vc_offset_tree, ve); + zio_buf_free(ve->ve_data, VCBS); + kmem_free(ve, sizeof (vdev_cache_entry_t)); +} + +/* + * Allocate an entry in the cache. At the point we don't have the data, + * we're just creating a placeholder so that multiple threads don't all + * go off and read the same blocks. + */ +static vdev_cache_entry_t * +vdev_cache_allocate(zio_t *zio) +{ + vdev_cache_t *vc = &zio->io_vd->vdev_cache; + uint64_t offset = P2ALIGN(zio->io_offset, VCBS); + vdev_cache_entry_t *ve; + + ASSERT(MUTEX_HELD(&vc->vc_lock)); + + if (zfs_vdev_cache_size == 0) + return (NULL); + + /* + * If adding a new entry would exceed the cache size, + * evict the oldest entry (LRU). + */ + if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) > + zfs_vdev_cache_size) { + ve = avl_first(&vc->vc_lastused_tree); + if (ve->ve_fill_io != NULL) + return (NULL); + ASSERT(ve->ve_hits != 0); + vdev_cache_evict(vc, ve); + } + + ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP); + ve->ve_offset = offset; + ve->ve_lastused = lbolt; + ve->ve_data = zio_buf_alloc(VCBS); + + avl_add(&vc->vc_offset_tree, ve); + avl_add(&vc->vc_lastused_tree, ve); + + return (ve); +} + +static void +vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio) +{ + uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS); + + ASSERT(MUTEX_HELD(&vc->vc_lock)); + ASSERT(ve->ve_fill_io == NULL); + + if (ve->ve_lastused != lbolt) { + avl_remove(&vc->vc_lastused_tree, ve); + ve->ve_lastused = lbolt; + avl_add(&vc->vc_lastused_tree, ve); + } + + ve->ve_hits++; + bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size); +} + +/* + * Fill a previously allocated cache entry with data. + */ +static void +vdev_cache_fill(zio_t *zio) +{ + vdev_t *vd = zio->io_vd; + vdev_cache_t *vc = &vd->vdev_cache; + vdev_cache_entry_t *ve = zio->io_private; + zio_t *dio; + + ASSERT(zio->io_size == VCBS); + + /* + * Add data to the cache. + */ + mutex_enter(&vc->vc_lock); + + ASSERT(ve->ve_fill_io == zio); + ASSERT(ve->ve_offset == zio->io_offset); + ASSERT(ve->ve_data == zio->io_data); + + ve->ve_fill_io = NULL; + + /* + * Even if this cache line was invalidated by a missed write update, + * any reads that were queued up before the missed update are still + * valid, so we can satisfy them from this line before we evict it. 
+ */ + for (dio = zio->io_delegate_list; dio; dio = dio->io_delegate_next) + vdev_cache_hit(vc, ve, dio); + + if (zio->io_error || ve->ve_missed_update) + vdev_cache_evict(vc, ve); + + mutex_exit(&vc->vc_lock); + + while ((dio = zio->io_delegate_list) != NULL) { + zio->io_delegate_list = dio->io_delegate_next; + dio->io_delegate_next = NULL; + dio->io_error = zio->io_error; + zio_execute(dio); + } +} + +/* + * Read data from the cache. Returns 0 on cache hit, errno on a miss. + */ +int +vdev_cache_read(zio_t *zio) +{ + vdev_cache_t *vc = &zio->io_vd->vdev_cache; + vdev_cache_entry_t *ve, ve_search; + uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS); + uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS); + zio_t *fio; + + ASSERT(zio->io_type == ZIO_TYPE_READ); + + if (zio->io_flags & ZIO_FLAG_DONT_CACHE) + return (EINVAL); + + if (zio->io_size > zfs_vdev_cache_max) + return (EOVERFLOW); + + /* + * If the I/O straddles two or more cache blocks, don't cache it. + */ + if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS)) + return (EXDEV); + + ASSERT(cache_phase + zio->io_size <= VCBS); + + mutex_enter(&vc->vc_lock); + + ve_search.ve_offset = cache_offset; + ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL); + + if (ve != NULL) { + if (ve->ve_missed_update) { + mutex_exit(&vc->vc_lock); + return (ESTALE); + } + + if ((fio = ve->ve_fill_io) != NULL) { + zio->io_delegate_next = fio->io_delegate_list; + fio->io_delegate_list = zio; + zio_vdev_io_bypass(zio); + mutex_exit(&vc->vc_lock); + VDCSTAT_BUMP(vdc_stat_delegations); + return (0); + } + + vdev_cache_hit(vc, ve, zio); + zio_vdev_io_bypass(zio); + + mutex_exit(&vc->vc_lock); + zio_execute(zio); + VDCSTAT_BUMP(vdc_stat_hits); + return (0); + } + + ve = vdev_cache_allocate(zio); + + if (ve == NULL) { + mutex_exit(&vc->vc_lock); + return (ENOMEM); + } + + fio = zio_vdev_delegated_io(zio->io_vd, cache_offset, + ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_CACHE_FILL, + ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve); + + ve->ve_fill_io = fio; + fio->io_delegate_list = zio; + zio_vdev_io_bypass(zio); + + mutex_exit(&vc->vc_lock); + zio_nowait(fio); + VDCSTAT_BUMP(vdc_stat_misses); + + return (0); +} + +/* + * Update cache contents upon write completion. 
+ */ +void +vdev_cache_write(zio_t *zio) +{ + vdev_cache_t *vc = &zio->io_vd->vdev_cache; + vdev_cache_entry_t *ve, ve_search; + uint64_t io_start = zio->io_offset; + uint64_t io_end = io_start + zio->io_size; + uint64_t min_offset = P2ALIGN(io_start, VCBS); + uint64_t max_offset = P2ROUNDUP(io_end, VCBS); + avl_index_t where; + + ASSERT(zio->io_type == ZIO_TYPE_WRITE); + + mutex_enter(&vc->vc_lock); + + ve_search.ve_offset = min_offset; + ve = avl_find(&vc->vc_offset_tree, &ve_search, &where); + + if (ve == NULL) + ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER); + + while (ve != NULL && ve->ve_offset < max_offset) { + uint64_t start = MAX(ve->ve_offset, io_start); + uint64_t end = MIN(ve->ve_offset + VCBS, io_end); + + if (ve->ve_fill_io != NULL) { + ve->ve_missed_update = 1; + } else { + bcopy((char *)zio->io_data + start - io_start, + ve->ve_data + start - ve->ve_offset, end - start); + } + ve = AVL_NEXT(&vc->vc_offset_tree, ve); + } + mutex_exit(&vc->vc_lock); +} + +void +vdev_cache_purge(vdev_t *vd) +{ + vdev_cache_t *vc = &vd->vdev_cache; + vdev_cache_entry_t *ve; + + mutex_enter(&vc->vc_lock); + while ((ve = avl_first(&vc->vc_offset_tree)) != NULL) + vdev_cache_evict(vc, ve); + mutex_exit(&vc->vc_lock); +} + +void +vdev_cache_init(vdev_t *vd) +{ + vdev_cache_t *vc = &vd->vdev_cache; + + mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL); + + avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare, + sizeof (vdev_cache_entry_t), + offsetof(struct vdev_cache_entry, ve_offset_node)); + + avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare, + sizeof (vdev_cache_entry_t), + offsetof(struct vdev_cache_entry, ve_lastused_node)); +} + +void +vdev_cache_fini(vdev_t *vd) +{ + vdev_cache_t *vc = &vd->vdev_cache; + + vdev_cache_purge(vd); + + avl_destroy(&vc->vc_offset_tree); + avl_destroy(&vc->vc_lastused_tree); + + mutex_destroy(&vc->vc_lock); +} + +void +vdev_cache_stat_init(void) +{ + vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc", + KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t), + KSTAT_FLAG_VIRTUAL); + if (vdc_ksp != NULL) { + vdc_ksp->ks_data = &vdc_stats; + kstat_install(vdc_ksp); + } +} + +void +vdev_cache_stat_fini(void) +{ + if (vdc_ksp != NULL) { + kstat_delete(vdc_ksp); + vdc_ksp = NULL; + } +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_disk.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_disk.c new file mode 100644 index 000000000000..35d4e2a9200d --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_disk.c @@ -0,0 +1,526 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Virtual device vector for disks. + */ + +extern ldi_ident_t zfs_li; + +typedef struct vdev_disk_buf { + buf_t vdb_buf; + zio_t *vdb_io; +} vdev_disk_buf_t; + +static int +vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift) +{ + vdev_disk_t *dvd; + struct dk_minfo dkm; + int error; + dev_t dev; + int otyp; + + /* + * We must have a pathname, and it must be absolute. + */ + if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') { + vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; + return (EINVAL); + } + + dvd = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP); + + /* + * When opening a disk device, we want to preserve the user's original + * intent. We always want to open the device by the path the user gave + * us, even if it is one of multiple paths to the same device. But we + * also want to be able to survive disks being removed/recabled. + * Therefore the sequence of opening devices is: + * + * 1. Try opening the device by path. For legacy pools without the + * 'whole_disk' property, attempt to fix the path by appending 's0'. + * + * 2. If the devid of the device matches the stored value, return + * success. + * + * 3. Otherwise, the device may have moved. Try opening the device + * by the devid instead. + * + * If the vdev is part of the root pool, we avoid opening it by path. + * We do this because there is no /dev path available early in boot, + * and if we try to open the device by path at a later point, we can + * deadlock when devfsadm attempts to open the underlying backing store + * file. + */ + if (vd->vdev_devid != NULL) { + if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid, + &dvd->vd_minor) != 0) { + vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; + return (EINVAL); + } + } + + error = EINVAL; /* presume failure */ + + if (vd->vdev_path != NULL && !spa_is_root(vd->vdev_spa)) { + ddi_devid_t devid; + + if (vd->vdev_wholedisk == -1ULL) { + size_t len = strlen(vd->vdev_path) + 3; + char *buf = kmem_alloc(len, KM_SLEEP); + ldi_handle_t lh; + + (void) snprintf(buf, len, "%ss0", vd->vdev_path); + + if (ldi_open_by_name(buf, spa_mode, kcred, + &lh, zfs_li) == 0) { + spa_strfree(vd->vdev_path); + vd->vdev_path = buf; + vd->vdev_wholedisk = 1ULL; + (void) ldi_close(lh, spa_mode, kcred); + } else { + kmem_free(buf, len); + } + } + + error = ldi_open_by_name(vd->vdev_path, spa_mode, kcred, + &dvd->vd_lh, zfs_li); + + /* + * Compare the devid to the stored value. + */ + if (error == 0 && vd->vdev_devid != NULL && + ldi_get_devid(dvd->vd_lh, &devid) == 0) { + if (ddi_devid_compare(devid, dvd->vd_devid) != 0) { + error = EINVAL; + (void) ldi_close(dvd->vd_lh, spa_mode, kcred); + dvd->vd_lh = NULL; + } + ddi_devid_free(devid); + } + + /* + * If we succeeded in opening the device, but 'vdev_wholedisk' + * is not yet set, then this must be a slice. + */ + if (error == 0 && vd->vdev_wholedisk == -1ULL) + vd->vdev_wholedisk = 0; + } + + /* + * If we were unable to open by path, or the devid check fails, open by + * devid instead. + */ + if (error != 0 && vd->vdev_devid != NULL) + error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor, + spa_mode, kcred, &dvd->vd_lh, zfs_li); + + /* + * If all else fails, then try opening by physical path (if available) + * or the logical path (if we failed due to the devid check). While not + * as reliable as the devid, this will give us something, and the higher + * level vdev validation will prevent us from opening the wrong device.
+ */ + if (error) { + if (vd->vdev_physpath != NULL && + (dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != ENODEV) + error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode, + kcred, &dvd->vd_lh, zfs_li); + + /* + * Note that we don't support the legacy auto-wholedisk support + * as above. This hasn't been used in a very long time and we + * don't need to propagate its oddities to this edge condition. + */ + if (error && vd->vdev_path != NULL && + !spa_is_root(vd->vdev_spa)) + error = ldi_open_by_name(vd->vdev_path, spa_mode, kcred, + &dvd->vd_lh, zfs_li); + } + + if (error) { + vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED; + return (error); + } + + /* + * Once a device is opened, verify that the physical device path (if + * available) is up to date. + */ + if (ldi_get_dev(dvd->vd_lh, &dev) == 0 && + ldi_get_otyp(dvd->vd_lh, &otyp) == 0) { + char *physpath, *minorname; + + physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); + minorname = NULL; + if (ddi_dev_pathname(dev, otyp, physpath) == 0 && + ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 && + (vd->vdev_physpath == NULL || + strcmp(vd->vdev_physpath, physpath) != 0)) { + if (vd->vdev_physpath) + spa_strfree(vd->vdev_physpath); + (void) strlcat(physpath, ":", MAXPATHLEN); + (void) strlcat(physpath, minorname, MAXPATHLEN); + vd->vdev_physpath = spa_strdup(physpath); + } + if (minorname) + kmem_free(minorname, strlen(minorname) + 1); + kmem_free(physpath, MAXPATHLEN); + } + + /* + * Determine the actual size of the device. + */ + if (ldi_get_size(dvd->vd_lh, psize) != 0) { + vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED; + return (EINVAL); + } + + /* + * If we own the whole disk, try to enable disk write caching. + * We ignore errors because it's OK if we can't do it. + */ + if (vd->vdev_wholedisk == 1) { + int wce = 1; + (void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce, + FKIOCTL, kcred, NULL); + } + + /* + * Determine the device's minimum transfer size. + * If the ioctl isn't supported, assume DEV_BSIZE. + */ + if (ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFO, (intptr_t)&dkm, + FKIOCTL, kcred, NULL) != 0) + dkm.dki_lbsize = DEV_BSIZE; + + *ashift = highbit(MAX(dkm.dki_lbsize, SPA_MINBLOCKSIZE)) - 1; + + /* + * Clear the nowritecache bit, so that on a vdev_reopen() we will + * try again. + */ + vd->vdev_nowritecache = B_FALSE; + + return (0); +} + +static void +vdev_disk_close(vdev_t *vd) +{ + vdev_disk_t *dvd = vd->vdev_tsd; + + if (dvd == NULL) + return; + + if (dvd->vd_minor != NULL) + ddi_devid_str_free(dvd->vd_minor); + + if (dvd->vd_devid != NULL) + ddi_devid_free(dvd->vd_devid); + + if (dvd->vd_lh != NULL) + (void) ldi_close(dvd->vd_lh, spa_mode, kcred); + + kmem_free(dvd, sizeof (vdev_disk_t)); + vd->vdev_tsd = NULL; +} + +int +vdev_disk_physio(ldi_handle_t vd_lh, caddr_t data, size_t size, + uint64_t offset, int flags) +{ + buf_t *bp; + int error = 0; + + if (vd_lh == NULL) + return (EINVAL); + + ASSERT(flags & B_READ || flags & B_WRITE); + + bp = getrbuf(KM_SLEEP); + bp->b_flags = flags | B_BUSY | B_NOCACHE | B_FAILFAST; + bp->b_bcount = size; + bp->b_un.b_addr = (void *)data; + bp->b_lblkno = lbtodb(offset); + bp->b_bufsize = size; + + error = ldi_strategy(vd_lh, bp); + ASSERT(error == 0); + if ((error = biowait(bp)) == 0 && bp->b_resid != 0) + error = EIO; + freerbuf(bp); + + return (error); +} + +static void +vdev_disk_io_intr(buf_t *bp) +{ + vdev_disk_buf_t *vdb = (vdev_disk_buf_t *)bp; + zio_t *zio = vdb->vdb_io; + + /* + * The rest of the zio stack only deals with EIO, ECKSUM, and ENXIO. 
+ * Rather than teach the rest of the stack about other error + * possibilities (EFAULT, etc), we normalize the error value here. + */ + zio->io_error = (geterror(bp) != 0 ? EIO : 0); + + if (zio->io_error == 0 && bp->b_resid != 0) + zio->io_error = EIO; + + kmem_free(vdb, sizeof (vdev_disk_buf_t)); + + zio_interrupt(zio); +} + +static void +vdev_disk_ioctl_free(zio_t *zio) +{ + kmem_free(zio->io_vsd, sizeof (struct dk_callback)); +} + +static void +vdev_disk_ioctl_done(void *zio_arg, int error) +{ + zio_t *zio = zio_arg; + + zio->io_error = error; + + zio_interrupt(zio); +} + +static int +vdev_disk_io_start(zio_t *zio) +{ + vdev_t *vd = zio->io_vd; + vdev_disk_t *dvd = vd->vdev_tsd; + vdev_disk_buf_t *vdb; + struct dk_callback *dkc; + buf_t *bp; + int error; + + if (zio->io_type == ZIO_TYPE_IOCTL) { + /* XXPOLICY */ + if (!vdev_readable(vd)) { + zio->io_error = ENXIO; + return (ZIO_PIPELINE_CONTINUE); + } + + switch (zio->io_cmd) { + + case DKIOCFLUSHWRITECACHE: + + if (zfs_nocacheflush) + break; + + if (vd->vdev_nowritecache) { + zio->io_error = ENOTSUP; + break; + } + + zio->io_vsd = dkc = kmem_alloc(sizeof (*dkc), KM_SLEEP); + zio->io_vsd_free = vdev_disk_ioctl_free; + + dkc->dkc_callback = vdev_disk_ioctl_done; + dkc->dkc_flag = FLUSH_VOLATILE; + dkc->dkc_cookie = zio; + + error = ldi_ioctl(dvd->vd_lh, zio->io_cmd, + (uintptr_t)dkc, FKIOCTL, kcred, NULL); + + if (error == 0) { + /* + * The ioctl will be done asychronously, + * and will call vdev_disk_ioctl_done() + * upon completion. + */ + return (ZIO_PIPELINE_STOP); + } + + if (error == ENOTSUP || error == ENOTTY) { + /* + * If we get ENOTSUP or ENOTTY, we know that + * no future attempts will ever succeed. + * In this case we set a persistent bit so + * that we don't bother with the ioctl in the + * future. + */ + vd->vdev_nowritecache = B_TRUE; + } + zio->io_error = error; + + break; + + default: + zio->io_error = ENOTSUP; + } + + return (ZIO_PIPELINE_CONTINUE); + } + + vdb = kmem_alloc(sizeof (vdev_disk_buf_t), KM_SLEEP); + + vdb->vdb_io = zio; + bp = &vdb->vdb_buf; + + bioinit(bp); + bp->b_flags = B_BUSY | B_NOCACHE | + (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE) | + ((zio->io_flags & ZIO_FLAG_IO_RETRY) ? 0 : B_FAILFAST); + bp->b_bcount = zio->io_size; + bp->b_un.b_addr = zio->io_data; + bp->b_lblkno = lbtodb(zio->io_offset); + bp->b_bufsize = zio->io_size; + bp->b_iodone = (int (*)())vdev_disk_io_intr; + + /* ldi_strategy() will return non-zero only on programming errors */ + VERIFY(ldi_strategy(dvd->vd_lh, bp) == 0); + + return (ZIO_PIPELINE_STOP); +} + +static void +vdev_disk_io_done(zio_t *zio) +{ + vdev_t *vd = zio->io_vd; + + /* + * If the device returned EIO, then attempt a DKIOCSTATE ioctl to see if + * the device has been removed. If this is the case, then we trigger an + * asynchronous removal of the device. Otherwise, probe the device and + * make sure it's still accessible. 
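+ * The DKIOCSTATE ioctl reports DKIO_INSERTED while media is present;
+ * any other state is taken as evidence that the device has been pulled,
+ * and the SPA is asked to remove it asynchronously.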
+ */ + if (zio->io_error == EIO) { + vdev_disk_t *dvd = vd->vdev_tsd; + int state = DKIO_NONE; + + if (ldi_ioctl(dvd->vd_lh, DKIOCSTATE, (intptr_t)&state, + FKIOCTL, kcred, NULL) == 0 && state != DKIO_INSERTED) { + vd->vdev_remove_wanted = B_TRUE; + spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE); + } + } +} + +vdev_ops_t vdev_disk_ops = { + vdev_disk_open, + vdev_disk_close, + vdev_default_asize, + vdev_disk_io_start, + vdev_disk_io_done, + NULL, + VDEV_TYPE_DISK, /* name of this vdev type */ + B_TRUE /* leaf vdev */ +}; + +/* + * Given the root disk device devid or pathname, read the label from + * the device, and construct a configuration nvlist. + */ +int +vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config) +{ + ldi_handle_t vd_lh; + vdev_label_t *label; + uint64_t s, size; + int l; + ddi_devid_t tmpdevid; + int error = -1; + char *minor_name; + + /* + * Read the device label and build the nvlist. + */ + if (devid != NULL && ddi_devid_str_decode(devid, &tmpdevid, + &minor_name) == 0) { + error = ldi_open_by_devid(tmpdevid, minor_name, + spa_mode, kcred, &vd_lh, zfs_li); + ddi_devid_free(tmpdevid); + ddi_devid_str_free(minor_name); + } + + if (error && (error = ldi_open_by_name(devpath, FREAD, kcred, &vd_lh, + zfs_li))) + return (error); + + if (ldi_get_size(vd_lh, &s)) { + (void) ldi_close(vd_lh, FREAD, kcred); + return (EIO); + } + + size = P2ALIGN_TYPED(s, sizeof (vdev_label_t), uint64_t); + label = kmem_alloc(sizeof (vdev_label_t), KM_SLEEP); + + for (l = 0; l < VDEV_LABELS; l++) { + uint64_t offset, state, txg = 0; + + /* read vdev label */ + offset = vdev_label_offset(size, l, 0); + if (vdev_disk_physio(vd_lh, (caddr_t)label, + VDEV_SKIP_SIZE + VDEV_BOOT_HEADER_SIZE + + VDEV_PHYS_SIZE, offset, B_READ) != 0) + continue; + + if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist, + sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) { + *config = NULL; + continue; + } + + if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE, + &state) != 0 || state >= POOL_STATE_DESTROYED) { + nvlist_free(*config); + *config = NULL; + continue; + } + + if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG, + &txg) != 0 || txg == 0) { + nvlist_free(*config); + *config = NULL; + continue; + } + + break; + } + + kmem_free(label, sizeof (vdev_label_t)); + (void) ldi_close(vd_lh, FREAD, kcred); + + return (error); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_file.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_file.c new file mode 100644 index 000000000000..dc0e920bfc52 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_file.c @@ -0,0 +1,188 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 
+ * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Virtual device vector for files. + */ + +static int +vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift) +{ + vdev_file_t *vf; + vnode_t *vp; + vattr_t vattr; + int error; + + /* + * We must have a pathname, and it must be absolute. + */ + if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') { + vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; + return (EINVAL); + } + + vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_SLEEP); + + /* + * We always open the files from the root of the global zone, even if + * we're in a local zone. If the user has gotten to this point, the + * administrator has already decided that the pool should be available + * to local zone users, so the underlying devices should be as well. + */ + ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/'); + error = vn_openat(vd->vdev_path + 1, UIO_SYSSPACE, + spa_mode | FOFFMAX, 0, &vp, 0, 0, rootdir, -1); + + if (error) { + vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED; + return (error); + } + + vf->vf_vnode = vp; + +#ifdef _KERNEL + /* + * Make sure it's a regular file. + */ + if (vp->v_type != VREG) { + vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED; + return (ENODEV); + } +#endif + /* + * Determine the physical size of the file. + */ + vattr.va_mask = AT_SIZE; + error = VOP_GETATTR(vf->vf_vnode, &vattr, 0, kcred, NULL); + if (error) { + vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED; + return (error); + } + + *psize = vattr.va_size; + *ashift = SPA_MINBLOCKSHIFT; + + return (0); +} + +static void +vdev_file_close(vdev_t *vd) +{ + vdev_file_t *vf = vd->vdev_tsd; + + if (vf == NULL) + return; + + if (vf->vf_vnode != NULL) { + (void) VOP_PUTPAGE(vf->vf_vnode, 0, 0, B_INVAL, kcred, NULL); + (void) VOP_CLOSE(vf->vf_vnode, spa_mode, 1, 0, kcred, NULL); + VN_RELE(vf->vf_vnode); + } + + kmem_free(vf, sizeof (vdev_file_t)); + vd->vdev_tsd = NULL; +} + +static int +vdev_file_io_start(zio_t *zio) +{ + vdev_t *vd = zio->io_vd; + vdev_file_t *vf = vd->vdev_tsd; + ssize_t resid; + + if (zio->io_type == ZIO_TYPE_IOCTL) { + /* XXPOLICY */ + if (!vdev_readable(vd)) { + zio->io_error = ENXIO; + return (ZIO_PIPELINE_CONTINUE); + } + + switch (zio->io_cmd) { + case DKIOCFLUSHWRITECACHE: + zio->io_error = VOP_FSYNC(vf->vf_vnode, FSYNC | FDSYNC, + kcred, NULL); + break; + default: + zio->io_error = ENOTSUP; + } + + return (ZIO_PIPELINE_CONTINUE); + } + + zio->io_error = vn_rdwr(zio->io_type == ZIO_TYPE_READ ? + UIO_READ : UIO_WRITE, vf->vf_vnode, zio->io_data, + zio->io_size, zio->io_offset, UIO_SYSSPACE, + 0, RLIM64_INFINITY, kcred, &resid); + + if (resid != 0 && zio->io_error == 0) + zio->io_error = ENOSPC; + + zio_interrupt(zio); + + return (ZIO_PIPELINE_STOP); +} + +/* ARGSUSED */ +static void +vdev_file_io_done(zio_t *zio) +{ +} + +vdev_ops_t vdev_file_ops = { + vdev_file_open, + vdev_file_close, + vdev_default_asize, + vdev_file_io_start, + vdev_file_io_done, + NULL, + VDEV_TYPE_FILE, /* name of this vdev type */ + B_TRUE /* leaf vdev */ +}; + +/* + * From userland we access disks just like files. 
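+ * Userland builds (zdb, ztest) compile this file without _KERNEL
+ * defined, so the vdev_disk_ops vector below simply reuses the
+ * file-based open/close/io routines above.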
+ */ +#ifndef _KERNEL + +vdev_ops_t vdev_disk_ops = { + vdev_file_open, + vdev_file_close, + vdev_default_asize, + vdev_file_io_start, + vdev_file_io_done, + NULL, + VDEV_TYPE_DISK, /* name of this vdev type */ + B_TRUE /* leaf vdev */ +}; + +#endif diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_label.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_label.c new file mode 100644 index 000000000000..9c56d66364d5 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_label.c @@ -0,0 +1,1075 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * Virtual Device Labels + * --------------------- + * + * The vdev label serves several distinct purposes: + * + * 1. Uniquely identify this device as part of a ZFS pool and confirm its + * identity within the pool. + * + * 2. Verify that all the devices given in a configuration are present + * within the pool. + * + * 3. Determine the uberblock for the pool. + * + * 4. In case of an import operation, determine the configuration of the + * toplevel vdev of which it is a part. + * + * 5. If an import operation cannot find all the devices in the pool, + * provide enough information to the administrator to determine which + * devices are missing. + * + * It is important to note that while the kernel is responsible for writing the + * label, it only consumes the information in the first three cases. The + * latter information is only consumed in userland when determining the + * configuration to import a pool. + * + * + * Label Organization + * ------------------ + * + * Before describing the contents of the label, it's important to understand how + * the labels are written and updated with respect to the uberblock. + * + * When the pool configuration is altered, either because it was newly created + * or a device was added, we want to update all the labels such that we can deal + * with fatal failure at any point. To this end, each disk has two labels which + * are updated before and after the uberblock is synced. Assuming we have + * labels and an uberblock with the following transaction groups: + * + * L1 UB L2 + * +------+ +------+ +------+ + * | | | | | | + * | t10 | | t10 | | t10 | + * | | | | | | + * +------+ +------+ +------+ + * + * In this stable state, the labels and the uberblock were all updated within + * the same transaction group (10). Each label is mirrored and checksummed, so + * that we can detect when we fail partway through writing the label. + * + * In order to identify which labels are valid, the labels are written in the + * following manner: + * + * 1. For each vdev, update 'L1' to the new label + * 2. 
Update the uberblock + * 3. For each vdev, update 'L2' to the new label + * + * Given arbitrary failure, we can determine the correct label to use based on + * the transaction group. If we fail after updating L1 but before updating the + * UB, we will notice that L1's transaction group is greater than the uberblock, + * so L2 must be valid. If we fail after writing the uberblock but before + * writing L2, we will notice that L2's transaction group is less than L1, and + * therefore L1 is valid. + * + * Another added complexity is that not every label is updated when the config + * is synced. If we add a single device, we do not want to have to re-write + * every label for every device in the pool. This means that both L1 and L2 may + * be older than the pool uberblock, because the necessary information is stored + * on another vdev. + * + * + * On-disk Format + * -------------- + * + * The vdev label consists of two distinct parts, and is wrapped within the + * vdev_label_t structure. The label includes 8k of padding to permit legacy + * VTOC disk labels, but is otherwise ignored. + * + * The first half of the label is a packed nvlist which contains pool wide + * properties, per-vdev properties, and configuration information. It is + * described in more detail below. + * + * The latter half of the label consists of a redundant array of uberblocks. + * These uberblocks are updated whenever a transaction group is committed, + * or when the configuration is updated. When a pool is loaded, we scan each + * vdev for the 'best' uberblock. + * + * + * Configuration Information + * ------------------------- + * + * The nvlist describing the pool and vdev contains the following elements: + * + * version ZFS on-disk version + * name Pool name + * state Pool state + * txg Transaction group in which this label was written + * pool_guid Unique identifier for this pool + * vdev_tree An nvlist describing vdev tree. + * + * Each leaf device label also contains the following: + * + * top_guid Unique ID for top-level vdev in which this is contained + * guid Unique ID for the leaf vdev + * + * The 'vs' configuration follows the format described in 'spa_config.c'. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Basic routines to read and write from a vdev label. + * Used throughout the rest of this file. + */ +uint64_t +vdev_label_offset(uint64_t psize, int l, uint64_t offset) +{ + ASSERT(offset < sizeof (vdev_label_t)); + ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0); + + return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ? + 0 : psize - VDEV_LABELS * sizeof (vdev_label_t))); +} + +/* + * Returns back the vdev label associated with the passed in offset. + */ +int +vdev_label_number(uint64_t psize, uint64_t offset) +{ + int l; + + if (offset >= psize - VDEV_LABEL_END_SIZE) { + offset -= psize - VDEV_LABEL_END_SIZE; + offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t); + } + l = offset / sizeof (vdev_label_t); + return (l < VDEV_LABELS ? 
l : -1); +} + +static void +vdev_label_read(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset, + uint64_t size, zio_done_func_t *done, void *private, int flags) +{ + ASSERT(spa_config_held(zio->io_spa, SCL_STATE_ALL, RW_WRITER) == + SCL_STATE_ALL); + ASSERT(flags & ZIO_FLAG_CONFIG_WRITER); + + zio_nowait(zio_read_phys(zio, vd, + vdev_label_offset(vd->vdev_psize, l, offset), + size, buf, ZIO_CHECKSUM_LABEL, done, private, + ZIO_PRIORITY_SYNC_READ, flags, B_TRUE)); +} + +static void +vdev_label_write(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset, + uint64_t size, zio_done_func_t *done, void *private, int flags) +{ + ASSERT(spa_config_held(zio->io_spa, SCL_ALL, RW_WRITER) == SCL_ALL || + (spa_config_held(zio->io_spa, SCL_CONFIG | SCL_STATE, RW_READER) == + (SCL_CONFIG | SCL_STATE) && + dsl_pool_sync_context(spa_get_dsl(zio->io_spa)))); + ASSERT(flags & ZIO_FLAG_CONFIG_WRITER); + + zio_nowait(zio_write_phys(zio, vd, + vdev_label_offset(vd->vdev_psize, l, offset), + size, buf, ZIO_CHECKSUM_LABEL, done, private, + ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE)); +} + +/* + * Generate the nvlist representing this vdev's config. + */ +nvlist_t * +vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats, + boolean_t isspare, boolean_t isl2cache) +{ + nvlist_t *nv = NULL; + + VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE, + vd->vdev_ops->vdev_op_type) == 0); + if (!isspare && !isl2cache) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id) + == 0); + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid) == 0); + + if (vd->vdev_path != NULL) + VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, + vd->vdev_path) == 0); + + if (vd->vdev_devid != NULL) + VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, + vd->vdev_devid) == 0); + + if (vd->vdev_physpath != NULL) + VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH, + vd->vdev_physpath) == 0); + + if (vd->vdev_nparity != 0) { + ASSERT(strcmp(vd->vdev_ops->vdev_op_type, + VDEV_TYPE_RAIDZ) == 0); + + /* + * Make sure someone hasn't managed to sneak a fancy new vdev + * into a crufty old storage pool. + */ + ASSERT(vd->vdev_nparity == 1 || + (vd->vdev_nparity == 2 && + spa_version(spa) >= SPA_VERSION_RAID6)); + + /* + * Note that we'll add the nparity tag even on storage pools + * that only support a single parity device -- older software + * will just ignore it. 
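+ * (A plain raidz vdev is therefore written with nparity=1 even on
+ * pools that predate SPA_VERSION_RAID6.)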
+ */ + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, + vd->vdev_nparity) == 0); + } + + if (vd->vdev_wholedisk != -1ULL) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, + vd->vdev_wholedisk) == 0); + + if (vd->vdev_not_present) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1) == 0); + + if (vd->vdev_isspare) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1) == 0); + + if (!isspare && !isl2cache && vd == vd->vdev_top) { + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY, + vd->vdev_ms_array) == 0); + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT, + vd->vdev_ms_shift) == 0); + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, + vd->vdev_ashift) == 0); + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE, + vd->vdev_asize) == 0); + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, + vd->vdev_islog) == 0); + } + + if (vd->vdev_dtl.smo_object != 0) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_DTL, + vd->vdev_dtl.smo_object) == 0); + + if (getstats) { + vdev_stat_t vs; + vdev_get_stats(vd, &vs); + VERIFY(nvlist_add_uint64_array(nv, ZPOOL_CONFIG_STATS, + (uint64_t *)&vs, sizeof (vs) / sizeof (uint64_t)) == 0); + } + + if (!vd->vdev_ops->vdev_op_leaf) { + nvlist_t **child; + int c; + + child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *), + KM_SLEEP); + + for (c = 0; c < vd->vdev_children; c++) + child[c] = vdev_config_generate(spa, vd->vdev_child[c], + getstats, isspare, isl2cache); + + VERIFY(nvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, + child, vd->vdev_children) == 0); + + for (c = 0; c < vd->vdev_children; c++) + nvlist_free(child[c]); + + kmem_free(child, vd->vdev_children * sizeof (nvlist_t *)); + + } else { + if (vd->vdev_offline && !vd->vdev_tmpoffline) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, + B_TRUE) == 0); + if (vd->vdev_faulted) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, + B_TRUE) == 0); + if (vd->vdev_degraded) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, + B_TRUE) == 0); + if (vd->vdev_removed) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, + B_TRUE) == 0); + if (vd->vdev_unspare) + VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, + B_TRUE) == 0); + } + + return (nv); +} + +nvlist_t * +vdev_label_read_config(vdev_t *vd) +{ + spa_t *spa = vd->vdev_spa; + nvlist_t *config = NULL; + vdev_phys_t *vp; + zio_t *zio; + int flags = + ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; + + ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); + + if (!vdev_readable(vd)) + return (NULL); + + vp = zio_buf_alloc(sizeof (vdev_phys_t)); + + for (int l = 0; l < VDEV_LABELS; l++) { + + zio = zio_root(spa, NULL, NULL, flags); + + vdev_label_read(zio, vd, l, vp, + offsetof(vdev_label_t, vl_vdev_phys), + sizeof (vdev_phys_t), NULL, NULL, flags); + + if (zio_wait(zio) == 0 && + nvlist_unpack(vp->vp_nvlist, sizeof (vp->vp_nvlist), + &config, 0) == 0) + break; + + if (config != NULL) { + nvlist_free(config); + config = NULL; + } + } + + zio_buf_free(vp, sizeof (vdev_phys_t)); + + return (config); +} + +/* + * Determine if a device is in use. The 'spare_guid' parameter will be filled + * in with the device guid if this spare is active elsewhere on the system. 
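+ *
+ * Returns B_TRUE when the label shows the device is in use in a way
+ * that should block the requested operation, in which case
+ * vdev_label_init() fails with EBUSY.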
+ */ +static boolean_t +vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason, + uint64_t *spare_guid, uint64_t *l2cache_guid) +{ + spa_t *spa = vd->vdev_spa; + uint64_t state, pool_guid, device_guid, txg, spare_pool; + uint64_t vdtxg = 0; + nvlist_t *label; + + if (spare_guid) + *spare_guid = 0ULL; + if (l2cache_guid) + *l2cache_guid = 0ULL; + + /* + * Read the label, if any, and perform some basic sanity checks. + */ + if ((label = vdev_label_read_config(vd)) == NULL) + return (B_FALSE); + + (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG, + &vdtxg); + + if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, + &state) != 0 || + nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, + &device_guid) != 0) { + nvlist_free(label); + return (B_FALSE); + } + + if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && + (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, + &pool_guid) != 0 || + nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, + &txg) != 0)) { + nvlist_free(label); + return (B_FALSE); + } + + nvlist_free(label); + + /* + * Check to see if this device indeed belongs to the pool it claims to + * be a part of. The only way this is allowed is if the device is a hot + * spare (which we check for later on). + */ + if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && + !spa_guid_exists(pool_guid, device_guid) && + !spa_spare_exists(device_guid, NULL, NULL) && + !spa_l2cache_exists(device_guid, NULL)) + return (B_FALSE); + + /* + * If the transaction group is zero, then this an initialized (but + * unused) label. This is only an error if the create transaction + * on-disk is the same as the one we're using now, in which case the + * user has attempted to add the same vdev multiple times in the same + * transaction. + */ + if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE && + txg == 0 && vdtxg == crtxg) + return (B_TRUE); + + /* + * Check to see if this is a spare device. We do an explicit check for + * spa_has_spare() here because it may be on our pending list of spares + * to add. We also check if it is an l2cache device. + */ + if (spa_spare_exists(device_guid, &spare_pool, NULL) || + spa_has_spare(spa, device_guid)) { + if (spare_guid) + *spare_guid = device_guid; + + switch (reason) { + case VDEV_LABEL_CREATE: + case VDEV_LABEL_L2CACHE: + return (B_TRUE); + + case VDEV_LABEL_REPLACE: + return (!spa_has_spare(spa, device_guid) || + spare_pool != 0ULL); + + case VDEV_LABEL_SPARE: + return (spa_has_spare(spa, device_guid)); + } + } + + /* + * Check to see if this is an l2cache device. + */ + if (spa_l2cache_exists(device_guid, NULL)) + return (B_TRUE); + + /* + * If the device is marked ACTIVE, then this device is in use by another + * pool on the system. + */ + return (state == POOL_STATE_ACTIVE); +} + +/* + * Initialize a vdev label. We check to make sure each leaf device is not in + * use, and writable. We put down an initial label which we will later + * overwrite with a complete label. Note that it's important to do this + * sequentially, not in parallel, so that we catch cases of multiple use of the + * same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with + * itself. 
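+ *
+ * The create txg recorded in each freshly written label is what makes
+ * such reuse detectable: the second open of the same leaf reads back a
+ * label whose pool txg is 0 but whose create txg matches crtxg, and
+ * vdev_inuse() reports it as already in use.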
+ */ +int +vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason) +{ + spa_t *spa = vd->vdev_spa; + nvlist_t *label; + vdev_phys_t *vp; + vdev_boot_header_t *vb; + uberblock_t *ub; + zio_t *zio; + char *buf; + size_t buflen; + int error; + uint64_t spare_guid, l2cache_guid; + int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL; + + ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); + + for (int c = 0; c < vd->vdev_children; c++) + if ((error = vdev_label_init(vd->vdev_child[c], + crtxg, reason)) != 0) + return (error); + + if (!vd->vdev_ops->vdev_op_leaf) + return (0); + + /* + * Dead vdevs cannot be initialized. + */ + if (vdev_is_dead(vd)) + return (EIO); + + /* + * Determine if the vdev is in use. + */ + if (reason != VDEV_LABEL_REMOVE && + vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid)) + return (EBUSY); + + /* + * If this is a request to add or replace a spare or l2cache device + * that is in use elsewhere on the system, then we must update the + * guid (which was initialized to a random value) to reflect the + * actual GUID (which is shared between multiple pools). + */ + if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE && + spare_guid != 0ULL) { + uint64_t guid_delta = spare_guid - vd->vdev_guid; + + vd->vdev_guid += guid_delta; + + for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) + pvd->vdev_guid_sum += guid_delta; + + /* + * If this is a replacement, then we want to fallthrough to the + * rest of the code. If we're adding a spare, then it's already + * labeled appropriately and we can just return. + */ + if (reason == VDEV_LABEL_SPARE) + return (0); + ASSERT(reason == VDEV_LABEL_REPLACE); + } + + if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE && + l2cache_guid != 0ULL) { + uint64_t guid_delta = l2cache_guid - vd->vdev_guid; + + vd->vdev_guid += guid_delta; + + for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) + pvd->vdev_guid_sum += guid_delta; + + /* + * If this is a replacement, then we want to fallthrough to the + * rest of the code. If we're adding an l2cache, then it's + * already labeled appropriately and we can just return. + */ + if (reason == VDEV_LABEL_L2CACHE) + return (0); + ASSERT(reason == VDEV_LABEL_REPLACE); + } + + /* + * Initialize its label. + */ + vp = zio_buf_alloc(sizeof (vdev_phys_t)); + bzero(vp, sizeof (vdev_phys_t)); + + /* + * Generate a label describing the pool and our top-level vdev. + * We mark it as being from txg 0 to indicate that it's not + * really part of an active pool just yet. The labels will + * be written again with a meaningful txg by spa_sync(). + */ + if (reason == VDEV_LABEL_SPARE || + (reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) { + /* + * For inactive hot spares, we generate a special label that + * identifies as a mutually shared hot spare. We write the + * label if we are adding a hot spare, or if we are removing an + * active hot spare (in which case we want to revert the + * labels). + */ + VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION, + spa_version(spa)) == 0); + VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE, + POOL_STATE_SPARE) == 0); + VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID, + vd->vdev_guid) == 0); + } else if (reason == VDEV_LABEL_L2CACHE || + (reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) { + /* + * For level 2 ARC devices, add a special label. 
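+ * As with hot spares above, the label carries only the version, the
+ * POOL_STATE_L2CACHE state and the device guid, since cache devices
+ * may be shared between pools.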
+ */ + VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION, + spa_version(spa)) == 0); + VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE, + POOL_STATE_L2CACHE) == 0); + VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID, + vd->vdev_guid) == 0); + } else { + label = spa_config_generate(spa, vd, 0ULL, B_FALSE); + + /* + * Add our creation time. This allows us to detect multiple + * vdev uses as described above, and automatically expires if we + * fail. + */ + VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG, + crtxg) == 0); + } + + buf = vp->vp_nvlist; + buflen = sizeof (vp->vp_nvlist); + + error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP); + if (error != 0) { + nvlist_free(label); + zio_buf_free(vp, sizeof (vdev_phys_t)); + /* EFAULT means nvlist_pack ran out of room */ + return (error == EFAULT ? ENAMETOOLONG : EINVAL); + } + + /* + * Initialize boot block header. + */ + vb = zio_buf_alloc(sizeof (vdev_boot_header_t)); + bzero(vb, sizeof (vdev_boot_header_t)); + vb->vb_magic = VDEV_BOOT_MAGIC; + vb->vb_version = VDEV_BOOT_VERSION; + vb->vb_offset = VDEV_BOOT_OFFSET; + vb->vb_size = VDEV_BOOT_SIZE; + + /* + * Initialize uberblock template. + */ + ub = zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd)); + bzero(ub, VDEV_UBERBLOCK_SIZE(vd)); + *ub = spa->spa_uberblock; + ub->ub_txg = 0; + + /* + * Write everything in parallel. + */ + zio = zio_root(spa, NULL, NULL, flags); + + for (int l = 0; l < VDEV_LABELS; l++) { + + vdev_label_write(zio, vd, l, vp, + offsetof(vdev_label_t, vl_vdev_phys), + sizeof (vdev_phys_t), NULL, NULL, flags); + + vdev_label_write(zio, vd, l, vb, + offsetof(vdev_label_t, vl_boot_header), + sizeof (vdev_boot_header_t), NULL, NULL, flags); + + for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) { + vdev_label_write(zio, vd, l, ub, + VDEV_UBERBLOCK_OFFSET(vd, n), + VDEV_UBERBLOCK_SIZE(vd), NULL, NULL, flags); + } + } + + error = zio_wait(zio); + + nvlist_free(label); + zio_buf_free(ub, VDEV_UBERBLOCK_SIZE(vd)); + zio_buf_free(vb, sizeof (vdev_boot_header_t)); + zio_buf_free(vp, sizeof (vdev_phys_t)); + + /* + * If this vdev hasn't been previously identified as a spare, then we + * mark it as such only if a) we are labeling it as a spare, or b) it + * exists as a spare elsewhere in the system. Do the same for + * level 2 ARC devices. + */ + if (error == 0 && !vd->vdev_isspare && + (reason == VDEV_LABEL_SPARE || + spa_spare_exists(vd->vdev_guid, NULL, NULL))) + spa_spare_add(vd); + + if (error == 0 && !vd->vdev_isl2cache && + (reason == VDEV_LABEL_L2CACHE || + spa_l2cache_exists(vd->vdev_guid, NULL))) + spa_l2cache_add(vd); + + return (error); +} + +/* + * ========================================================================== + * uberblock load/sync + * ========================================================================== + */ + +/* + * Consider the following situation: txg is safely synced to disk. We've + * written the first uberblock for txg + 1, and then we lose power. When we + * come back up, we fail to see the uberblock for txg + 1 because, say, + * it was on a mirrored device and the replica to which we wrote txg + 1 + * is now offline. If we then make some changes and sync txg + 1, and then + * the missing replica comes back, then for a new seconds we'll have two + * conflicting uberblocks on disk with the same txg. The solution is simple: + * among uberblocks with equal txg, choose the one with the latest timestamp. 
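+ *
+ * For example, the copy written during the second sync of txg + 1
+ * carries the later ub_timestamp, so vdev_uberblock_compare() below
+ * picks it over the stale copy on the returning replica.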
+ */ +static int +vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2) +{ + if (ub1->ub_txg < ub2->ub_txg) + return (-1); + if (ub1->ub_txg > ub2->ub_txg) + return (1); + + if (ub1->ub_timestamp < ub2->ub_timestamp) + return (-1); + if (ub1->ub_timestamp > ub2->ub_timestamp) + return (1); + + return (0); +} + +static void +vdev_uberblock_load_done(zio_t *zio) +{ + zio_t *rio = zio->io_private; + uberblock_t *ub = zio->io_data; + uberblock_t *ubbest = rio->io_private; + + ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(zio->io_vd)); + + if (zio->io_error == 0 && uberblock_verify(ub) == 0) { + mutex_enter(&rio->io_lock); + if (vdev_uberblock_compare(ub, ubbest) > 0) + *ubbest = *ub; + mutex_exit(&rio->io_lock); + } + + zio_buf_free(zio->io_data, zio->io_size); +} + +void +vdev_uberblock_load(zio_t *zio, vdev_t *vd, uberblock_t *ubbest) +{ + spa_t *spa = vd->vdev_spa; + vdev_t *rvd = spa->spa_root_vdev; + int flags = + ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; + + if (vd == rvd) { + ASSERT(zio == NULL); + spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); + zio = zio_root(spa, NULL, ubbest, flags); + bzero(ubbest, sizeof (uberblock_t)); + } + + ASSERT(zio != NULL); + + for (int c = 0; c < vd->vdev_children; c++) + vdev_uberblock_load(zio, vd->vdev_child[c], ubbest); + + if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) { + for (int l = 0; l < VDEV_LABELS; l++) { + for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) { + vdev_label_read(zio, vd, l, + zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd)), + VDEV_UBERBLOCK_OFFSET(vd, n), + VDEV_UBERBLOCK_SIZE(vd), + vdev_uberblock_load_done, zio, flags); + } + } + } + + if (vd == rvd) { + (void) zio_wait(zio); + spa_config_exit(spa, SCL_ALL, FTAG); + } +} + +/* + * On success, increment root zio's count of good writes. + * We only get credit for writes to known-visible vdevs; see spa_vdev_add(). + */ +static void +vdev_uberblock_sync_done(zio_t *zio) +{ + uint64_t *good_writes = zio->io_private; + + if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0) + atomic_add_64(good_writes, 1); +} + +/* + * Write the uberblock to all labels of all leaves of the specified vdev. + */ +static void +vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags) +{ + uberblock_t *ubbuf; + int n; + + for (int c = 0; c < vd->vdev_children; c++) + vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags); + + if (!vd->vdev_ops->vdev_op_leaf) + return; + + if (!vdev_writeable(vd)) + return; + + n = ub->ub_txg & (VDEV_UBERBLOCK_COUNT(vd) - 1); + + ubbuf = zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd)); + bzero(ubbuf, VDEV_UBERBLOCK_SIZE(vd)); + *ubbuf = *ub; + + for (int l = 0; l < VDEV_LABELS; l++) + vdev_label_write(zio, vd, l, ubbuf, + VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd), + vdev_uberblock_sync_done, zio->io_private, + flags | ZIO_FLAG_DONT_PROPAGATE); + + zio_buf_free(ubbuf, VDEV_UBERBLOCK_SIZE(vd)); +} + +int +vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags) +{ + spa_t *spa = svd[0]->vdev_spa; + zio_t *zio; + uint64_t good_writes = 0; + + zio = zio_root(spa, NULL, &good_writes, flags); + + for (int v = 0; v < svdcount; v++) + vdev_uberblock_sync(zio, ub, svd[v], flags); + + (void) zio_wait(zio); + + /* + * Flush the uberblocks to disk. This ensures that the odd labels + * are no longer needed (because the new uberblocks and the even + * labels are safely on disk), so it is safe to overwrite them. 
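+ * (zio_flush() issues a DKIOCFLUSHWRITECACHE ioctl to each leaf vdev,
+ * so the uberblocks just written are on stable storage before any
+ * label is overwritten.)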
+ */ + zio = zio_root(spa, NULL, NULL, flags); + + for (int v = 0; v < svdcount; v++) + zio_flush(zio, svd[v]); + + (void) zio_wait(zio); + + return (good_writes >= 1 ? 0 : EIO); +} + +/* + * On success, increment the count of good writes for our top-level vdev. + */ +static void +vdev_label_sync_done(zio_t *zio) +{ + uint64_t *good_writes = zio->io_private; + + if (zio->io_error == 0) + atomic_add_64(good_writes, 1); +} + +/* + * If there weren't enough good writes, indicate failure to the parent. + */ +static void +vdev_label_sync_top_done(zio_t *zio) +{ + uint64_t *good_writes = zio->io_private; + + if (*good_writes == 0) + zio->io_error = EIO; + + kmem_free(good_writes, sizeof (uint64_t)); +} + +/* + * We ignore errors for log and cache devices, simply free the private data. + */ +static void +vdev_label_sync_ignore_done(zio_t *zio) +{ + kmem_free(zio->io_private, sizeof (uint64_t)); +} + +/* + * Write all even or odd labels to all leaves of the specified vdev. + */ +static void +vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags) +{ + nvlist_t *label; + vdev_phys_t *vp; + char *buf; + size_t buflen; + + for (int c = 0; c < vd->vdev_children; c++) + vdev_label_sync(zio, vd->vdev_child[c], l, txg, flags); + + if (!vd->vdev_ops->vdev_op_leaf) + return; + + if (!vdev_writeable(vd)) + return; + + /* + * Generate a label describing the top-level config to which we belong. + */ + label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE); + + vp = zio_buf_alloc(sizeof (vdev_phys_t)); + bzero(vp, sizeof (vdev_phys_t)); + + buf = vp->vp_nvlist; + buflen = sizeof (vp->vp_nvlist); + + if (nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP) == 0) { + for (; l < VDEV_LABELS; l += 2) { + vdev_label_write(zio, vd, l, vp, + offsetof(vdev_label_t, vl_vdev_phys), + sizeof (vdev_phys_t), + vdev_label_sync_done, zio->io_private, + flags | ZIO_FLAG_DONT_PROPAGATE); + } + } + + zio_buf_free(vp, sizeof (vdev_phys_t)); + nvlist_free(label); +} + +int +vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags) +{ + list_t *dl = &spa->spa_config_dirty_list; + vdev_t *vd; + zio_t *zio; + int error; + + /* + * Write the new labels to disk. + */ + zio = zio_root(spa, NULL, NULL, flags); + + for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) { + uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t), + KM_SLEEP); + zio_t *vio = zio_null(zio, spa, + (vd->vdev_islog || vd->vdev_aux != NULL) ? + vdev_label_sync_ignore_done : vdev_label_sync_top_done, + good_writes, flags); + vdev_label_sync(vio, vd, l, txg, flags); + zio_nowait(vio); + } + + error = zio_wait(zio); + + /* + * Flush the new labels to disk. + */ + zio = zio_root(spa, NULL, NULL, flags); + + for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) + zio_flush(zio, vd); + + (void) zio_wait(zio); + + return (error); +} + +/* + * Sync the uberblock and any changes to the vdev configuration. + * + * The order of operations is carefully crafted to ensure that + * if the system panics or loses power at any time, the state on disk + * is still transactionally consistent. The in-line comments below + * describe the failure semantics at each stage. + * + * Moreover, vdev_config_sync() is designed to be idempotent: if it fails + * at any time, you can just call it again, and it will resume its work. 
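+ *
+ * The overall order is therefore: flush all data writes for the txg,
+ * write and flush the even labels (L0/L2), write and flush the
+ * uberblocks, then write and flush the odd labels (L1/L3).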
+ */ +int +vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg) +{ + spa_t *spa = svd[0]->vdev_spa; + uberblock_t *ub = &spa->spa_uberblock; + vdev_t *vd; + zio_t *zio; + int error; + int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL; + + ASSERT(ub->ub_txg <= txg); + + /* + * If this isn't a resync due to I/O errors, + * and nothing changed in this transaction group, + * and the vdev configuration hasn't changed, + * then there's nothing to do. + */ + if (ub->ub_txg < txg && + uberblock_update(ub, spa->spa_root_vdev, txg) == B_FALSE && + list_is_empty(&spa->spa_config_dirty_list)) + return (0); + + if (txg > spa_freeze_txg(spa)) + return (0); + + ASSERT(txg <= spa->spa_final_txg); + + /* + * Flush the write cache of every disk that's been written to + * in this transaction group. This ensures that all blocks + * written in this txg will be committed to stable storage + * before any uberblock that references them. + */ + zio = zio_root(spa, NULL, NULL, flags); + + for (vd = txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd; + vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg))) + zio_flush(zio, vd); + + (void) zio_wait(zio); + + /* + * Sync out the even labels (L0, L2) for every dirty vdev. If the + * system dies in the middle of this process, that's OK: all of the + * even labels that made it to disk will be newer than any uberblock, + * and will therefore be considered invalid. The odd labels (L1, L3), + * which have not yet been touched, will still be valid. We flush + * the new labels to disk to ensure that all even-label updates + * are committed to stable storage before the uberblock update. + */ + if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) + return (error); + + /* + * Sync the uberblocks to all vdevs in svd[]. + * If the system dies in the middle of this step, there are two cases + * to consider, and the on-disk state is consistent either way: + * + * (1) If none of the new uberblocks made it to disk, then the + * previous uberblock will be the newest, and the odd labels + * (which had not yet been touched) will be valid with respect + * to that uberblock. + * + * (2) If one or more new uberblocks made it to disk, then they + * will be the newest, and the even labels (which had all + * been successfully committed) will be valid with respect + * to the new uberblocks. + */ + if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0) + return (error); + + /* + * Sync out odd labels for every dirty vdev. If the system dies + * in the middle of this process, the even labels and the new + * uberblocks will suffice to open the pool. The next time + * the pool is opened, the first thing we'll do -- before any + * user data is modified -- is mark every vdev dirty so that + * all labels will be brought up to date. We flush the new labels + * to disk to ensure that all odd-label updates are committed to + * stable storage before the next transaction group begins. + */ + return (vdev_label_sync_list(spa, 1, txg, flags)); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_mirror.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_mirror.c new file mode 100644 index 000000000000..c4629ff45087 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_mirror.c @@ -0,0 +1,480 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. 
+ * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include + +/* + * Virtual device vector for mirroring. + */ + +typedef struct mirror_child { + vdev_t *mc_vd; + uint64_t mc_offset; + int mc_error; + uint8_t mc_tried; + uint8_t mc_skipped; + uint8_t mc_speculative; +} mirror_child_t; + +typedef struct mirror_map { + int mm_children; + int mm_replacing; + int mm_preferred; + int mm_root; + mirror_child_t mm_child[1]; +} mirror_map_t; + +int vdev_mirror_shift = 21; + +static void +vdev_mirror_map_free(zio_t *zio) +{ + mirror_map_t *mm = zio->io_vsd; + + kmem_free(mm, offsetof(mirror_map_t, mm_child[mm->mm_children])); +} + +static mirror_map_t * +vdev_mirror_map_alloc(zio_t *zio) +{ + mirror_map_t *mm = NULL; + mirror_child_t *mc; + vdev_t *vd = zio->io_vd; + int c, d; + + if (vd == NULL) { + dva_t *dva = zio->io_bp->blk_dva; + spa_t *spa = zio->io_spa; + + c = BP_GET_NDVAS(zio->io_bp); + + mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP); + mm->mm_children = c; + mm->mm_replacing = B_FALSE; + mm->mm_preferred = spa_get_random(c); + mm->mm_root = B_TRUE; + + /* + * Check the other, lower-index DVAs to see if they're on + * the same vdev as the child we picked. If they are, use + * them since they are likely to have been allocated from + * the primary metaslab in use at the time, and hence are + * more likely to have locality with single-copy data. + */ + for (c = mm->mm_preferred, d = c - 1; d >= 0; d--) { + if (DVA_GET_VDEV(&dva[d]) == DVA_GET_VDEV(&dva[c])) + mm->mm_preferred = d; + } + + for (c = 0; c < mm->mm_children; c++) { + mc = &mm->mm_child[c]; + + mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c])); + mc->mc_offset = DVA_GET_OFFSET(&dva[c]); + } + } else { + c = vd->vdev_children; + + mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP); + mm->mm_children = c; + mm->mm_replacing = (vd->vdev_ops == &vdev_replacing_ops || + vd->vdev_ops == &vdev_spare_ops); + mm->mm_preferred = mm->mm_replacing ? 
0 : + (zio->io_offset >> vdev_mirror_shift) % c; + mm->mm_root = B_FALSE; + + for (c = 0; c < mm->mm_children; c++) { + mc = &mm->mm_child[c]; + mc->mc_vd = vd->vdev_child[c]; + mc->mc_offset = zio->io_offset; + } + } + + zio->io_vsd = mm; + zio->io_vsd_free = vdev_mirror_map_free; + return (mm); +} + +static int +vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift) +{ + vdev_t *cvd; + uint64_t c; + int numerrors = 0; + int ret, lasterror = 0; + + if (vd->vdev_children == 0) { + vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; + return (EINVAL); + } + + for (c = 0; c < vd->vdev_children; c++) { + cvd = vd->vdev_child[c]; + + if ((ret = vdev_open(cvd)) != 0) { + lasterror = ret; + numerrors++; + continue; + } + + *asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1; + *ashift = MAX(*ashift, cvd->vdev_ashift); + } + + if (numerrors == vd->vdev_children) { + vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS; + return (lasterror); + } + + return (0); +} + +static void +vdev_mirror_close(vdev_t *vd) +{ + uint64_t c; + + for (c = 0; c < vd->vdev_children; c++) + vdev_close(vd->vdev_child[c]); +} + +static void +vdev_mirror_child_done(zio_t *zio) +{ + mirror_child_t *mc = zio->io_private; + + mc->mc_error = zio->io_error; + mc->mc_tried = 1; + mc->mc_skipped = 0; +} + +static void +vdev_mirror_scrub_done(zio_t *zio) +{ + mirror_child_t *mc = zio->io_private; + + if (zio->io_error == 0) { + zio_t *pio = zio->io_parent; + mutex_enter(&pio->io_lock); + ASSERT3U(zio->io_size, >=, pio->io_size); + bcopy(zio->io_data, pio->io_data, pio->io_size); + mutex_exit(&pio->io_lock); + } + + zio_buf_free(zio->io_data, zio->io_size); + + mc->mc_error = zio->io_error; + mc->mc_tried = 1; + mc->mc_skipped = 0; +} + +/* + * Try to find a child whose DTL doesn't contain the block we want to read. + * If we can't, try the read on any vdev we haven't already tried. + */ +static int +vdev_mirror_child_select(zio_t *zio) +{ + mirror_map_t *mm = zio->io_vsd; + mirror_child_t *mc; + uint64_t txg = zio->io_txg; + int i, c; + + ASSERT(zio->io_bp == NULL || zio->io_bp->blk_birth == txg); + + /* + * Try to find a child whose DTL doesn't contain the block to read. + * If a child is known to be completely inaccessible (indicated by + * vdev_readable() returning B_FALSE), don't even try. + */ + for (i = 0, c = mm->mm_preferred; i < mm->mm_children; i++, c++) { + if (c >= mm->mm_children) + c = 0; + mc = &mm->mm_child[c]; + if (mc->mc_tried || mc->mc_skipped) + continue; + if (!vdev_readable(mc->mc_vd)) { + mc->mc_error = ENXIO; + mc->mc_tried = 1; /* don't even try */ + mc->mc_skipped = 1; + continue; + } + if (!vdev_dtl_contains(&mc->mc_vd->vdev_dtl_map, txg, 1)) + return (c); + mc->mc_error = ESTALE; + mc->mc_skipped = 1; + mc->mc_speculative = 1; + } + + /* + * Every device is either missing or has this txg in its DTL. + * Look for any child we haven't already tried before giving up. + */ + for (c = 0; c < mm->mm_children; c++) + if (!mm->mm_child[c].mc_tried) + return (c); + + /* + * Every child failed. There's no place left to look. + */ + return (-1); +} + +static int +vdev_mirror_io_start(zio_t *zio) +{ + mirror_map_t *mm; + mirror_child_t *mc; + int c, children; + + mm = vdev_mirror_map_alloc(zio); + + if (zio->io_type == ZIO_TYPE_READ) { + if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) { + /* + * For scrubbing reads we need to allocate a read + * buffer for each child and issue reads to all + * children. If any child succeeds, it will copy its + * data into zio->io_data in vdev_mirror_scrub_done. 
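+ * Reading every child (rather than just one) lets the repair
+ * pass in vdev_mirror_io_done() rewrite any copy whose read
+ * failed.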
+ */ + for (c = 0; c < mm->mm_children; c++) { + mc = &mm->mm_child[c]; + zio_nowait(zio_vdev_child_io(zio, zio->io_bp, + mc->mc_vd, mc->mc_offset, + zio_buf_alloc(zio->io_size), zio->io_size, + zio->io_type, zio->io_priority, 0, + vdev_mirror_scrub_done, mc)); + } + return (ZIO_PIPELINE_CONTINUE); + } + /* + * For normal reads just pick one child. + */ + c = vdev_mirror_child_select(zio); + children = (c >= 0); + } else { + ASSERT(zio->io_type == ZIO_TYPE_WRITE); + + /* + * If this is a resilvering I/O to a replacing vdev, + * only the last child should be written -- unless the + * first child happens to have a DTL entry here as well. + * All other writes go to all children. + */ + if ((zio->io_flags & ZIO_FLAG_RESILVER) && mm->mm_replacing && + !vdev_dtl_contains(&mm->mm_child[0].mc_vd->vdev_dtl_map, + zio->io_txg, 1)) { + c = mm->mm_children - 1; + children = 1; + } else { + c = 0; + children = mm->mm_children; + } + } + + while (children--) { + mc = &mm->mm_child[c]; + zio_nowait(zio_vdev_child_io(zio, zio->io_bp, + mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size, + zio->io_type, zio->io_priority, 0, + vdev_mirror_child_done, mc)); + c++; + } + + return (ZIO_PIPELINE_CONTINUE); +} + +static int +vdev_mirror_worst_error(mirror_map_t *mm) +{ + int error[2] = { 0, 0 }; + + for (int c = 0; c < mm->mm_children; c++) { + mirror_child_t *mc = &mm->mm_child[c]; + int s = mc->mc_speculative; + error[s] = zio_worst_error(error[s], mc->mc_error); + } + + return (error[0] ? error[0] : error[1]); +} + +static void +vdev_mirror_io_done(zio_t *zio) +{ + mirror_map_t *mm = zio->io_vsd; + mirror_child_t *mc; + int c; + int good_copies = 0; + int unexpected_errors = 0; + + for (c = 0; c < mm->mm_children; c++) { + mc = &mm->mm_child[c]; + + if (mc->mc_error) { + if (!mc->mc_skipped) + unexpected_errors++; + } else if (mc->mc_tried) { + good_copies++; + } + } + + if (zio->io_type == ZIO_TYPE_WRITE) { + /* + * XXX -- for now, treat partial writes as success. + * + * Now that we support write reallocation, it would be better + * to treat partial failure as real failure unless there are + * no non-degraded top-level vdevs left, and not update DTLs + * if we intend to reallocate. + */ + /* XXPOLICY */ + if (good_copies != mm->mm_children) { + /* + * Always require at least one good copy. + * + * For ditto blocks (io_vd == NULL), require + * all copies to be good. + * + * XXX -- for replacing vdevs, there's no great answer. + * If the old device is really dead, we may not even + * be able to access it -- so we only want to + * require good writes to the new device. But if + * the new device turns out to be flaky, we want + * to be able to detach it -- which requires all + * writes to the old device to have succeeded. + */ + if (good_copies == 0 || zio->io_vd == NULL) + zio->io_error = vdev_mirror_worst_error(mm); + } + return; + } + + ASSERT(zio->io_type == ZIO_TYPE_READ); + + /* + * If we don't have a good copy yet, keep trying other children. 
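+ * vdev_mirror_child_select() skips children that were already tried,
+ * so each remaining replica is attempted at most once before the read
+ * is finally failed with the worst error seen.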
+ */ + /* XXPOLICY */ + if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) { + ASSERT(c >= 0 && c < mm->mm_children); + mc = &mm->mm_child[c]; + zio_vdev_io_redone(zio); + zio_nowait(zio_vdev_child_io(zio, zio->io_bp, + mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size, + ZIO_TYPE_READ, zio->io_priority, 0, + vdev_mirror_child_done, mc)); + return; + } + + /* XXPOLICY */ + if (good_copies == 0) { + zio->io_error = vdev_mirror_worst_error(mm); + ASSERT(zio->io_error != 0); + } + + if (good_copies && (spa_mode & FWRITE) && + (unexpected_errors || + (zio->io_flags & ZIO_FLAG_RESILVER) || + ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) { + /* + * Use the good data we have in hand to repair damaged children. + */ + for (c = 0; c < mm->mm_children; c++) { + /* + * Don't rewrite known good children. + * Not only is it unnecessary, it could + * actually be harmful: if the system lost + * power while rewriting the only good copy, + * there would be no good copies left! + */ + mc = &mm->mm_child[c]; + + if (mc->mc_error == 0) { + if (mc->mc_tried) + continue; + if (!(zio->io_flags & ZIO_FLAG_SCRUB) && + !vdev_dtl_contains(&mc->mc_vd->vdev_dtl_map, + zio->io_txg, 1)) + continue; + mc->mc_error = ESTALE; + } + + zio_nowait(zio_vdev_child_io(zio, zio->io_bp, + mc->mc_vd, mc->mc_offset, + zio->io_data, zio->io_size, + ZIO_TYPE_WRITE, zio->io_priority, + ZIO_FLAG_IO_REPAIR, NULL, NULL)); + } + } +} + +static void +vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded) +{ + if (faulted == vd->vdev_children) + vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_NO_REPLICAS); + else if (degraded + faulted != 0) + vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE); + else + vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE); +} + +vdev_ops_t vdev_mirror_ops = { + vdev_mirror_open, + vdev_mirror_close, + vdev_default_asize, + vdev_mirror_io_start, + vdev_mirror_io_done, + vdev_mirror_state_change, + VDEV_TYPE_MIRROR, /* name of this vdev type */ + B_FALSE /* not a leaf vdev */ +}; + +vdev_ops_t vdev_replacing_ops = { + vdev_mirror_open, + vdev_mirror_close, + vdev_default_asize, + vdev_mirror_io_start, + vdev_mirror_io_done, + vdev_mirror_state_change, + VDEV_TYPE_REPLACING, /* name of this vdev type */ + B_FALSE /* not a leaf vdev */ +}; + +vdev_ops_t vdev_spare_ops = { + vdev_mirror_open, + vdev_mirror_close, + vdev_default_asize, + vdev_mirror_io_start, + vdev_mirror_io_done, + vdev_mirror_state_change, + VDEV_TYPE_SPARE, /* name of this vdev type */ + B_FALSE /* not a leaf vdev */ +}; diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_missing.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_missing.c new file mode 100644 index 000000000000..731f7d3dcec9 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_missing.c @@ -0,0 +1,85 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +/* + * The 'missing' vdev is a special vdev type used only during import. It + * signifies a placeholder in the root vdev for some vdev that we know is + * missing. We pass it down to the kernel to allow the rest of the + * configuration to parsed and an attempt made to open all available devices. + * Because its GUID is always 0, we know that the guid sum will mismatch and we + * won't be able to open the pool anyway. + */ + +#include +#include +#include +#include +#include + +/* ARGSUSED */ +static int +vdev_missing_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift) +{ + /* + * Really this should just fail. But then the root vdev will be in the + * faulted state with VDEV_AUX_NO_REPLICAS, when what we really want is + * VDEV_AUX_BAD_GUID_SUM. So we pretend to succeed, knowing that we + * will fail the GUID sum check before ever trying to open the pool. + */ + *psize = SPA_MINDEVSIZE; + *ashift = SPA_MINBLOCKSHIFT; + return (0); +} + +/* ARGSUSED */ +static void +vdev_missing_close(vdev_t *vd) +{ +} + +/* ARGSUSED */ +static int +vdev_missing_io_start(zio_t *zio) +{ + zio->io_error = ENOTSUP; + return (ZIO_PIPELINE_CONTINUE); +} + +/* ARGSUSED */ +static void +vdev_missing_io_done(zio_t *zio) +{ +} + +vdev_ops_t vdev_missing_ops = { + vdev_missing_open, + vdev_missing_close, + vdev_default_asize, + vdev_missing_io_start, + vdev_missing_io_done, + NULL, + VDEV_TYPE_MISSING, /* name of this vdev type */ + B_TRUE /* leaf vdev */ +}; diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_queue.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_queue.c new file mode 100644 index 000000000000..46fca0e3b629 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_queue.c @@ -0,0 +1,308 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include + +/* + * These tunables are for performance analysis. + */ +/* + * zfs_vdev_max_pending is the maximum number of i/os concurrently + * pending to each device. zfs_vdev_min_pending is the initial number + * of i/os pending to each device (before it starts ramping up to + * max_pending). 
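+ *
+ * With the defaults below, each leaf device starts with 4 concurrent
+ * i/os; every completion may issue up to zfs_vdev_ramp_rate new ones,
+ * ramping toward the cap of 35.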
+ */ +int zfs_vdev_max_pending = 35; +int zfs_vdev_min_pending = 4; + +/* deadline = pri + (lbolt >> time_shift) */ +int zfs_vdev_time_shift = 6; + +/* exponential I/O issue ramp-up rate */ +int zfs_vdev_ramp_rate = 2; + +/* + * i/os will be aggregated into a single large i/o up to + * zfs_vdev_aggregation_limit bytes long. + */ +int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE; + +/* + * Virtual device vector for disk I/O scheduling. + */ +int +vdev_queue_deadline_compare(const void *x1, const void *x2) +{ + const zio_t *z1 = x1; + const zio_t *z2 = x2; + + if (z1->io_deadline < z2->io_deadline) + return (-1); + if (z1->io_deadline > z2->io_deadline) + return (1); + + if (z1->io_offset < z2->io_offset) + return (-1); + if (z1->io_offset > z2->io_offset) + return (1); + + if (z1 < z2) + return (-1); + if (z1 > z2) + return (1); + + return (0); +} + +int +vdev_queue_offset_compare(const void *x1, const void *x2) +{ + const zio_t *z1 = x1; + const zio_t *z2 = x2; + + if (z1->io_offset < z2->io_offset) + return (-1); + if (z1->io_offset > z2->io_offset) + return (1); + + if (z1 < z2) + return (-1); + if (z1 > z2) + return (1); + + return (0); +} + +void +vdev_queue_init(vdev_t *vd) +{ + vdev_queue_t *vq = &vd->vdev_queue; + + mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL); + + avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare, + sizeof (zio_t), offsetof(struct zio, io_deadline_node)); + + avl_create(&vq->vq_read_tree, vdev_queue_offset_compare, + sizeof (zio_t), offsetof(struct zio, io_offset_node)); + + avl_create(&vq->vq_write_tree, vdev_queue_offset_compare, + sizeof (zio_t), offsetof(struct zio, io_offset_node)); + + avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare, + sizeof (zio_t), offsetof(struct zio, io_offset_node)); +} + +void +vdev_queue_fini(vdev_t *vd) +{ + vdev_queue_t *vq = &vd->vdev_queue; + + avl_destroy(&vq->vq_deadline_tree); + avl_destroy(&vq->vq_read_tree); + avl_destroy(&vq->vq_write_tree); + avl_destroy(&vq->vq_pending_tree); + + mutex_destroy(&vq->vq_lock); +} + +static void +vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio) +{ + avl_add(&vq->vq_deadline_tree, zio); + avl_add(zio->io_vdev_tree, zio); +} + +static void +vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio) +{ + avl_remove(&vq->vq_deadline_tree, zio); + avl_remove(zio->io_vdev_tree, zio); +} + +static void +vdev_queue_agg_io_done(zio_t *aio) +{ + zio_t *dio; + uint64_t offset = 0; + + while ((dio = aio->io_delegate_list) != NULL) { + if (aio->io_type == ZIO_TYPE_READ) + bcopy((char *)aio->io_data + offset, dio->io_data, + dio->io_size); + offset += dio->io_size; + aio->io_delegate_list = dio->io_delegate_next; + dio->io_delegate_next = NULL; + dio->io_error = aio->io_error; + zio_execute(dio); + } + ASSERT3U(offset, ==, aio->io_size); + + zio_buf_free(aio->io_data, aio->io_size); +} + +#define IS_ADJACENT(io, nio) \ + ((io)->io_offset + (io)->io_size == (nio)->io_offset) + +static zio_t * +vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit) +{ + zio_t *fio, *lio, *aio, *dio; + avl_tree_t *tree; + uint64_t size; + + ASSERT(MUTEX_HELD(&vq->vq_lock)); + + if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit || + avl_numnodes(&vq->vq_deadline_tree) == 0) + return (NULL); + + fio = lio = avl_first(&vq->vq_deadline_tree); + + tree = fio->io_vdev_tree; + size = fio->io_size; + + while ((dio = AVL_PREV(tree, fio)) != NULL && IS_ADJACENT(dio, fio) && + !((dio->io_flags | fio->io_flags) & ZIO_FLAG_DONT_AGGREGATE) && + size + dio->io_size <= zfs_vdev_aggregation_limit) { + 
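+		/*
+		 * dio is offset-adjacent to fio, neither I/O forbids
+		 * aggregation, and the combined size still fits within
+		 * zfs_vdev_aggregation_limit, so prepend dio to the candidate
+		 * aggregate and keep scanning backward; the loop below does
+		 * the same scanning forward from lio.
+		 */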
dio->io_delegate_next = fio; + fio = dio; + size += dio->io_size; + } + + while ((dio = AVL_NEXT(tree, lio)) != NULL && IS_ADJACENT(lio, dio) && + !((lio->io_flags | dio->io_flags) & ZIO_FLAG_DONT_AGGREGATE) && + size + dio->io_size <= zfs_vdev_aggregation_limit) { + lio->io_delegate_next = dio; + lio = dio; + size += dio->io_size; + } + + if (fio != lio) { + char *buf = zio_buf_alloc(size); + uint64_t offset = 0; + + ASSERT(size <= zfs_vdev_aggregation_limit); + + aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset, + buf, size, fio->io_type, ZIO_PRIORITY_NOW, + ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE, + vdev_queue_agg_io_done, NULL); + + aio->io_delegate_list = fio; + + for (dio = fio; dio != NULL; dio = dio->io_delegate_next) { + ASSERT(dio->io_type == aio->io_type); + ASSERT(dio->io_vdev_tree == tree); + if (dio->io_type == ZIO_TYPE_WRITE) + bcopy(dio->io_data, buf + offset, dio->io_size); + offset += dio->io_size; + vdev_queue_io_remove(vq, dio); + zio_vdev_io_bypass(dio); + } + + ASSERT(offset == size); + + avl_add(&vq->vq_pending_tree, aio); + + return (aio); + } + + ASSERT(fio->io_vdev_tree == tree); + vdev_queue_io_remove(vq, fio); + + avl_add(&vq->vq_pending_tree, fio); + + return (fio); +} + +zio_t * +vdev_queue_io(zio_t *zio) +{ + vdev_queue_t *vq = &zio->io_vd->vdev_queue; + zio_t *nio; + + ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); + + if (zio->io_flags & ZIO_FLAG_DONT_QUEUE) + return (zio); + + zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE; + + if (zio->io_type == ZIO_TYPE_READ) + zio->io_vdev_tree = &vq->vq_read_tree; + else + zio->io_vdev_tree = &vq->vq_write_tree; + + mutex_enter(&vq->vq_lock); + + zio->io_deadline = (lbolt64 >> zfs_vdev_time_shift) + zio->io_priority; + + vdev_queue_io_add(vq, zio); + + nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending); + + mutex_exit(&vq->vq_lock); + + if (nio == NULL) + return (NULL); + + if (nio->io_done == vdev_queue_agg_io_done) { + zio_nowait(nio); + return (NULL); + } + + return (nio); +} + +void +vdev_queue_io_done(zio_t *zio) +{ + vdev_queue_t *vq = &zio->io_vd->vdev_queue; + + mutex_enter(&vq->vq_lock); + + avl_remove(&vq->vq_pending_tree, zio); + + for (int i = 0; i < zfs_vdev_ramp_rate; i++) { + zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending); + if (nio == NULL) + break; + mutex_exit(&vq->vq_lock); + if (nio->io_done == vdev_queue_agg_io_done) { + zio_nowait(nio); + } else { + zio_vdev_io_reissue(nio); + zio_execute(nio); + } + mutex_enter(&vq->vq_lock); + } + + mutex_exit(&vq->vq_lock); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_raidz.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_raidz.c new file mode 100644 index 000000000000..69e314468ee4 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_raidz.c @@ -0,0 +1,1209 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ + +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Virtual device vector for RAID-Z. + * + * This vdev supports both single and double parity. For single parity, we + * use a simple XOR of all the data columns. For double parity, we use both + * the simple XOR as well as a technique described in "The mathematics of + * RAID-6" by H. Peter Anvin. This technique defines a Galois field, GF(2^8), + * over the integers expressable in a single byte. Briefly, the operations on + * the field are defined as follows: + * + * o addition (+) is represented by a bitwise XOR + * o subtraction (-) is therefore identical to addition: A + B = A - B + * o multiplication of A by 2 is defined by the following bitwise expression: + * (A * 2)_7 = A_6 + * (A * 2)_6 = A_5 + * (A * 2)_5 = A_4 + * (A * 2)_4 = A_3 + A_7 + * (A * 2)_3 = A_2 + A_7 + * (A * 2)_2 = A_1 + A_7 + * (A * 2)_1 = A_0 + * (A * 2)_0 = A_7 + * + * In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)). + * + * Observe that any number in the field (except for 0) can be expressed as a + * power of 2 -- a generator for the field. We store a table of the powers of + * 2 and logs base 2 for quick look ups, and exploit the fact that A * B can + * be rewritten as 2^(log_2(A) + log_2(B)) (where '+' is normal addition rather + * than field addition). The inverse of a field element A (A^-1) is A^254. + * + * The two parity columns, P and Q, over several data columns, D_0, ... D_n-1, + * can be expressed by field operations: + * + * P = D_0 + D_1 + ... + D_n-2 + D_n-1 + * Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1 + * = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1 + * + * See the reconstruction code below for how P and Q can used individually or + * in concert to recover missing data columns. + */ + +typedef struct raidz_col { + uint64_t rc_devidx; /* child device index for I/O */ + uint64_t rc_offset; /* device offset */ + uint64_t rc_size; /* I/O size */ + void *rc_data; /* I/O data */ + int rc_error; /* I/O error for this device */ + uint8_t rc_tried; /* Did we attempt this I/O column? */ + uint8_t rc_skipped; /* Did we skip this I/O column? */ +} raidz_col_t; + +typedef struct raidz_map { + uint64_t rm_cols; /* Column count */ + uint64_t rm_bigcols; /* Number of oversized columns */ + uint64_t rm_asize; /* Actual total I/O size */ + uint64_t rm_missingdata; /* Count of missing data devices */ + uint64_t rm_missingparity; /* Count of missing parity devices */ + uint64_t rm_firstdatacol; /* First data column/parity count */ + raidz_col_t rm_col[1]; /* Flexible array of I/O columns */ +} raidz_map_t; + +#define VDEV_RAIDZ_P 0 +#define VDEV_RAIDZ_Q 1 + +#define VDEV_RAIDZ_MAXPARITY 2 + +#define VDEV_RAIDZ_MUL_2(a) (((a) << 1) ^ (((a) & 0x80) ? 0x1d : 0)) + +/* + * These two tables represent powers and logs of 2 in the Galois field defined + * above. These values were computed by repeatedly multiplying by 2 as above. 
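/*
 * Illustrative sketch: general multiplication of two field elements using
 * the identity A * B = 2^(log_2(A) + log_2(B)) described above, via the
 * vdev_raidz_pow2 and vdev_raidz_log2 tables defined below. The name
 * gf_mul() is hypothetical; the driver itself only needs vdev_raidz_exp2()
 * further down.
 */
static uint8_t
gf_mul(uint8_t a, uint8_t b)
{
	int exp;

	if (a == 0 || b == 0)
		return (0);

	/*
	 * Add the discrete logs; the multiplicative group has order 255,
	 * and vdev_raidz_pow2[255] == vdev_raidz_pow2[0], so any exponent
	 * above 255 only needs a single subtraction of 255.
	 */
	exp = vdev_raidz_log2[a] + vdev_raidz_log2[b];
	if (exp > 255)
		exp -= 255;

	return (vdev_raidz_pow2[exp]);
}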
+ */ +static const uint8_t vdev_raidz_pow2[256] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x1d, 0x3a, 0x74, 0xe8, 0xcd, 0x87, 0x13, 0x26, + 0x4c, 0x98, 0x2d, 0x5a, 0xb4, 0x75, 0xea, 0xc9, + 0x8f, 0x03, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, + 0x9d, 0x27, 0x4e, 0x9c, 0x25, 0x4a, 0x94, 0x35, + 0x6a, 0xd4, 0xb5, 0x77, 0xee, 0xc1, 0x9f, 0x23, + 0x46, 0x8c, 0x05, 0x0a, 0x14, 0x28, 0x50, 0xa0, + 0x5d, 0xba, 0x69, 0xd2, 0xb9, 0x6f, 0xde, 0xa1, + 0x5f, 0xbe, 0x61, 0xc2, 0x99, 0x2f, 0x5e, 0xbc, + 0x65, 0xca, 0x89, 0x0f, 0x1e, 0x3c, 0x78, 0xf0, + 0xfd, 0xe7, 0xd3, 0xbb, 0x6b, 0xd6, 0xb1, 0x7f, + 0xfe, 0xe1, 0xdf, 0xa3, 0x5b, 0xb6, 0x71, 0xe2, + 0xd9, 0xaf, 0x43, 0x86, 0x11, 0x22, 0x44, 0x88, + 0x0d, 0x1a, 0x34, 0x68, 0xd0, 0xbd, 0x67, 0xce, + 0x81, 0x1f, 0x3e, 0x7c, 0xf8, 0xed, 0xc7, 0x93, + 0x3b, 0x76, 0xec, 0xc5, 0x97, 0x33, 0x66, 0xcc, + 0x85, 0x17, 0x2e, 0x5c, 0xb8, 0x6d, 0xda, 0xa9, + 0x4f, 0x9e, 0x21, 0x42, 0x84, 0x15, 0x2a, 0x54, + 0xa8, 0x4d, 0x9a, 0x29, 0x52, 0xa4, 0x55, 0xaa, + 0x49, 0x92, 0x39, 0x72, 0xe4, 0xd5, 0xb7, 0x73, + 0xe6, 0xd1, 0xbf, 0x63, 0xc6, 0x91, 0x3f, 0x7e, + 0xfc, 0xe5, 0xd7, 0xb3, 0x7b, 0xf6, 0xf1, 0xff, + 0xe3, 0xdb, 0xab, 0x4b, 0x96, 0x31, 0x62, 0xc4, + 0x95, 0x37, 0x6e, 0xdc, 0xa5, 0x57, 0xae, 0x41, + 0x82, 0x19, 0x32, 0x64, 0xc8, 0x8d, 0x07, 0x0e, + 0x1c, 0x38, 0x70, 0xe0, 0xdd, 0xa7, 0x53, 0xa6, + 0x51, 0xa2, 0x59, 0xb2, 0x79, 0xf2, 0xf9, 0xef, + 0xc3, 0x9b, 0x2b, 0x56, 0xac, 0x45, 0x8a, 0x09, + 0x12, 0x24, 0x48, 0x90, 0x3d, 0x7a, 0xf4, 0xf5, + 0xf7, 0xf3, 0xfb, 0xeb, 0xcb, 0x8b, 0x0b, 0x16, + 0x2c, 0x58, 0xb0, 0x7d, 0xfa, 0xe9, 0xcf, 0x83, + 0x1b, 0x36, 0x6c, 0xd8, 0xad, 0x47, 0x8e, 0x01 +}; +static const uint8_t vdev_raidz_log2[256] = { + 0x00, 0x00, 0x01, 0x19, 0x02, 0x32, 0x1a, 0xc6, + 0x03, 0xdf, 0x33, 0xee, 0x1b, 0x68, 0xc7, 0x4b, + 0x04, 0x64, 0xe0, 0x0e, 0x34, 0x8d, 0xef, 0x81, + 0x1c, 0xc1, 0x69, 0xf8, 0xc8, 0x08, 0x4c, 0x71, + 0x05, 0x8a, 0x65, 0x2f, 0xe1, 0x24, 0x0f, 0x21, + 0x35, 0x93, 0x8e, 0xda, 0xf0, 0x12, 0x82, 0x45, + 0x1d, 0xb5, 0xc2, 0x7d, 0x6a, 0x27, 0xf9, 0xb9, + 0xc9, 0x9a, 0x09, 0x78, 0x4d, 0xe4, 0x72, 0xa6, + 0x06, 0xbf, 0x8b, 0x62, 0x66, 0xdd, 0x30, 0xfd, + 0xe2, 0x98, 0x25, 0xb3, 0x10, 0x91, 0x22, 0x88, + 0x36, 0xd0, 0x94, 0xce, 0x8f, 0x96, 0xdb, 0xbd, + 0xf1, 0xd2, 0x13, 0x5c, 0x83, 0x38, 0x46, 0x40, + 0x1e, 0x42, 0xb6, 0xa3, 0xc3, 0x48, 0x7e, 0x6e, + 0x6b, 0x3a, 0x28, 0x54, 0xfa, 0x85, 0xba, 0x3d, + 0xca, 0x5e, 0x9b, 0x9f, 0x0a, 0x15, 0x79, 0x2b, + 0x4e, 0xd4, 0xe5, 0xac, 0x73, 0xf3, 0xa7, 0x57, + 0x07, 0x70, 0xc0, 0xf7, 0x8c, 0x80, 0x63, 0x0d, + 0x67, 0x4a, 0xde, 0xed, 0x31, 0xc5, 0xfe, 0x18, + 0xe3, 0xa5, 0x99, 0x77, 0x26, 0xb8, 0xb4, 0x7c, + 0x11, 0x44, 0x92, 0xd9, 0x23, 0x20, 0x89, 0x2e, + 0x37, 0x3f, 0xd1, 0x5b, 0x95, 0xbc, 0xcf, 0xcd, + 0x90, 0x87, 0x97, 0xb2, 0xdc, 0xfc, 0xbe, 0x61, + 0xf2, 0x56, 0xd3, 0xab, 0x14, 0x2a, 0x5d, 0x9e, + 0x84, 0x3c, 0x39, 0x53, 0x47, 0x6d, 0x41, 0xa2, + 0x1f, 0x2d, 0x43, 0xd8, 0xb7, 0x7b, 0xa4, 0x76, + 0xc4, 0x17, 0x49, 0xec, 0x7f, 0x0c, 0x6f, 0xf6, + 0x6c, 0xa1, 0x3b, 0x52, 0x29, 0x9d, 0x55, 0xaa, + 0xfb, 0x60, 0x86, 0xb1, 0xbb, 0xcc, 0x3e, 0x5a, + 0xcb, 0x59, 0x5f, 0xb0, 0x9c, 0xa9, 0xa0, 0x51, + 0x0b, 0xf5, 0x16, 0xeb, 0x7a, 0x75, 0x2c, 0xd7, + 0x4f, 0xae, 0xd5, 0xe9, 0xe6, 0xe7, 0xad, 0xe8, + 0x74, 0xd6, 0xf4, 0xea, 0xa8, 0x50, 0x58, 0xaf, +}; + +/* + * Multiply a given number by 2 raised to the given power. 
+ */ +static uint8_t +vdev_raidz_exp2(uint_t a, int exp) +{ + if (a == 0) + return (0); + + ASSERT(exp >= 0); + ASSERT(vdev_raidz_log2[a] > 0 || a == 1); + + exp += vdev_raidz_log2[a]; + if (exp > 255) + exp -= 255; + + return (vdev_raidz_pow2[exp]); +} + +static void +vdev_raidz_map_free(zio_t *zio) +{ + raidz_map_t *rm = zio->io_vsd; + int c; + + for (c = 0; c < rm->rm_firstdatacol; c++) + zio_buf_free(rm->rm_col[c].rc_data, rm->rm_col[c].rc_size); + + kmem_free(rm, offsetof(raidz_map_t, rm_col[rm->rm_cols])); +} + +static raidz_map_t * +vdev_raidz_map_alloc(zio_t *zio, uint64_t unit_shift, uint64_t dcols, + uint64_t nparity) +{ + raidz_map_t *rm; + uint64_t b = zio->io_offset >> unit_shift; + uint64_t s = zio->io_size >> unit_shift; + uint64_t f = b % dcols; + uint64_t o = (b / dcols) << unit_shift; + uint64_t q, r, c, bc, col, acols, coff, devidx; + + q = s / (dcols - nparity); + r = s - q * (dcols - nparity); + bc = (r == 0 ? 0 : r + nparity); + + acols = (q == 0 ? bc : dcols); + + rm = kmem_alloc(offsetof(raidz_map_t, rm_col[acols]), KM_SLEEP); + + rm->rm_cols = acols; + rm->rm_bigcols = bc; + rm->rm_asize = 0; + rm->rm_missingdata = 0; + rm->rm_missingparity = 0; + rm->rm_firstdatacol = nparity; + + for (c = 0; c < acols; c++) { + col = f + c; + coff = o; + if (col >= dcols) { + col -= dcols; + coff += 1ULL << unit_shift; + } + rm->rm_col[c].rc_devidx = col; + rm->rm_col[c].rc_offset = coff; + rm->rm_col[c].rc_size = (q + (c < bc)) << unit_shift; + rm->rm_col[c].rc_data = NULL; + rm->rm_col[c].rc_error = 0; + rm->rm_col[c].rc_tried = 0; + rm->rm_col[c].rc_skipped = 0; + rm->rm_asize += rm->rm_col[c].rc_size; + } + + rm->rm_asize = roundup(rm->rm_asize, (nparity + 1) << unit_shift); + + for (c = 0; c < rm->rm_firstdatacol; c++) + rm->rm_col[c].rc_data = zio_buf_alloc(rm->rm_col[c].rc_size); + + rm->rm_col[c].rc_data = zio->io_data; + + for (c = c + 1; c < acols; c++) + rm->rm_col[c].rc_data = (char *)rm->rm_col[c - 1].rc_data + + rm->rm_col[c - 1].rc_size; + + /* + * If all data stored spans all columns, there's a danger that parity + * will always be on the same device and, since parity isn't read + * during normal operation, that that device's I/O bandwidth won't be + * used effectively. We therefore switch the parity every 1MB. + * + * ... at least that was, ostensibly, the theory. As a practical + * matter unless we juggle the parity between all devices evenly, we + * won't see any benefit. Further, occasional writes that aren't a + * multiple of the LCM of the number of children and the minimum + * stripe width are sufficient to avoid pessimal behavior. + * Unfortunately, this decision created an implicit on-disk format + * requirement that we need to support for all eternity, but only + * for single-parity RAID-Z. 
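+ *
+ * Concretely, the test below looks at bit 20 of the logical offset: a
+ * single-parity I/O that falls in an odd-numbered megabyte has its P
+ * column swapped with the first data column, so parity alternates
+ * between the first two devices of the map every 1MB.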
+ */ + ASSERT(rm->rm_cols >= 2); + ASSERT(rm->rm_col[0].rc_size == rm->rm_col[1].rc_size); + + if (rm->rm_firstdatacol == 1 && (zio->io_offset & (1ULL << 20))) { + devidx = rm->rm_col[0].rc_devidx; + o = rm->rm_col[0].rc_offset; + rm->rm_col[0].rc_devidx = rm->rm_col[1].rc_devidx; + rm->rm_col[0].rc_offset = rm->rm_col[1].rc_offset; + rm->rm_col[1].rc_devidx = devidx; + rm->rm_col[1].rc_offset = o; + } + + zio->io_vsd = rm; + zio->io_vsd_free = vdev_raidz_map_free; + return (rm); +} + +static void +vdev_raidz_generate_parity_p(raidz_map_t *rm) +{ + uint64_t *p, *src, pcount, ccount, i; + int c; + + pcount = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (src[0]); + + for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) { + src = rm->rm_col[c].rc_data; + p = rm->rm_col[VDEV_RAIDZ_P].rc_data; + ccount = rm->rm_col[c].rc_size / sizeof (src[0]); + + if (c == rm->rm_firstdatacol) { + ASSERT(ccount == pcount); + for (i = 0; i < ccount; i++, p++, src++) { + *p = *src; + } + } else { + ASSERT(ccount <= pcount); + for (i = 0; i < ccount; i++, p++, src++) { + *p ^= *src; + } + } + } +} + +static void +vdev_raidz_generate_parity_pq(raidz_map_t *rm) +{ + uint64_t *q, *p, *src, pcount, ccount, mask, i; + int c; + + pcount = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (src[0]); + ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size == + rm->rm_col[VDEV_RAIDZ_Q].rc_size); + + for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) { + src = rm->rm_col[c].rc_data; + p = rm->rm_col[VDEV_RAIDZ_P].rc_data; + q = rm->rm_col[VDEV_RAIDZ_Q].rc_data; + ccount = rm->rm_col[c].rc_size / sizeof (src[0]); + + if (c == rm->rm_firstdatacol) { + ASSERT(ccount == pcount || ccount == 0); + for (i = 0; i < ccount; i++, p++, q++, src++) { + *q = *src; + *p = *src; + } + for (; i < pcount; i++, p++, q++, src++) { + *q = 0; + *p = 0; + } + } else { + ASSERT(ccount <= pcount); + + /* + * Rather than multiplying each byte individually (as + * described above), we are able to handle 8 at once + * by generating a mask based on the high bit in each + * byte and using that to conditionally XOR in 0x1d. + */ + for (i = 0; i < ccount; i++, p++, q++, src++) { + mask = *q & 0x8080808080808080ULL; + mask = (mask << 1) - (mask >> 7); + *q = ((*q << 1) & 0xfefefefefefefefeULL) ^ + (mask & 0x1d1d1d1d1d1d1d1dULL); + *q ^= *src; + *p ^= *src; + } + + /* + * Treat short columns as though they are full of 0s. 
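+ *
+ * (As in the loop above, each masked 64-bit update is just the byte-wise
+ * q = VDEV_RAIDZ_MUL_2(q) ^ src done eight bytes at a time -- here with
+ * src treated as zero, leaving only the multiply by 2.)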
+ */ + for (; i < pcount; i++, q++) { + mask = *q & 0x8080808080808080ULL; + mask = (mask << 1) - (mask >> 7); + *q = ((*q << 1) & 0xfefefefefefefefeULL) ^ + (mask & 0x1d1d1d1d1d1d1d1dULL); + } + } + } +} + +static void +vdev_raidz_reconstruct_p(raidz_map_t *rm, int x) +{ + uint64_t *dst, *src, xcount, ccount, count, i; + int c; + + xcount = rm->rm_col[x].rc_size / sizeof (src[0]); + ASSERT(xcount <= rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (src[0])); + ASSERT(xcount > 0); + + src = rm->rm_col[VDEV_RAIDZ_P].rc_data; + dst = rm->rm_col[x].rc_data; + for (i = 0; i < xcount; i++, dst++, src++) { + *dst = *src; + } + + for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) { + src = rm->rm_col[c].rc_data; + dst = rm->rm_col[x].rc_data; + + if (c == x) + continue; + + ccount = rm->rm_col[c].rc_size / sizeof (src[0]); + count = MIN(ccount, xcount); + + for (i = 0; i < count; i++, dst++, src++) { + *dst ^= *src; + } + } +} + +static void +vdev_raidz_reconstruct_q(raidz_map_t *rm, int x) +{ + uint64_t *dst, *src, xcount, ccount, count, mask, i; + uint8_t *b; + int c, j, exp; + + xcount = rm->rm_col[x].rc_size / sizeof (src[0]); + ASSERT(xcount <= rm->rm_col[VDEV_RAIDZ_Q].rc_size / sizeof (src[0])); + + for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) { + src = rm->rm_col[c].rc_data; + dst = rm->rm_col[x].rc_data; + + if (c == x) + ccount = 0; + else + ccount = rm->rm_col[c].rc_size / sizeof (src[0]); + + count = MIN(ccount, xcount); + + if (c == rm->rm_firstdatacol) { + for (i = 0; i < count; i++, dst++, src++) { + *dst = *src; + } + for (; i < xcount; i++, dst++) { + *dst = 0; + } + + } else { + /* + * For an explanation of this, see the comment in + * vdev_raidz_generate_parity_pq() above. + */ + for (i = 0; i < count; i++, dst++, src++) { + mask = *dst & 0x8080808080808080ULL; + mask = (mask << 1) - (mask >> 7); + *dst = ((*dst << 1) & 0xfefefefefefefefeULL) ^ + (mask & 0x1d1d1d1d1d1d1d1dULL); + *dst ^= *src; + } + + for (; i < xcount; i++, dst++) { + mask = *dst & 0x8080808080808080ULL; + mask = (mask << 1) - (mask >> 7); + *dst = ((*dst << 1) & 0xfefefefefefefefeULL) ^ + (mask & 0x1d1d1d1d1d1d1d1dULL); + } + } + } + + src = rm->rm_col[VDEV_RAIDZ_Q].rc_data; + dst = rm->rm_col[x].rc_data; + exp = 255 - (rm->rm_cols - 1 - x); + + for (i = 0; i < xcount; i++, dst++, src++) { + *dst ^= *src; + for (j = 0, b = (uint8_t *)dst; j < 8; j++, b++) { + *b = vdev_raidz_exp2(*b, exp); + } + } +} + +static void +vdev_raidz_reconstruct_pq(raidz_map_t *rm, int x, int y) +{ + uint8_t *p, *q, *pxy, *qxy, *xd, *yd, tmp, a, b, aexp, bexp; + void *pdata, *qdata; + uint64_t xsize, ysize, i; + + ASSERT(x < y); + ASSERT(x >= rm->rm_firstdatacol); + ASSERT(y < rm->rm_cols); + + ASSERT(rm->rm_col[x].rc_size >= rm->rm_col[y].rc_size); + + /* + * Move the parity data aside -- we're going to compute parity as + * though columns x and y were full of zeros -- Pxy and Qxy. We want to + * reuse the parity generation mechanism without trashing the actual + * parity so we make those columns appear to be full of zeros by + * setting their lengths to zero. 
+ */ + pdata = rm->rm_col[VDEV_RAIDZ_P].rc_data; + qdata = rm->rm_col[VDEV_RAIDZ_Q].rc_data; + xsize = rm->rm_col[x].rc_size; + ysize = rm->rm_col[y].rc_size; + + rm->rm_col[VDEV_RAIDZ_P].rc_data = + zio_buf_alloc(rm->rm_col[VDEV_RAIDZ_P].rc_size); + rm->rm_col[VDEV_RAIDZ_Q].rc_data = + zio_buf_alloc(rm->rm_col[VDEV_RAIDZ_Q].rc_size); + rm->rm_col[x].rc_size = 0; + rm->rm_col[y].rc_size = 0; + + vdev_raidz_generate_parity_pq(rm); + + rm->rm_col[x].rc_size = xsize; + rm->rm_col[y].rc_size = ysize; + + p = pdata; + q = qdata; + pxy = rm->rm_col[VDEV_RAIDZ_P].rc_data; + qxy = rm->rm_col[VDEV_RAIDZ_Q].rc_data; + xd = rm->rm_col[x].rc_data; + yd = rm->rm_col[y].rc_data; + + /* + * We now have: + * Pxy = P + D_x + D_y + * Qxy = Q + 2^(ndevs - 1 - x) * D_x + 2^(ndevs - 1 - y) * D_y + * + * We can then solve for D_x: + * D_x = A * (P + Pxy) + B * (Q + Qxy) + * where + * A = 2^(x - y) * (2^(x - y) + 1)^-1 + * B = 2^(ndevs - 1 - x) * (2^(x - y) + 1)^-1 + * + * With D_x in hand, we can easily solve for D_y: + * D_y = P + Pxy + D_x + */ + + a = vdev_raidz_pow2[255 + x - y]; + b = vdev_raidz_pow2[255 - (rm->rm_cols - 1 - x)]; + tmp = 255 - vdev_raidz_log2[a ^ 1]; + + aexp = vdev_raidz_log2[vdev_raidz_exp2(a, tmp)]; + bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)]; + + for (i = 0; i < xsize; i++, p++, q++, pxy++, qxy++, xd++, yd++) { + *xd = vdev_raidz_exp2(*p ^ *pxy, aexp) ^ + vdev_raidz_exp2(*q ^ *qxy, bexp); + + if (i < ysize) + *yd = *p ^ *pxy ^ *xd; + } + + zio_buf_free(rm->rm_col[VDEV_RAIDZ_P].rc_data, + rm->rm_col[VDEV_RAIDZ_P].rc_size); + zio_buf_free(rm->rm_col[VDEV_RAIDZ_Q].rc_data, + rm->rm_col[VDEV_RAIDZ_Q].rc_size); + + /* + * Restore the saved parity data. + */ + rm->rm_col[VDEV_RAIDZ_P].rc_data = pdata; + rm->rm_col[VDEV_RAIDZ_Q].rc_data = qdata; +} + + +static int +vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift) +{ + vdev_t *cvd; + uint64_t nparity = vd->vdev_nparity; + int c, error; + int lasterror = 0; + int numerrors = 0; + + ASSERT(nparity > 0); + + if (nparity > VDEV_RAIDZ_MAXPARITY || + vd->vdev_children < nparity + 1) { + vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; + return (EINVAL); + } + + for (c = 0; c < vd->vdev_children; c++) { + cvd = vd->vdev_child[c]; + + if ((error = vdev_open(cvd)) != 0) { + lasterror = error; + numerrors++; + continue; + } + + *asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1; + *ashift = MAX(*ashift, cvd->vdev_ashift); + } + + *asize *= vd->vdev_children; + + if (numerrors > nparity) { + vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS; + return (lasterror); + } + + return (0); +} + +static void +vdev_raidz_close(vdev_t *vd) +{ + int c; + + for (c = 0; c < vd->vdev_children; c++) + vdev_close(vd->vdev_child[c]); +} + +static uint64_t +vdev_raidz_asize(vdev_t *vd, uint64_t psize) +{ + uint64_t asize; + uint64_t ashift = vd->vdev_top->vdev_ashift; + uint64_t cols = vd->vdev_children; + uint64_t nparity = vd->vdev_nparity; + + asize = ((psize - 1) >> ashift) + 1; + asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity)); + asize = roundup(asize, nparity + 1) << ashift; + + return (asize); +} + +static void +vdev_raidz_child_done(zio_t *zio) +{ + raidz_col_t *rc = zio->io_private; + + rc->rc_error = zio->io_error; + rc->rc_tried = 1; + rc->rc_skipped = 0; +} + +static int +vdev_raidz_io_start(zio_t *zio) +{ + vdev_t *vd = zio->io_vd; + vdev_t *tvd = vd->vdev_top; + vdev_t *cvd; + blkptr_t *bp = zio->io_bp; + raidz_map_t *rm; + raidz_col_t *rc; + int c; + + rm = vdev_raidz_map_alloc(zio, tvd->vdev_ashift, vd->vdev_children, + 
vd->vdev_nparity); + + ASSERT3U(rm->rm_asize, ==, vdev_psize_to_asize(vd, zio->io_size)); + + if (zio->io_type == ZIO_TYPE_WRITE) { + /* + * Generate RAID parity in the first virtual columns. + */ + if (rm->rm_firstdatacol == 1) + vdev_raidz_generate_parity_p(rm); + else + vdev_raidz_generate_parity_pq(rm); + + for (c = 0; c < rm->rm_cols; c++) { + rc = &rm->rm_col[c]; + cvd = vd->vdev_child[rc->rc_devidx]; + zio_nowait(zio_vdev_child_io(zio, NULL, cvd, + rc->rc_offset, rc->rc_data, rc->rc_size, + zio->io_type, zio->io_priority, 0, + vdev_raidz_child_done, rc)); + } + + return (ZIO_PIPELINE_CONTINUE); + } + + ASSERT(zio->io_type == ZIO_TYPE_READ); + + /* + * Iterate over the columns in reverse order so that we hit the parity + * last -- any errors along the way will force us to read the parity + * data. + */ + for (c = rm->rm_cols - 1; c >= 0; c--) { + rc = &rm->rm_col[c]; + cvd = vd->vdev_child[rc->rc_devidx]; + if (!vdev_readable(cvd)) { + if (c >= rm->rm_firstdatacol) + rm->rm_missingdata++; + else + rm->rm_missingparity++; + rc->rc_error = ENXIO; + rc->rc_tried = 1; /* don't even try */ + rc->rc_skipped = 1; + continue; + } + if (vdev_dtl_contains(&cvd->vdev_dtl_map, bp->blk_birth, 1)) { + if (c >= rm->rm_firstdatacol) + rm->rm_missingdata++; + else + rm->rm_missingparity++; + rc->rc_error = ESTALE; + rc->rc_skipped = 1; + continue; + } + if (c >= rm->rm_firstdatacol || rm->rm_missingdata > 0 || + (zio->io_flags & ZIO_FLAG_SCRUB)) { + zio_nowait(zio_vdev_child_io(zio, NULL, cvd, + rc->rc_offset, rc->rc_data, rc->rc_size, + zio->io_type, zio->io_priority, 0, + vdev_raidz_child_done, rc)); + } + } + + return (ZIO_PIPELINE_CONTINUE); +} + +/* + * Report a checksum error for a child of a RAID-Z device. + */ +static void +raidz_checksum_error(zio_t *zio, raidz_col_t *rc) +{ + vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx]; + + if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { + mutex_enter(&vd->vdev_stat_lock); + vd->vdev_stat.vs_checksum_errors++; + mutex_exit(&vd->vdev_stat_lock); + } + + if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) + zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM, + zio->io_spa, vd, zio, rc->rc_offset, rc->rc_size); +} + +/* + * Generate the parity from the data columns. If we tried and were able to + * read the parity without error, verify that the generated parity matches the + * data we read. If it doesn't, we fire off a checksum error. Return the + * number such failures. 
+ */ +static int +raidz_parity_verify(zio_t *zio, raidz_map_t *rm) +{ + void *orig[VDEV_RAIDZ_MAXPARITY]; + int c, ret = 0; + raidz_col_t *rc; + + for (c = 0; c < rm->rm_firstdatacol; c++) { + rc = &rm->rm_col[c]; + if (!rc->rc_tried || rc->rc_error != 0) + continue; + orig[c] = zio_buf_alloc(rc->rc_size); + bcopy(rc->rc_data, orig[c], rc->rc_size); + } + + if (rm->rm_firstdatacol == 1) + vdev_raidz_generate_parity_p(rm); + else + vdev_raidz_generate_parity_pq(rm); + + for (c = 0; c < rm->rm_firstdatacol; c++) { + rc = &rm->rm_col[c]; + if (!rc->rc_tried || rc->rc_error != 0) + continue; + if (bcmp(orig[c], rc->rc_data, rc->rc_size) != 0) { + raidz_checksum_error(zio, rc); + rc->rc_error = ECKSUM; + ret++; + } + zio_buf_free(orig[c], rc->rc_size); + } + + return (ret); +} + +static uint64_t raidz_corrected_p; +static uint64_t raidz_corrected_q; +static uint64_t raidz_corrected_pq; + +static int +vdev_raidz_worst_error(raidz_map_t *rm) +{ + int error = 0; + + for (int c = 0; c < rm->rm_cols; c++) + error = zio_worst_error(error, rm->rm_col[c].rc_error); + + return (error); +} + +static void +vdev_raidz_io_done(zio_t *zio) +{ + vdev_t *vd = zio->io_vd; + vdev_t *cvd; + raidz_map_t *rm = zio->io_vsd; + raidz_col_t *rc, *rc1; + int unexpected_errors = 0; + int parity_errors = 0; + int parity_untried = 0; + int data_errors = 0; + int total_errors = 0; + int n, c, c1; + + ASSERT(zio->io_bp != NULL); /* XXX need to add code to enforce this */ + + ASSERT(rm->rm_missingparity <= rm->rm_firstdatacol); + ASSERT(rm->rm_missingdata <= rm->rm_cols - rm->rm_firstdatacol); + + for (c = 0; c < rm->rm_cols; c++) { + rc = &rm->rm_col[c]; + + if (rc->rc_error) { + ASSERT(rc->rc_error != ECKSUM); /* child has no bp */ + + if (c < rm->rm_firstdatacol) + parity_errors++; + else + data_errors++; + + if (!rc->rc_skipped) + unexpected_errors++; + + total_errors++; + } else if (c < rm->rm_firstdatacol && !rc->rc_tried) { + parity_untried++; + } + } + + if (zio->io_type == ZIO_TYPE_WRITE) { + /* + * XXX -- for now, treat partial writes as a success. + * (If we couldn't write enough columns to reconstruct + * the data, the I/O failed. Otherwise, good enough.) + * + * Now that we support write reallocation, it would be better + * to treat partial failure as real failure unless there are + * no non-degraded top-level vdevs left, and not update DTLs + * if we intend to reallocate. + */ + /* XXPOLICY */ + if (total_errors > rm->rm_firstdatacol) + zio->io_error = vdev_raidz_worst_error(rm); + + return; + } + + ASSERT(zio->io_type == ZIO_TYPE_READ); + /* + * There are three potential phases for a read: + * 1. produce valid data from the columns read + * 2. read all disks and try again + * 3. perform combinatorial reconstruction + * + * Each phase is progressively both more expensive and less likely to + * occur. If we encounter more errors than we can repair or all phases + * fail, we have no choice but to return an error. + */ + + /* + * If the number of errors we saw was correctable -- less than or equal + * to the number of parity disks read -- attempt to produce data that + * has a valid checksum. Naturally, this case applies in the absence of + * any errors. + */ + if (total_errors <= rm->rm_firstdatacol - parity_untried) { + switch (data_errors) { + case 0: + if (zio_checksum_error(zio) == 0) { + /* + * If we read parity information (unnecessarily + * as it happens since no reconstruction was + * needed) regenerate and verify the parity. 
+ * We also regenerate parity when resilvering + * so we can write it out to the failed device + * later. + */ + if (parity_errors + parity_untried < + rm->rm_firstdatacol || + (zio->io_flags & ZIO_FLAG_RESILVER)) { + n = raidz_parity_verify(zio, rm); + unexpected_errors += n; + ASSERT(parity_errors + n <= + rm->rm_firstdatacol); + } + goto done; + } + break; + + case 1: + /* + * We either attempt to read all the parity columns or + * none of them. If we didn't try to read parity, we + * wouldn't be here in the correctable case. There must + * also have been fewer parity errors than parity + * columns or, again, we wouldn't be in this code path. + */ + ASSERT(parity_untried == 0); + ASSERT(parity_errors < rm->rm_firstdatacol); + + /* + * Find the column that reported the error. + */ + for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) { + rc = &rm->rm_col[c]; + if (rc->rc_error != 0) + break; + } + ASSERT(c != rm->rm_cols); + ASSERT(!rc->rc_skipped || rc->rc_error == ENXIO || + rc->rc_error == ESTALE); + + if (rm->rm_col[VDEV_RAIDZ_P].rc_error == 0) { + vdev_raidz_reconstruct_p(rm, c); + } else { + ASSERT(rm->rm_firstdatacol > 1); + vdev_raidz_reconstruct_q(rm, c); + } + + if (zio_checksum_error(zio) == 0) { + if (rm->rm_col[VDEV_RAIDZ_P].rc_error == 0) + atomic_inc_64(&raidz_corrected_p); + else + atomic_inc_64(&raidz_corrected_q); + + /* + * If there's more than one parity disk that + * was successfully read, confirm that the + * other parity disk produced the correct data. + * This routine is suboptimal in that it + * regenerates both the parity we wish to test + * as well as the parity we just used to + * perform the reconstruction, but this should + * be a relatively uncommon case, and can be + * optimized if it becomes a problem. + * We also regenerate parity when resilvering + * so we can write it out to the failed device + * later. + */ + if (parity_errors < rm->rm_firstdatacol - 1 || + (zio->io_flags & ZIO_FLAG_RESILVER)) { + n = raidz_parity_verify(zio, rm); + unexpected_errors += n; + ASSERT(parity_errors + n <= + rm->rm_firstdatacol); + } + + goto done; + } + break; + + case 2: + /* + * Two data column errors require double parity. + */ + ASSERT(rm->rm_firstdatacol == 2); + + /* + * Find the two columns that reported errors. + */ + for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) { + rc = &rm->rm_col[c]; + if (rc->rc_error != 0) + break; + } + ASSERT(c != rm->rm_cols); + ASSERT(!rc->rc_skipped || rc->rc_error == ENXIO || + rc->rc_error == ESTALE); + + for (c1 = c++; c < rm->rm_cols; c++) { + rc = &rm->rm_col[c]; + if (rc->rc_error != 0) + break; + } + ASSERT(c != rm->rm_cols); + ASSERT(!rc->rc_skipped || rc->rc_error == ENXIO || + rc->rc_error == ESTALE); + + vdev_raidz_reconstruct_pq(rm, c1, c); + + if (zio_checksum_error(zio) == 0) { + atomic_inc_64(&raidz_corrected_pq); + goto done; + } + break; + + default: + ASSERT(rm->rm_firstdatacol <= 2); + ASSERT(0); + } + } + + /* + * This isn't a typical situation -- either we got a read error or + * a child silently returned bad data. Read every block so we can + * try again with as much data and parity as we can track down. If + * we've already been through once before, all children will be marked + * as tried so we'll proceed to combinatorial reconstruction. 
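+ *
+ * The combinatorial pass below tries, in order: reconstructing each data
+ * column from P alone, then each from Q alone, and finally every pair of
+ * data columns from P and Q together, accepting the first combination
+ * that produces a valid checksum.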
+ */ + unexpected_errors = 1; + rm->rm_missingdata = 0; + rm->rm_missingparity = 0; + + for (c = 0; c < rm->rm_cols; c++) { + if (rm->rm_col[c].rc_tried) + continue; + + zio_vdev_io_redone(zio); + do { + rc = &rm->rm_col[c]; + if (rc->rc_tried) + continue; + zio_nowait(zio_vdev_child_io(zio, NULL, + vd->vdev_child[rc->rc_devidx], + rc->rc_offset, rc->rc_data, rc->rc_size, + zio->io_type, zio->io_priority, 0, + vdev_raidz_child_done, rc)); + } while (++c < rm->rm_cols); + + return; + } + + /* + * At this point we've attempted to reconstruct the data given the + * errors we detected, and we've attempted to read all columns. There + * must, therefore, be one or more additional problems -- silent errors + * resulting in invalid data rather than explicit I/O errors resulting + * in absent data. Before we attempt combinatorial reconstruction make + * sure we have a chance of coming up with the right answer. + */ + if (total_errors >= rm->rm_firstdatacol) { + zio->io_error = vdev_raidz_worst_error(rm); + /* + * If there were exactly as many device errors as parity + * columns, yet we couldn't reconstruct the data, then at + * least one device must have returned bad data silently. + */ + if (total_errors == rm->rm_firstdatacol) + zio->io_error = zio_worst_error(zio->io_error, ECKSUM); + goto done; + } + + if (rm->rm_col[VDEV_RAIDZ_P].rc_error == 0) { + /* + * Attempt to reconstruct the data from parity P. + */ + for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) { + void *orig; + rc = &rm->rm_col[c]; + + orig = zio_buf_alloc(rc->rc_size); + bcopy(rc->rc_data, orig, rc->rc_size); + vdev_raidz_reconstruct_p(rm, c); + + if (zio_checksum_error(zio) == 0) { + zio_buf_free(orig, rc->rc_size); + atomic_inc_64(&raidz_corrected_p); + + /* + * If this child didn't know that it returned + * bad data, inform it. + */ + if (rc->rc_tried && rc->rc_error == 0) + raidz_checksum_error(zio, rc); + rc->rc_error = ECKSUM; + goto done; + } + + bcopy(orig, rc->rc_data, rc->rc_size); + zio_buf_free(orig, rc->rc_size); + } + } + + if (rm->rm_firstdatacol > 1 && rm->rm_col[VDEV_RAIDZ_Q].rc_error == 0) { + /* + * Attempt to reconstruct the data from parity Q. + */ + for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) { + void *orig; + rc = &rm->rm_col[c]; + + orig = zio_buf_alloc(rc->rc_size); + bcopy(rc->rc_data, orig, rc->rc_size); + vdev_raidz_reconstruct_q(rm, c); + + if (zio_checksum_error(zio) == 0) { + zio_buf_free(orig, rc->rc_size); + atomic_inc_64(&raidz_corrected_q); + + /* + * If this child didn't know that it returned + * bad data, inform it. + */ + if (rc->rc_tried && rc->rc_error == 0) + raidz_checksum_error(zio, rc); + rc->rc_error = ECKSUM; + goto done; + } + + bcopy(orig, rc->rc_data, rc->rc_size); + zio_buf_free(orig, rc->rc_size); + } + } + + if (rm->rm_firstdatacol > 1 && + rm->rm_col[VDEV_RAIDZ_P].rc_error == 0 && + rm->rm_col[VDEV_RAIDZ_Q].rc_error == 0) { + /* + * Attempt to reconstruct the data from both P and Q. 
+ */ + for (c = rm->rm_firstdatacol; c < rm->rm_cols - 1; c++) { + void *orig, *orig1; + rc = &rm->rm_col[c]; + + orig = zio_buf_alloc(rc->rc_size); + bcopy(rc->rc_data, orig, rc->rc_size); + + for (c1 = c + 1; c1 < rm->rm_cols; c1++) { + rc1 = &rm->rm_col[c1]; + + orig1 = zio_buf_alloc(rc1->rc_size); + bcopy(rc1->rc_data, orig1, rc1->rc_size); + + vdev_raidz_reconstruct_pq(rm, c, c1); + + if (zio_checksum_error(zio) == 0) { + zio_buf_free(orig, rc->rc_size); + zio_buf_free(orig1, rc1->rc_size); + atomic_inc_64(&raidz_corrected_pq); + + /* + * If these children didn't know they + * returned bad data, inform them. + */ + if (rc->rc_tried && rc->rc_error == 0) + raidz_checksum_error(zio, rc); + if (rc1->rc_tried && rc1->rc_error == 0) + raidz_checksum_error(zio, rc1); + + rc->rc_error = ECKSUM; + rc1->rc_error = ECKSUM; + + goto done; + } + + bcopy(orig1, rc1->rc_data, rc1->rc_size); + zio_buf_free(orig1, rc1->rc_size); + } + + bcopy(orig, rc->rc_data, rc->rc_size); + zio_buf_free(orig, rc->rc_size); + } + } + + /* + * All combinations failed to checksum. Generate checksum ereports for + * all children. + */ + zio->io_error = ECKSUM; + + if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { + for (c = 0; c < rm->rm_cols; c++) { + rc = &rm->rm_col[c]; + zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM, + zio->io_spa, vd->vdev_child[rc->rc_devidx], zio, + rc->rc_offset, rc->rc_size); + } + } + +done: + zio_checksum_verified(zio); + + if (zio->io_error == 0 && (spa_mode & FWRITE) && + (unexpected_errors || (zio->io_flags & ZIO_FLAG_RESILVER))) { + /* + * Use the good data we have in hand to repair damaged children. + */ + for (c = 0; c < rm->rm_cols; c++) { + rc = &rm->rm_col[c]; + cvd = vd->vdev_child[rc->rc_devidx]; + + if (rc->rc_error == 0) + continue; + + zio_nowait(zio_vdev_child_io(zio, NULL, cvd, + rc->rc_offset, rc->rc_data, rc->rc_size, + ZIO_TYPE_WRITE, zio->io_priority, + ZIO_FLAG_IO_REPAIR, NULL, NULL)); + } + } +} + +static void +vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded) +{ + if (faulted > vd->vdev_nparity) + vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_NO_REPLICAS); + else if (degraded + faulted != 0) + vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE); + else + vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE); +} + +vdev_ops_t vdev_raidz_ops = { + vdev_raidz_open, + vdev_raidz_close, + vdev_raidz_asize, + vdev_raidz_io_start, + vdev_raidz_io_done, + vdev_raidz_state_change, + VDEV_TYPE_RAIDZ, /* name of this vdev type */ + B_FALSE /* not a leaf vdev */ +}; diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_root.c b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_root.c new file mode 100644 index 000000000000..88383f002b80 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_root.c @@ -0,0 +1,118 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include + +/* + * Virtual device vector for the pool's root vdev. + */ + +/* + * We should be able to tolerate one failure with absolutely no damage + * to our metadata. Two failures will take out space maps, a bunch of + * indirect block trees, meta dnodes, dnodes, etc. Probably not a happy + * place to live. When we get smarter, we can liberalize this policy. + * e.g. If we haven't lost two consecutive top-level vdevs, then we are + * probably fine. Adding bean counters during alloc/free can make this + * future guesswork more accurate. + */ +static int +too_many_errors(vdev_t *vd, int numerrors) +{ + ASSERT3U(numerrors, <=, vd->vdev_children); + return (numerrors > 0); +} + +static int +vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift) +{ + int c; + int lasterror = 0; + int numerrors = 0; + + if (vd->vdev_children == 0) { + vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; + return (EINVAL); + } + + for (c = 0; c < vd->vdev_children; c++) { + vdev_t *cvd = vd->vdev_child[c]; + int error; + + if ((error = vdev_open(cvd)) != 0 && + !cvd->vdev_islog) { + lasterror = error; + numerrors++; + continue; + } + } + + if (too_many_errors(vd, numerrors)) { + vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS; + return (lasterror); + } + + *asize = 0; + *ashift = 0; + + return (0); +} + +static void +vdev_root_close(vdev_t *vd) +{ + int c; + + for (c = 0; c < vd->vdev_children; c++) + vdev_close(vd->vdev_child[c]); +} + +static void +vdev_root_state_change(vdev_t *vd, int faulted, int degraded) +{ + if (too_many_errors(vd, faulted)) { + vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, + VDEV_AUX_NO_REPLICAS); + } else if (degraded) { + vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE); + } else { + vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE); + } +} + +vdev_ops_t vdev_root_ops = { + vdev_root_open, + vdev_root_close, + vdev_default_asize, + NULL, /* io_start - not applicable to the root */ + NULL, /* io_done - not applicable to the root */ + vdev_root_state_change, + VDEV_TYPE_ROOT, /* name of this vdev type */ + B_FALSE /* not a leaf vdev */ +}; diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zap.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zap.c new file mode 100644 index 000000000000..ca859ec355da --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zap.c @@ -0,0 +1,1136 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + + +/* + * This file contains the top half of the zfs directory structure + * implementation. The bottom half is in zap_leaf.c. + * + * The zdir is an extendable hash data structure. There is a table of + * pointers to buckets (zap_t->zd_data->zd_leafs). The buckets are + * each a constant size and hold a variable number of directory entries. + * The buckets (aka "leaf nodes") are implemented in zap_leaf.c. + * + * The pointer table holds a power of 2 number of pointers. + * (1<zd_data->zd_phys->zd_prefix_len). The bucket pointed to + * by the pointer at index i in the table holds entries whose hash value + * has a zd_prefix_len - bit prefix + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +int fzap_default_block_shift = 14; /* 16k blocksize */ + +static void zap_leaf_pageout(dmu_buf_t *db, void *vl); +static uint64_t zap_allocate_blocks(zap_t *zap, int nblocks); + + +void +fzap_byteswap(void *vbuf, size_t size) +{ + uint64_t block_type; + + block_type = *(uint64_t *)vbuf; + + if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF)) + zap_leaf_byteswap(vbuf, size); + else { + /* it's a ptrtbl block */ + byteswap_uint64_array(vbuf, size); + } +} + +void +fzap_upgrade(zap_t *zap, dmu_tx_t *tx) +{ + dmu_buf_t *db; + zap_leaf_t *l; + int i; + zap_phys_t *zp; + + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + zap->zap_ismicro = FALSE; + + (void) dmu_buf_update_user(zap->zap_dbuf, zap, zap, + &zap->zap_f.zap_phys, zap_evict); + + mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0); + zap->zap_f.zap_block_shift = highbit(zap->zap_dbuf->db_size) - 1; + + zp = zap->zap_f.zap_phys; + /* + * explicitly zero it since it might be coming from an + * initialized microzap + */ + bzero(zap->zap_dbuf->db_data, zap->zap_dbuf->db_size); + zp->zap_block_type = ZBT_HEADER; + zp->zap_magic = ZAP_MAGIC; + + zp->zap_ptrtbl.zt_shift = ZAP_EMBEDDED_PTRTBL_SHIFT(zap); + + zp->zap_freeblk = 2; /* block 1 will be the first leaf */ + zp->zap_num_leafs = 1; + zp->zap_num_entries = 0; + zp->zap_salt = zap->zap_salt; + zp->zap_normflags = zap->zap_normflags; + + /* block 1 will be the first leaf */ + for (i = 0; i < (1<zap_ptrtbl.zt_shift); i++) + ZAP_EMBEDDED_PTRTBL_ENT(zap, i) = 1; + + /* + * set up block 1 - the first leaf + */ + VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object, + 1<l_dbuf = db; + l->l_phys = db->db_data; + + zap_leaf_init(l, zp->zap_normflags != 0); + + kmem_free(l, sizeof (zap_leaf_t)); + dmu_buf_rele(db, FTAG); +} + +static int +zap_tryupgradedir(zap_t *zap, dmu_tx_t *tx) +{ + if (RW_WRITE_HELD(&zap->zap_rwlock)) + return (1); + if (rw_tryupgrade(&zap->zap_rwlock)) { + dmu_buf_will_dirty(zap->zap_dbuf, tx); + return (1); + } + return (0); +} + +/* + * Generic routines for dealing with the pointer & cookie tables. 
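/*
 * Simplified sketch of how a hash selects a leaf through the pointer
 * table: the top zt_shift bits of the 64-bit hash index the table, and
 * the entry found there is the block number of the leaf whose prefix
 * covers that hash. The flat uint64_t array and the helper name below are
 * assumptions for illustration only; the real lookup goes through
 * zap_idx_to_blk() and zap_table_load() further down.
 */
static uint64_t
zap_hash_to_leafblk(uint64_t hash, const uint64_t *ptrtbl, int zt_shift)
{
	uint64_t idx;

	/* High-order zt_shift bits of the hash form the table index. */
	idx = (zt_shift == 0) ? 0 : (hash >> (64 - zt_shift));

	/* The table entry names the leaf block for that hash prefix. */
	return (ptrtbl[idx]);
}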
+ */ + +static int +zap_table_grow(zap_t *zap, zap_table_phys_t *tbl, + void (*transfer_func)(const uint64_t *src, uint64_t *dst, int n), + dmu_tx_t *tx) +{ + uint64_t b, newblk; + dmu_buf_t *db_old, *db_new; + int err; + int bs = FZAP_BLOCK_SHIFT(zap); + int hepb = 1<<(bs-4); + /* hepb = half the number of entries in a block */ + + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + ASSERT(tbl->zt_blk != 0); + ASSERT(tbl->zt_numblks > 0); + + if (tbl->zt_nextblk != 0) { + newblk = tbl->zt_nextblk; + } else { + newblk = zap_allocate_blocks(zap, tbl->zt_numblks * 2); + tbl->zt_nextblk = newblk; + ASSERT3U(tbl->zt_blks_copied, ==, 0); + dmu_prefetch(zap->zap_objset, zap->zap_object, + tbl->zt_blk << bs, tbl->zt_numblks << bs); + } + + /* + * Copy the ptrtbl from the old to new location. + */ + + b = tbl->zt_blks_copied; + err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + (tbl->zt_blk + b) << bs, FTAG, &db_old); + if (err) + return (err); + + /* first half of entries in old[b] go to new[2*b+0] */ + VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object, + (newblk + 2*b+0) << bs, FTAG, &db_new)); + dmu_buf_will_dirty(db_new, tx); + transfer_func(db_old->db_data, db_new->db_data, hepb); + dmu_buf_rele(db_new, FTAG); + + /* second half of entries in old[b] go to new[2*b+1] */ + VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object, + (newblk + 2*b+1) << bs, FTAG, &db_new)); + dmu_buf_will_dirty(db_new, tx); + transfer_func((uint64_t *)db_old->db_data + hepb, + db_new->db_data, hepb); + dmu_buf_rele(db_new, FTAG); + + dmu_buf_rele(db_old, FTAG); + + tbl->zt_blks_copied++; + + dprintf("copied block %llu of %llu\n", + tbl->zt_blks_copied, tbl->zt_numblks); + + if (tbl->zt_blks_copied == tbl->zt_numblks) { + (void) dmu_free_range(zap->zap_objset, zap->zap_object, + tbl->zt_blk << bs, tbl->zt_numblks << bs, tx); + + tbl->zt_blk = newblk; + tbl->zt_numblks *= 2; + tbl->zt_shift++; + tbl->zt_nextblk = 0; + tbl->zt_blks_copied = 0; + + dprintf("finished; numblocks now %llu (%lluk entries)\n", + tbl->zt_numblks, 1<<(tbl->zt_shift-10)); + } + + return (0); +} + +static int +zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val, + dmu_tx_t *tx) +{ + int err; + uint64_t blk, off; + int bs = FZAP_BLOCK_SHIFT(zap); + dmu_buf_t *db; + + ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); + ASSERT(tbl->zt_blk != 0); + + dprintf("storing %llx at index %llx\n", val, idx); + + blk = idx >> (bs-3); + off = idx & ((1<<(bs-3))-1); + + err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + (tbl->zt_blk + blk) << bs, FTAG, &db); + if (err) + return (err); + dmu_buf_will_dirty(db, tx); + + if (tbl->zt_nextblk != 0) { + uint64_t idx2 = idx * 2; + uint64_t blk2 = idx2 >> (bs-3); + uint64_t off2 = idx2 & ((1<<(bs-3))-1); + dmu_buf_t *db2; + + err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + (tbl->zt_nextblk + blk2) << bs, FTAG, &db2); + if (err) { + dmu_buf_rele(db, FTAG); + return (err); + } + dmu_buf_will_dirty(db2, tx); + ((uint64_t *)db2->db_data)[off2] = val; + ((uint64_t *)db2->db_data)[off2+1] = val; + dmu_buf_rele(db2, FTAG); + } + + ((uint64_t *)db->db_data)[off] = val; + dmu_buf_rele(db, FTAG); + + return (0); +} + +static int +zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp) +{ + uint64_t blk, off; + int err; + dmu_buf_t *db; + int bs = FZAP_BLOCK_SHIFT(zap); + + ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); + + blk = idx >> (bs-3); + off = idx & ((1<<(bs-3))-1); + + err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + (tbl->zt_blk + blk) << bs, FTAG, &db); + if 
(err) + return (err); + *valp = ((uint64_t *)db->db_data)[off]; + dmu_buf_rele(db, FTAG); + + if (tbl->zt_nextblk != 0) { + /* + * read the nextblk for the sake of i/o error checking, + * so that zap_table_load() will catch errors for + * zap_table_store. + */ + blk = (idx*2) >> (bs-3); + + err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + (tbl->zt_nextblk + blk) << bs, FTAG, &db); + dmu_buf_rele(db, FTAG); + } + return (err); +} + +/* + * Routines for growing the ptrtbl. + */ + +static void +zap_ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n) +{ + int i; + for (i = 0; i < n; i++) { + uint64_t lb = src[i]; + dst[2*i+0] = lb; + dst[2*i+1] = lb; + } +} + +static int +zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx) +{ + /* In case things go horribly wrong. */ + if (zap->zap_f.zap_phys->zap_ptrtbl.zt_shift >= ZAP_HASHBITS-2) + return (ENOSPC); + + if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) { + /* + * We are outgrowing the "embedded" ptrtbl (the one + * stored in the header block). Give it its own entire + * block, which will double the size of the ptrtbl. + */ + uint64_t newblk; + dmu_buf_t *db_new; + int err; + + ASSERT3U(zap->zap_f.zap_phys->zap_ptrtbl.zt_shift, ==, + ZAP_EMBEDDED_PTRTBL_SHIFT(zap)); + ASSERT3U(zap->zap_f.zap_phys->zap_ptrtbl.zt_blk, ==, 0); + + newblk = zap_allocate_blocks(zap, 1); + err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + newblk << FZAP_BLOCK_SHIFT(zap), FTAG, &db_new); + if (err) + return (err); + dmu_buf_will_dirty(db_new, tx); + zap_ptrtbl_transfer(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), + db_new->db_data, 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap)); + dmu_buf_rele(db_new, FTAG); + + zap->zap_f.zap_phys->zap_ptrtbl.zt_blk = newblk; + zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks = 1; + zap->zap_f.zap_phys->zap_ptrtbl.zt_shift++; + + ASSERT3U(1ULL << zap->zap_f.zap_phys->zap_ptrtbl.zt_shift, ==, + zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks << + (FZAP_BLOCK_SHIFT(zap)-3)); + + return (0); + } else { + return (zap_table_grow(zap, &zap->zap_f.zap_phys->zap_ptrtbl, + zap_ptrtbl_transfer, tx)); + } +} + +static void +zap_increment_num_entries(zap_t *zap, int delta, dmu_tx_t *tx) +{ + dmu_buf_will_dirty(zap->zap_dbuf, tx); + mutex_enter(&zap->zap_f.zap_num_entries_mtx); + ASSERT(delta > 0 || zap->zap_f.zap_phys->zap_num_entries >= -delta); + zap->zap_f.zap_phys->zap_num_entries += delta; + mutex_exit(&zap->zap_f.zap_num_entries_mtx); +} + +static uint64_t +zap_allocate_blocks(zap_t *zap, int nblocks) +{ + uint64_t newblk; + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + newblk = zap->zap_f.zap_phys->zap_freeblk; + zap->zap_f.zap_phys->zap_freeblk += nblocks; + return (newblk); +} + +static zap_leaf_t * +zap_create_leaf(zap_t *zap, dmu_tx_t *tx) +{ + void *winner; + zap_leaf_t *l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP); + + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + + rw_init(&l->l_rwlock, 0, 0, 0); + rw_enter(&l->l_rwlock, RW_WRITER); + l->l_blkid = zap_allocate_blocks(zap, 1); + l->l_dbuf = NULL; + l->l_phys = NULL; + + VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object, + l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf)); + winner = dmu_buf_set_user(l->l_dbuf, l, &l->l_phys, zap_leaf_pageout); + ASSERT(winner == NULL); + dmu_buf_will_dirty(l->l_dbuf, tx); + + zap_leaf_init(l, zap->zap_normflags != 0); + + zap->zap_f.zap_phys->zap_num_leafs++; + + return (l); +} + +int +fzap_count(zap_t *zap, uint64_t *count) +{ + ASSERT(!zap->zap_ismicro); + mutex_enter(&zap->zap_f.zap_num_entries_mtx); /* unnecessary */ + *count = 
zap->zap_f.zap_phys->zap_num_entries; + mutex_exit(&zap->zap_f.zap_num_entries_mtx); + return (0); +} + +/* + * Routines for obtaining zap_leaf_t's + */ + +void +zap_put_leaf(zap_leaf_t *l) +{ + rw_exit(&l->l_rwlock); + dmu_buf_rele(l->l_dbuf, NULL); +} + +_NOTE(ARGSUSED(0)) +static void +zap_leaf_pageout(dmu_buf_t *db, void *vl) +{ + zap_leaf_t *l = vl; + + rw_destroy(&l->l_rwlock); + kmem_free(l, sizeof (zap_leaf_t)); +} + +static zap_leaf_t * +zap_open_leaf(uint64_t blkid, dmu_buf_t *db) +{ + zap_leaf_t *l, *winner; + + ASSERT(blkid != 0); + + l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP); + rw_init(&l->l_rwlock, 0, 0, 0); + rw_enter(&l->l_rwlock, RW_WRITER); + l->l_blkid = blkid; + l->l_bs = highbit(db->db_size)-1; + l->l_dbuf = db; + l->l_phys = NULL; + + winner = dmu_buf_set_user(db, l, &l->l_phys, zap_leaf_pageout); + + rw_exit(&l->l_rwlock); + if (winner != NULL) { + /* someone else set it first */ + zap_leaf_pageout(NULL, l); + l = winner; + } + + /* + * lhr_pad was previously used for the next leaf in the leaf + * chain. There should be no chained leafs (as we have removed + * support for them). + */ + ASSERT3U(l->l_phys->l_hdr.lh_pad1, ==, 0); + + /* + * There should be more hash entries than there can be + * chunks to put in the hash table + */ + ASSERT3U(ZAP_LEAF_HASH_NUMENTRIES(l), >, ZAP_LEAF_NUMCHUNKS(l) / 3); + + /* The chunks should begin at the end of the hash table */ + ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==, + &l->l_phys->l_hash[ZAP_LEAF_HASH_NUMENTRIES(l)]); + + /* The chunks should end at the end of the block */ + ASSERT3U((uintptr_t)&ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)) - + (uintptr_t)l->l_phys, ==, l->l_dbuf->db_size); + + return (l); +} + +static int +zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt, + zap_leaf_t **lp) +{ + dmu_buf_t *db; + zap_leaf_t *l; + int bs = FZAP_BLOCK_SHIFT(zap); + int err; + + ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); + + err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + blkid << bs, NULL, &db); + if (err) + return (err); + + ASSERT3U(db->db_object, ==, zap->zap_object); + ASSERT3U(db->db_offset, ==, blkid << bs); + ASSERT3U(db->db_size, ==, 1 << bs); + ASSERT(blkid != 0); + + l = dmu_buf_get_user(db); + + if (l == NULL) + l = zap_open_leaf(blkid, db); + + rw_enter(&l->l_rwlock, lt); + /* + * Must lock before dirtying, otherwise l->l_phys could change, + * causing ASSERT below to fail. 
+ */ + if (lt == RW_WRITER) + dmu_buf_will_dirty(db, tx); + ASSERT3U(l->l_blkid, ==, blkid); + ASSERT3P(l->l_dbuf, ==, db); + ASSERT3P(l->l_phys, ==, l->l_dbuf->db_data); + ASSERT3U(l->l_phys->l_hdr.lh_block_type, ==, ZBT_LEAF); + ASSERT3U(l->l_phys->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC); + + *lp = l; + return (0); +} + +static int +zap_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t *valp) +{ + ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); + + if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) { + ASSERT3U(idx, <, + (1ULL << zap->zap_f.zap_phys->zap_ptrtbl.zt_shift)); + *valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx); + return (0); + } else { + return (zap_table_load(zap, &zap->zap_f.zap_phys->zap_ptrtbl, + idx, valp)); + } +} + +static int +zap_set_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t blk, dmu_tx_t *tx) +{ + ASSERT(tx != NULL); + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + + if (zap->zap_f.zap_phys->zap_ptrtbl.zt_blk == 0) { + ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) = blk; + return (0); + } else { + return (zap_table_store(zap, &zap->zap_f.zap_phys->zap_ptrtbl, + idx, blk, tx)); + } +} + +static int +zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp) +{ + uint64_t idx, blk; + int err; + + ASSERT(zap->zap_dbuf == NULL || + zap->zap_f.zap_phys == zap->zap_dbuf->db_data); + ASSERT3U(zap->zap_f.zap_phys->zap_magic, ==, ZAP_MAGIC); + idx = ZAP_HASH_IDX(h, zap->zap_f.zap_phys->zap_ptrtbl.zt_shift); + err = zap_idx_to_blk(zap, idx, &blk); + if (err != 0) + return (err); + err = zap_get_leaf_byblk(zap, blk, tx, lt, lp); + + ASSERT(err || ZAP_HASH_IDX(h, (*lp)->l_phys->l_hdr.lh_prefix_len) == + (*lp)->l_phys->l_hdr.lh_prefix); + return (err); +} + +static int +zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx, zap_leaf_t **lp) +{ + zap_t *zap = zn->zn_zap; + uint64_t hash = zn->zn_hash; + zap_leaf_t *nl; + int prefix_diff, i, err; + uint64_t sibling; + int old_prefix_len = l->l_phys->l_hdr.lh_prefix_len; + + ASSERT3U(old_prefix_len, <=, zap->zap_f.zap_phys->zap_ptrtbl.zt_shift); + ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); + + ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==, + l->l_phys->l_hdr.lh_prefix); + + if (zap_tryupgradedir(zap, tx) == 0 || + old_prefix_len == zap->zap_f.zap_phys->zap_ptrtbl.zt_shift) { + /* We failed to upgrade, or need to grow the pointer table */ + objset_t *os = zap->zap_objset; + uint64_t object = zap->zap_object; + + zap_put_leaf(l); + zap_unlockdir(zap); + err = zap_lockdir(os, object, tx, RW_WRITER, + FALSE, FALSE, &zn->zn_zap); + zap = zn->zn_zap; + if (err) + return (err); + ASSERT(!zap->zap_ismicro); + + while (old_prefix_len == + zap->zap_f.zap_phys->zap_ptrtbl.zt_shift) { + err = zap_grow_ptrtbl(zap, tx); + if (err) + return (err); + } + + err = zap_deref_leaf(zap, hash, tx, RW_WRITER, &l); + if (err) + return (err); + + if (l->l_phys->l_hdr.lh_prefix_len != old_prefix_len) { + /* it split while our locks were down */ + *lp = l; + return (0); + } + } + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + ASSERT3U(old_prefix_len, <, zap->zap_f.zap_phys->zap_ptrtbl.zt_shift); + ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==, + l->l_phys->l_hdr.lh_prefix); + + prefix_diff = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift - + (old_prefix_len + 1); + sibling = (ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff; + + /* check for i/o errors before doing zap_leaf_split */ + for (i = 0; i < (1ULL<l_blkid); + } + + nl = zap_create_leaf(zap, tx); + zap_leaf_split(l, nl, zap->zap_normflags != 0); + + /* set sibling pointers */ + for (i = 0; i < 
(1ULL<l_blkid, tx); + ASSERT3U(err, ==, 0); /* we checked for i/o errors above */ + } + + if (hash & (1ULL << (64 - l->l_phys->l_hdr.lh_prefix_len))) { + /* we want the sibling */ + zap_put_leaf(l); + *lp = nl; + } else { + zap_put_leaf(nl); + *lp = l; + } + + return (0); +} + +static void +zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx) +{ + zap_t *zap = zn->zn_zap; + int shift = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift; + int leaffull = (l->l_phys->l_hdr.lh_prefix_len == shift && + l->l_phys->l_hdr.lh_nfree < ZAP_LEAF_LOW_WATER); + + zap_put_leaf(l); + + if (leaffull || zap->zap_f.zap_phys->zap_ptrtbl.zt_nextblk) { + int err; + + /* + * We are in the middle of growing the pointer table, or + * this leaf will soon make us grow it. + */ + if (zap_tryupgradedir(zap, tx) == 0) { + objset_t *os = zap->zap_objset; + uint64_t zapobj = zap->zap_object; + + zap_unlockdir(zap); + err = zap_lockdir(os, zapobj, tx, + RW_WRITER, FALSE, FALSE, &zn->zn_zap); + zap = zn->zn_zap; + if (err) + return; + } + + /* could have finished growing while our locks were down */ + if (zap->zap_f.zap_phys->zap_ptrtbl.zt_shift == shift) + (void) zap_grow_ptrtbl(zap, tx); + } +} + + +static int +fzap_checksize(const char *name, uint64_t integer_size, uint64_t num_integers) +{ + if (name && strlen(name) > ZAP_MAXNAMELEN) + return (E2BIG); + + /* Only integer sizes supported by C */ + switch (integer_size) { + case 1: + case 2: + case 4: + case 8: + break; + default: + return (EINVAL); + } + + if (integer_size * num_integers > ZAP_MAXVALUELEN) + return (E2BIG); + + return (0); +} + +/* + * Routines for manipulating attributes. + */ +int +fzap_lookup(zap_name_t *zn, + uint64_t integer_size, uint64_t num_integers, void *buf, + char *realname, int rn_len, boolean_t *ncp) +{ + zap_leaf_t *l; + int err; + zap_entry_handle_t zeh; + + err = fzap_checksize(zn->zn_name_orij, integer_size, num_integers); + if (err != 0) + return (err); + + err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l); + if (err != 0) + return (err); + err = zap_leaf_lookup(l, zn, &zeh); + if (err == 0) { + err = zap_entry_read(&zeh, integer_size, num_integers, buf); + (void) zap_entry_read_name(&zeh, rn_len, realname); + if (ncp) { + *ncp = zap_entry_normalization_conflict(&zeh, + zn, NULL, zn->zn_zap); + } + } + + zap_put_leaf(l); + return (err); +} + +int +fzap_add_cd(zap_name_t *zn, + uint64_t integer_size, uint64_t num_integers, + const void *val, uint32_t cd, dmu_tx_t *tx) +{ + zap_leaf_t *l; + int err; + zap_entry_handle_t zeh; + zap_t *zap = zn->zn_zap; + + ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); + ASSERT(!zap->zap_ismicro); + ASSERT(fzap_checksize(zn->zn_name_orij, + integer_size, num_integers) == 0); + + err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l); + if (err != 0) + return (err); +retry: + err = zap_leaf_lookup(l, zn, &zeh); + if (err == 0) { + err = EEXIST; + goto out; + } + if (err != ENOENT) + goto out; + + err = zap_entry_create(l, zn->zn_name_orij, zn->zn_hash, cd, + integer_size, num_integers, val, &zeh); + + if (err == 0) { + zap_increment_num_entries(zap, 1, tx); + } else if (err == EAGAIN) { + err = zap_expand_leaf(zn, l, tx, &l); + zap = zn->zn_zap; /* zap_expand_leaf() may change zap */ + if (err == 0) + goto retry; + } + +out: + if (zap != NULL) + zap_put_leaf_maybe_grow_ptrtbl(zn, l, tx); + return (err); +} + +int +fzap_add(zap_name_t *zn, + uint64_t integer_size, uint64_t num_integers, + const void *val, dmu_tx_t *tx) +{ + int err = fzap_checksize(zn->zn_name_orij, integer_size, 
num_integers); + if (err != 0) + return (err); + + return (fzap_add_cd(zn, integer_size, num_integers, + val, ZAP_MAXCD, tx)); +} + +int +fzap_update(zap_name_t *zn, + int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx) +{ + zap_leaf_t *l; + int err, create; + zap_entry_handle_t zeh; + zap_t *zap = zn->zn_zap; + + ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); + err = fzap_checksize(zn->zn_name_orij, integer_size, num_integers); + if (err != 0) + return (err); + + err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l); + if (err != 0) + return (err); +retry: + err = zap_leaf_lookup(l, zn, &zeh); + create = (err == ENOENT); + ASSERT(err == 0 || err == ENOENT); + + if (create) { + err = zap_entry_create(l, zn->zn_name_orij, zn->zn_hash, + ZAP_MAXCD, integer_size, num_integers, val, &zeh); + if (err == 0) + zap_increment_num_entries(zap, 1, tx); + } else { + err = zap_entry_update(&zeh, integer_size, num_integers, val); + } + + if (err == EAGAIN) { + err = zap_expand_leaf(zn, l, tx, &l); + zap = zn->zn_zap; /* zap_expand_leaf() may change zap */ + if (err == 0) + goto retry; + } + + if (zap != NULL) + zap_put_leaf_maybe_grow_ptrtbl(zn, l, tx); + return (err); +} + +int +fzap_length(zap_name_t *zn, + uint64_t *integer_size, uint64_t *num_integers) +{ + zap_leaf_t *l; + int err; + zap_entry_handle_t zeh; + + err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l); + if (err != 0) + return (err); + err = zap_leaf_lookup(l, zn, &zeh); + if (err != 0) + goto out; + + if (integer_size) + *integer_size = zeh.zeh_integer_size; + if (num_integers) + *num_integers = zeh.zeh_num_integers; +out: + zap_put_leaf(l); + return (err); +} + +int +fzap_remove(zap_name_t *zn, dmu_tx_t *tx) +{ + zap_leaf_t *l; + int err; + zap_entry_handle_t zeh; + + err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, tx, RW_WRITER, &l); + if (err != 0) + return (err); + err = zap_leaf_lookup(l, zn, &zeh); + if (err == 0) { + zap_entry_remove(&zeh); + zap_increment_num_entries(zn->zn_zap, -1, tx); + } + zap_put_leaf(l); + return (err); +} + +/* + * Helper functions for consumers. 
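+ * zap_add_int()/zap_remove_int()/zap_lookup_int() below store a single
+ * 8-byte integer keyed by the hex form of the value itself.  A usage
+ * sketch (the object number here is hypothetical):
+ *
+ *	err = zap_add_int(os, obj, 12345ULL, tx);	adds entry "3039"
+ *	err = zap_lookup_int(os, obj, 12345ULL);	finds it again
+ *
+ * Each one snprintf()s the value with "%llx" and forwards to the
+ * corresponding generic zap_add()/zap_remove()/zap_lookup() call.
+ *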
+ */ + +int +zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask, + char *name) +{ + zap_cursor_t zc; + zap_attribute_t *za; + int err; + + if (mask == 0) + mask = -1ULL; + + za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); + for (zap_cursor_init(&zc, os, zapobj); + (err = zap_cursor_retrieve(&zc, za)) == 0; + zap_cursor_advance(&zc)) { + if ((za->za_first_integer & mask) == (value & mask)) { + (void) strcpy(name, za->za_name); + break; + } + } + zap_cursor_fini(&zc); + kmem_free(za, sizeof (zap_attribute_t)); + return (err); +} + +int +zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx) +{ + zap_cursor_t zc; + zap_attribute_t za; + int err; + + for (zap_cursor_init(&zc, os, fromobj); + zap_cursor_retrieve(&zc, &za) == 0; + (void) zap_cursor_advance(&zc)) { + if (za.za_integer_length != 8 || za.za_num_integers != 1) + return (EINVAL); + err = zap_add(os, intoobj, za.za_name, + 8, 1, &za.za_first_integer, tx); + if (err) + return (err); + } + zap_cursor_fini(&zc); + return (0); +} + +int +zap_add_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx) +{ + char name[20]; + + (void) snprintf(name, sizeof (name), "%llx", (longlong_t)value); + return (zap_add(os, obj, name, 8, 1, &value, tx)); +} + +int +zap_remove_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx) +{ + char name[20]; + + (void) snprintf(name, sizeof (name), "%llx", (longlong_t)value); + return (zap_remove(os, obj, name, tx)); +} + +int +zap_lookup_int(objset_t *os, uint64_t obj, uint64_t value) +{ + char name[20]; + + (void) snprintf(name, sizeof (name), "%llx", (longlong_t)value); + return (zap_lookup(os, obj, name, 8, 1, &value)); +} + +/* + * Routines for iterating over the attributes. + */ + +int +fzap_cursor_retrieve(zap_t *zap, zap_cursor_t *zc, zap_attribute_t *za) +{ + int err = ENOENT; + zap_entry_handle_t zeh; + zap_leaf_t *l; + + /* retrieve the next entry at or after zc_hash/zc_cd */ + /* if no entry, return ENOENT */ + + if (zc->zc_leaf && + (ZAP_HASH_IDX(zc->zc_hash, + zc->zc_leaf->l_phys->l_hdr.lh_prefix_len) != + zc->zc_leaf->l_phys->l_hdr.lh_prefix)) { + rw_enter(&zc->zc_leaf->l_rwlock, RW_READER); + zap_put_leaf(zc->zc_leaf); + zc->zc_leaf = NULL; + } + +again: + if (zc->zc_leaf == NULL) { + err = zap_deref_leaf(zap, zc->zc_hash, NULL, RW_READER, + &zc->zc_leaf); + if (err != 0) + return (err); + } else { + rw_enter(&zc->zc_leaf->l_rwlock, RW_READER); + } + l = zc->zc_leaf; + + err = zap_leaf_lookup_closest(l, zc->zc_hash, zc->zc_cd, &zeh); + + if (err == ENOENT) { + uint64_t nocare = + (1ULL << (64 - l->l_phys->l_hdr.lh_prefix_len)) - 1; + zc->zc_hash = (zc->zc_hash & ~nocare) + nocare + 1; + zc->zc_cd = 0; + if (l->l_phys->l_hdr.lh_prefix_len == 0 || zc->zc_hash == 0) { + zc->zc_hash = -1ULL; + } else { + zap_put_leaf(zc->zc_leaf); + zc->zc_leaf = NULL; + goto again; + } + } + + if (err == 0) { + zc->zc_hash = zeh.zeh_hash; + zc->zc_cd = zeh.zeh_cd; + za->za_integer_length = zeh.zeh_integer_size; + za->za_num_integers = zeh.zeh_num_integers; + if (zeh.zeh_num_integers == 0) { + za->za_first_integer = 0; + } else { + err = zap_entry_read(&zeh, 8, 1, &za->za_first_integer); + ASSERT(err == 0 || err == EOVERFLOW); + } + err = zap_entry_read_name(&zeh, + sizeof (za->za_name), za->za_name); + ASSERT(err == 0); + + za->za_normalization_conflict = + zap_entry_normalization_conflict(&zeh, + NULL, za->za_name, zap); + } + rw_exit(&zc->zc_leaf->l_rwlock); + return (err); +} + + +static void +zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, 
zap_stats_t *zs) +{ + int i, err; + uint64_t lastblk = 0; + + /* + * NB: if a leaf has more pointers than an entire ptrtbl block + * can hold, then it'll be accounted for more than once, since + * we won't have lastblk. + */ + for (i = 0; i < len; i++) { + zap_leaf_t *l; + + if (tbl[i] == lastblk) + continue; + lastblk = tbl[i]; + + err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l); + if (err == 0) { + zap_leaf_stats(zap, l, zs); + zap_put_leaf(l); + } + } +} + +void +fzap_get_stats(zap_t *zap, zap_stats_t *zs) +{ + int bs = FZAP_BLOCK_SHIFT(zap); + zs->zs_blocksize = 1ULL << bs; + + /* + * Set zap_phys_t fields + */ + zs->zs_num_leafs = zap->zap_f.zap_phys->zap_num_leafs; + zs->zs_num_entries = zap->zap_f.zap_phys->zap_num_entries; + zs->zs_num_blocks = zap->zap_f.zap_phys->zap_freeblk; + zs->zs_block_type = zap->zap_f.zap_phys->zap_block_type; + zs->zs_magic = zap->zap_f.zap_phys->zap_magic; + zs->zs_salt = zap->zap_f.zap_phys->zap_salt; + + /* + * Set zap_ptrtbl fields + */ + zs->zs_ptrtbl_len = 1ULL << zap->zap_f.zap_phys->zap_ptrtbl.zt_shift; + zs->zs_ptrtbl_nextblk = zap->zap_f.zap_phys->zap_ptrtbl.zt_nextblk; + zs->zs_ptrtbl_blks_copied = + zap->zap_f.zap_phys->zap_ptrtbl.zt_blks_copied; + zs->zs_ptrtbl_zt_blk = zap->zap_f.zap_phys->zap_ptrtbl.zt_blk; + zs->zs_ptrtbl_zt_numblks = zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks; + zs->zs_ptrtbl_zt_shift = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift; + + if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) { + /* the ptrtbl is entirely in the header block. */ + zap_stats_ptrtbl(zap, &ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), + 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap), zs); + } else { + int b; + + dmu_prefetch(zap->zap_objset, zap->zap_object, + zap->zap_f.zap_phys->zap_ptrtbl.zt_blk << bs, + zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks << bs); + + for (b = 0; b < zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks; + b++) { + dmu_buf_t *db; + int err; + + err = dmu_buf_hold(zap->zap_objset, zap->zap_object, + (zap->zap_f.zap_phys->zap_ptrtbl.zt_blk + b) << bs, + FTAG, &db); + if (err == 0) { + zap_stats_ptrtbl(zap, db->db_data, + 1<<(bs-3), zs); + dmu_buf_rele(db, FTAG); + } + } + } +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zap_leaf.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zap_leaf.c new file mode 100644 index 000000000000..da498b6bc9e3 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zap_leaf.c @@ -0,0 +1,853 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * The 512-byte leaf is broken into 32 16-byte chunks. + * chunk number n means l_chunk[n], even though the header precedes it. 
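+ * (The chunk array starts immediately after the leaf header and hash
+ * table; see the layout ASSERTs in zap_open_leaf() in zap.c.)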
+ * the names are stored null-terminated. + */ + +#include +#include +#include +#include +#include +#include + +static uint16_t *zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry); + +#define CHAIN_END 0xffff /* end of the chunk chain */ + +/* half the (current) minimum block size */ +#define MAX_ARRAY_BYTES (8<<10) + +#define LEAF_HASH(l, h) \ + ((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \ + ((h) >> (64 - ZAP_LEAF_HASH_SHIFT(l)-(l)->l_phys->l_hdr.lh_prefix_len))) + +#define LEAF_HASH_ENTPTR(l, h) (&(l)->l_phys->l_hash[LEAF_HASH(l, h)]) + + +static void +zap_memset(void *a, int c, size_t n) +{ + char *cp = a; + char *cpend = cp + n; + + while (cp < cpend) + *cp++ = c; +} + +static void +stv(int len, void *addr, uint64_t value) +{ + switch (len) { + case 1: + *(uint8_t *)addr = value; + return; + case 2: + *(uint16_t *)addr = value; + return; + case 4: + *(uint32_t *)addr = value; + return; + case 8: + *(uint64_t *)addr = value; + return; + } + ASSERT(!"bad int len"); +} + +static uint64_t +ldv(int len, const void *addr) +{ + switch (len) { + case 1: + return (*(uint8_t *)addr); + case 2: + return (*(uint16_t *)addr); + case 4: + return (*(uint32_t *)addr); + case 8: + return (*(uint64_t *)addr); + } + ASSERT(!"bad int len"); + return (0xFEEDFACEDEADBEEFULL); +} + +void +zap_leaf_byteswap(zap_leaf_phys_t *buf, int size) +{ + int i; + zap_leaf_t l; + l.l_bs = highbit(size)-1; + l.l_phys = buf; + + buf->l_hdr.lh_block_type = BSWAP_64(buf->l_hdr.lh_block_type); + buf->l_hdr.lh_prefix = BSWAP_64(buf->l_hdr.lh_prefix); + buf->l_hdr.lh_magic = BSWAP_32(buf->l_hdr.lh_magic); + buf->l_hdr.lh_nfree = BSWAP_16(buf->l_hdr.lh_nfree); + buf->l_hdr.lh_nentries = BSWAP_16(buf->l_hdr.lh_nentries); + buf->l_hdr.lh_prefix_len = BSWAP_16(buf->l_hdr.lh_prefix_len); + buf->l_hdr.lh_freelist = BSWAP_16(buf->l_hdr.lh_freelist); + + for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++) + buf->l_hash[i] = BSWAP_16(buf->l_hash[i]); + + for (i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) { + zap_leaf_chunk_t *lc = &ZAP_LEAF_CHUNK(&l, i); + struct zap_leaf_entry *le; + + switch (lc->l_free.lf_type) { + case ZAP_CHUNK_ENTRY: + le = &lc->l_entry; + + le->le_type = BSWAP_8(le->le_type); + le->le_int_size = BSWAP_8(le->le_int_size); + le->le_next = BSWAP_16(le->le_next); + le->le_name_chunk = BSWAP_16(le->le_name_chunk); + le->le_name_length = BSWAP_16(le->le_name_length); + le->le_value_chunk = BSWAP_16(le->le_value_chunk); + le->le_value_length = BSWAP_16(le->le_value_length); + le->le_cd = BSWAP_32(le->le_cd); + le->le_hash = BSWAP_64(le->le_hash); + break; + case ZAP_CHUNK_FREE: + lc->l_free.lf_type = BSWAP_8(lc->l_free.lf_type); + lc->l_free.lf_next = BSWAP_16(lc->l_free.lf_next); + break; + case ZAP_CHUNK_ARRAY: + lc->l_array.la_type = BSWAP_8(lc->l_array.la_type); + lc->l_array.la_next = BSWAP_16(lc->l_array.la_next); + /* la_array doesn't need swapping */ + break; + default: + ASSERT(!"bad leaf type"); + } + } +} + +void +zap_leaf_init(zap_leaf_t *l, boolean_t sort) +{ + int i; + + l->l_bs = highbit(l->l_dbuf->db_size)-1; + zap_memset(&l->l_phys->l_hdr, 0, sizeof (struct zap_leaf_header)); + zap_memset(l->l_phys->l_hash, CHAIN_END, 2*ZAP_LEAF_HASH_NUMENTRIES(l)); + for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) { + ZAP_LEAF_CHUNK(l, i).l_free.lf_type = ZAP_CHUNK_FREE; + ZAP_LEAF_CHUNK(l, i).l_free.lf_next = i+1; + } + ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)-1).l_free.lf_next = CHAIN_END; + l->l_phys->l_hdr.lh_block_type = ZBT_LEAF; + l->l_phys->l_hdr.lh_magic = ZAP_LEAF_MAGIC; + l->l_phys->l_hdr.lh_nfree = ZAP_LEAF_NUMCHUNKS(l); + 
if (sort) + l->l_phys->l_hdr.lh_flags |= ZLF_ENTRIES_CDSORTED; +} + +/* + * Routines which manipulate leaf chunks (l_chunk[]). + */ + +static uint16_t +zap_leaf_chunk_alloc(zap_leaf_t *l) +{ + int chunk; + + ASSERT(l->l_phys->l_hdr.lh_nfree > 0); + + chunk = l->l_phys->l_hdr.lh_freelist; + ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l)); + ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_free.lf_type, ==, ZAP_CHUNK_FREE); + + l->l_phys->l_hdr.lh_freelist = ZAP_LEAF_CHUNK(l, chunk).l_free.lf_next; + + l->l_phys->l_hdr.lh_nfree--; + + return (chunk); +} + +static void +zap_leaf_chunk_free(zap_leaf_t *l, uint16_t chunk) +{ + struct zap_leaf_free *zlf = &ZAP_LEAF_CHUNK(l, chunk).l_free; + ASSERT3U(l->l_phys->l_hdr.lh_nfree, <, ZAP_LEAF_NUMCHUNKS(l)); + ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l)); + ASSERT(zlf->lf_type != ZAP_CHUNK_FREE); + + zlf->lf_type = ZAP_CHUNK_FREE; + zlf->lf_next = l->l_phys->l_hdr.lh_freelist; + bzero(zlf->lf_pad, sizeof (zlf->lf_pad)); /* help it to compress */ + l->l_phys->l_hdr.lh_freelist = chunk; + + l->l_phys->l_hdr.lh_nfree++; +} + +/* + * Routines which manipulate leaf arrays (zap_leaf_array type chunks). + */ + +static uint16_t +zap_leaf_array_create(zap_leaf_t *l, const char *buf, + int integer_size, int num_integers) +{ + uint16_t chunk_head; + uint16_t *chunkp = &chunk_head; + int byten = 0; + uint64_t value; + int shift = (integer_size-1)*8; + int len = num_integers; + + ASSERT3U(num_integers * integer_size, <, MAX_ARRAY_BYTES); + + while (len > 0) { + uint16_t chunk = zap_leaf_chunk_alloc(l); + struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array; + int i; + + la->la_type = ZAP_CHUNK_ARRAY; + for (i = 0; i < ZAP_LEAF_ARRAY_BYTES; i++) { + if (byten == 0) + value = ldv(integer_size, buf); + la->la_array[i] = value >> shift; + value <<= 8; + if (++byten == integer_size) { + byten = 0; + buf += integer_size; + if (--len == 0) + break; + } + } + + *chunkp = chunk; + chunkp = &la->la_next; + } + *chunkp = CHAIN_END; + + return (chunk_head); +} + +static void +zap_leaf_array_free(zap_leaf_t *l, uint16_t *chunkp) +{ + uint16_t chunk = *chunkp; + + *chunkp = CHAIN_END; + + while (chunk != CHAIN_END) { + int nextchunk = ZAP_LEAF_CHUNK(l, chunk).l_array.la_next; + ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_array.la_type, ==, + ZAP_CHUNK_ARRAY); + zap_leaf_chunk_free(l, chunk); + chunk = nextchunk; + } +} + +/* array_len and buf_len are in integers, not bytes */ +static void +zap_leaf_array_read(zap_leaf_t *l, uint16_t chunk, + int array_int_len, int array_len, int buf_int_len, uint64_t buf_len, + char *buf) +{ + int len = MIN(array_len, buf_len); + int byten = 0; + uint64_t value = 0; + + ASSERT3U(array_int_len, <=, buf_int_len); + + /* Fast path for one 8-byte integer */ + if (array_int_len == 8 && buf_int_len == 8 && len == 1) { + struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array; + uint8_t *ip = la->la_array; + uint64_t *buf64 = (uint64_t *)buf; + + *buf64 = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 | + (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 | + (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 | + (uint64_t)ip[6] << 8 | (uint64_t)ip[7]; + return; + } + + /* Fast path for an array of 1-byte integers (eg. 
the entry name) */ + if (array_int_len == 1 && buf_int_len == 1 && + buf_len > array_len + ZAP_LEAF_ARRAY_BYTES) { + while (chunk != CHAIN_END) { + struct zap_leaf_array *la = + &ZAP_LEAF_CHUNK(l, chunk).l_array; + bcopy(la->la_array, buf, ZAP_LEAF_ARRAY_BYTES); + buf += ZAP_LEAF_ARRAY_BYTES; + chunk = la->la_next; + } + return; + } + + while (len > 0) { + struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array; + int i; + + ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l)); + for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) { + value = (value << 8) | la->la_array[i]; + byten++; + if (byten == array_int_len) { + stv(buf_int_len, buf, value); + byten = 0; + len--; + if (len == 0) + return; + buf += buf_int_len; + } + } + chunk = la->la_next; + } +} + +/* + * Only to be used on 8-bit arrays. + * array_len is actual len in bytes (not encoded le_value_length). + * namenorm is null-terminated. + */ +static boolean_t +zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn, int chunk, int array_len) +{ + int bseen = 0; + + if (zn->zn_matchtype == MT_FIRST) { + char *thisname = kmem_alloc(array_len, KM_SLEEP); + boolean_t match; + + zap_leaf_array_read(l, chunk, 1, array_len, 1, + array_len, thisname); + match = zap_match(zn, thisname); + kmem_free(thisname, array_len); + return (match); + } + + /* Fast path for exact matching */ + while (bseen < array_len) { + struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array; + int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES); + ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l)); + if (bcmp(la->la_array, zn->zn_name_orij + bseen, toread)) + break; + chunk = la->la_next; + bseen += toread; + } + return (bseen == array_len); +} + +/* + * Routines which manipulate leaf entries. + */ + +int +zap_leaf_lookup(zap_leaf_t *l, zap_name_t *zn, zap_entry_handle_t *zeh) +{ + uint16_t *chunkp; + struct zap_leaf_entry *le; + + ASSERT3U(l->l_phys->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC); + +again: + for (chunkp = LEAF_HASH_ENTPTR(l, zn->zn_hash); + *chunkp != CHAIN_END; chunkp = &le->le_next) { + uint16_t chunk = *chunkp; + le = ZAP_LEAF_ENTRY(l, chunk); + + ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l)); + ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY); + + if (le->le_hash != zn->zn_hash) + continue; + + /* + * NB: the entry chain is always sorted by cd on + * normalized zap objects, so this will find the + * lowest-cd match for MT_FIRST. + */ + ASSERT(zn->zn_matchtype == MT_EXACT || + (l->l_phys->l_hdr.lh_flags & ZLF_ENTRIES_CDSORTED)); + if (zap_leaf_array_match(l, zn, le->le_name_chunk, + le->le_name_length)) { + zeh->zeh_num_integers = le->le_value_length; + zeh->zeh_integer_size = le->le_int_size; + zeh->zeh_cd = le->le_cd; + zeh->zeh_hash = le->le_hash; + zeh->zeh_chunkp = chunkp; + zeh->zeh_leaf = l; + return (0); + } + } + + /* + * NB: we could of course do this in one pass, but that would be + * a pain. We'll see if MT_BEST is even used much. + */ + if (zn->zn_matchtype == MT_BEST) { + zn->zn_matchtype = MT_FIRST; + goto again; + } + + return (ENOENT); +} + +/* Return (h1,cd1 >= h2,cd2) */ +#define HCD_GTEQ(h1, cd1, h2, cd2) \ + ((h1 > h2) ? TRUE : ((h1 == h2 && cd1 >= cd2) ? 
TRUE : FALSE)) + +int +zap_leaf_lookup_closest(zap_leaf_t *l, + uint64_t h, uint32_t cd, zap_entry_handle_t *zeh) +{ + uint16_t chunk; + uint64_t besth = -1ULL; + uint32_t bestcd = ZAP_MAXCD; + uint16_t bestlh = ZAP_LEAF_HASH_NUMENTRIES(l)-1; + uint16_t lh; + struct zap_leaf_entry *le; + + ASSERT3U(l->l_phys->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC); + + for (lh = LEAF_HASH(l, h); lh <= bestlh; lh++) { + for (chunk = l->l_phys->l_hash[lh]; + chunk != CHAIN_END; chunk = le->le_next) { + le = ZAP_LEAF_ENTRY(l, chunk); + + ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l)); + ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY); + + if (HCD_GTEQ(le->le_hash, le->le_cd, h, cd) && + HCD_GTEQ(besth, bestcd, le->le_hash, le->le_cd)) { + ASSERT3U(bestlh, >=, lh); + bestlh = lh; + besth = le->le_hash; + bestcd = le->le_cd; + + zeh->zeh_num_integers = le->le_value_length; + zeh->zeh_integer_size = le->le_int_size; + zeh->zeh_cd = le->le_cd; + zeh->zeh_hash = le->le_hash; + zeh->zeh_fakechunk = chunk; + zeh->zeh_chunkp = &zeh->zeh_fakechunk; + zeh->zeh_leaf = l; + } + } + } + + return (bestcd == ZAP_MAXCD ? ENOENT : 0); +} + +int +zap_entry_read(const zap_entry_handle_t *zeh, + uint8_t integer_size, uint64_t num_integers, void *buf) +{ + struct zap_leaf_entry *le = + ZAP_LEAF_ENTRY(zeh->zeh_leaf, *zeh->zeh_chunkp); + ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY); + + if (le->le_int_size > integer_size) + return (EINVAL); + + zap_leaf_array_read(zeh->zeh_leaf, le->le_value_chunk, le->le_int_size, + le->le_value_length, integer_size, num_integers, buf); + + if (zeh->zeh_num_integers > num_integers) + return (EOVERFLOW); + return (0); + +} + +int +zap_entry_read_name(const zap_entry_handle_t *zeh, uint16_t buflen, char *buf) +{ + struct zap_leaf_entry *le = + ZAP_LEAF_ENTRY(zeh->zeh_leaf, *zeh->zeh_chunkp); + ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY); + + zap_leaf_array_read(zeh->zeh_leaf, le->le_name_chunk, 1, + le->le_name_length, 1, buflen, buf); + if (le->le_name_length > buflen) + return (EOVERFLOW); + return (0); +} + +int +zap_entry_update(zap_entry_handle_t *zeh, + uint8_t integer_size, uint64_t num_integers, const void *buf) +{ + int delta_chunks; + zap_leaf_t *l = zeh->zeh_leaf; + struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, *zeh->zeh_chunkp); + + delta_chunks = ZAP_LEAF_ARRAY_NCHUNKS(num_integers * integer_size) - + ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_length * le->le_int_size); + + if ((int)l->l_phys->l_hdr.lh_nfree < delta_chunks) + return (EAGAIN); + + /* + * We should search other chained leaves (via + * zap_entry_remove,create?) otherwise returning EAGAIN will + * just send us into an infinite loop if we have to chain + * another leaf block, rather than being able to split this + * block. 
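+	 * As currently written, the EAGAIN simply propagates back to
+	 * fzap_update(), which calls zap_expand_leaf() and retries, so
+	 * the concern above only matters once a leaf can no longer be
+	 * split.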
+ */ + + zap_leaf_array_free(l, &le->le_value_chunk); + le->le_value_chunk = + zap_leaf_array_create(l, buf, integer_size, num_integers); + le->le_value_length = num_integers; + le->le_int_size = integer_size; + return (0); +} + +void +zap_entry_remove(zap_entry_handle_t *zeh) +{ + uint16_t entry_chunk; + struct zap_leaf_entry *le; + zap_leaf_t *l = zeh->zeh_leaf; + + ASSERT3P(zeh->zeh_chunkp, !=, &zeh->zeh_fakechunk); + + entry_chunk = *zeh->zeh_chunkp; + le = ZAP_LEAF_ENTRY(l, entry_chunk); + ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY); + + zap_leaf_array_free(l, &le->le_name_chunk); + zap_leaf_array_free(l, &le->le_value_chunk); + + *zeh->zeh_chunkp = le->le_next; + zap_leaf_chunk_free(l, entry_chunk); + + l->l_phys->l_hdr.lh_nentries--; +} + +int +zap_entry_create(zap_leaf_t *l, const char *name, uint64_t h, uint32_t cd, + uint8_t integer_size, uint64_t num_integers, const void *buf, + zap_entry_handle_t *zeh) +{ + uint16_t chunk; + uint16_t *chunkp; + struct zap_leaf_entry *le; + uint64_t namelen, valuelen; + int numchunks; + + valuelen = integer_size * num_integers; + namelen = strlen(name) + 1; + ASSERT(namelen >= 2); + + numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(namelen) + + ZAP_LEAF_ARRAY_NCHUNKS(valuelen); + if (numchunks > ZAP_LEAF_NUMCHUNKS(l)) + return (E2BIG); + + if (cd == ZAP_MAXCD) { + /* find the lowest unused cd */ + if (l->l_phys->l_hdr.lh_flags & ZLF_ENTRIES_CDSORTED) { + cd = 0; + + for (chunk = *LEAF_HASH_ENTPTR(l, h); + chunk != CHAIN_END; chunk = le->le_next) { + le = ZAP_LEAF_ENTRY(l, chunk); + if (le->le_cd > cd) + break; + if (le->le_hash == h) { + ASSERT3U(cd, ==, le->le_cd); + cd++; + } + } + } else { + /* old unsorted format; do it the O(n^2) way */ + for (cd = 0; cd < ZAP_MAXCD; cd++) { + for (chunk = *LEAF_HASH_ENTPTR(l, h); + chunk != CHAIN_END; chunk = le->le_next) { + le = ZAP_LEAF_ENTRY(l, chunk); + if (le->le_hash == h && + le->le_cd == cd) { + break; + } + } + /* If this cd is not in use, we are good. */ + if (chunk == CHAIN_END) + break; + } + } + /* + * we would run out of space in a block before we could + * have ZAP_MAXCD entries + */ + ASSERT3U(cd, <, ZAP_MAXCD); + } + + if (l->l_phys->l_hdr.lh_nfree < numchunks) + return (EAGAIN); + + /* make the entry */ + chunk = zap_leaf_chunk_alloc(l); + le = ZAP_LEAF_ENTRY(l, chunk); + le->le_type = ZAP_CHUNK_ENTRY; + le->le_name_chunk = zap_leaf_array_create(l, name, 1, namelen); + le->le_name_length = namelen; + le->le_value_chunk = + zap_leaf_array_create(l, buf, integer_size, num_integers); + le->le_value_length = num_integers; + le->le_int_size = integer_size; + le->le_hash = h; + le->le_cd = cd; + + /* link it into the hash chain */ + /* XXX if we did the search above, we could just use that */ + chunkp = zap_leaf_rehash_entry(l, chunk); + + l->l_phys->l_hdr.lh_nentries++; + + zeh->zeh_leaf = l; + zeh->zeh_num_integers = num_integers; + zeh->zeh_integer_size = le->le_int_size; + zeh->zeh_cd = le->le_cd; + zeh->zeh_hash = le->le_hash; + zeh->zeh_chunkp = chunkp; + + return (0); +} + +/* + * Determine if there is another entry with the same normalized form. + * For performance purposes, either zn or name must be provided (the + * other can be NULL). Note, there usually won't be any hash + * conflicts, in which case we don't need the concatenated/normalized + * form of the name. But all callers have one of these on hand anyway, + * so might as well take advantage. 
A cleaner but slower interface + * would accept neither argument, and compute the normalized name as + * needed (using zap_name_alloc(zap_entry_read_name(zeh))). + */ +boolean_t +zap_entry_normalization_conflict(zap_entry_handle_t *zeh, zap_name_t *zn, + const char *name, zap_t *zap) +{ + uint64_t chunk; + struct zap_leaf_entry *le; + boolean_t allocdzn = B_FALSE; + + if (zap->zap_normflags == 0) + return (B_FALSE); + + for (chunk = *LEAF_HASH_ENTPTR(zeh->zeh_leaf, zeh->zeh_hash); + chunk != CHAIN_END; chunk = le->le_next) { + le = ZAP_LEAF_ENTRY(zeh->zeh_leaf, chunk); + if (le->le_hash != zeh->zeh_hash) + continue; + if (le->le_cd == zeh->zeh_cd) + continue; + + if (zn == NULL) { + zn = zap_name_alloc(zap, name, MT_FIRST); + allocdzn = B_TRUE; + } + if (zap_leaf_array_match(zeh->zeh_leaf, zn, + le->le_name_chunk, le->le_name_length)) { + if (allocdzn) + zap_name_free(zn); + return (B_TRUE); + } + } + if (allocdzn) + zap_name_free(zn); + return (B_FALSE); +} + +/* + * Routines for transferring entries between leafs. + */ + +static uint16_t * +zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry) +{ + struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry); + struct zap_leaf_entry *le2; + uint16_t *chunkp; + + /* + * keep the entry chain sorted by cd + * NB: this will not cause problems for unsorted leafs, though + * it is unnecessary there. + */ + for (chunkp = LEAF_HASH_ENTPTR(l, le->le_hash); + *chunkp != CHAIN_END; chunkp = &le2->le_next) { + le2 = ZAP_LEAF_ENTRY(l, *chunkp); + if (le2->le_cd > le->le_cd) + break; + } + + le->le_next = *chunkp; + *chunkp = entry; + return (chunkp); +} + +static uint16_t +zap_leaf_transfer_array(zap_leaf_t *l, uint16_t chunk, zap_leaf_t *nl) +{ + uint16_t new_chunk; + uint16_t *nchunkp = &new_chunk; + + while (chunk != CHAIN_END) { + uint16_t nchunk = zap_leaf_chunk_alloc(nl); + struct zap_leaf_array *nla = + &ZAP_LEAF_CHUNK(nl, nchunk).l_array; + struct zap_leaf_array *la = + &ZAP_LEAF_CHUNK(l, chunk).l_array; + int nextchunk = la->la_next; + + ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l)); + ASSERT3U(nchunk, <, ZAP_LEAF_NUMCHUNKS(l)); + + *nla = *la; /* structure assignment */ + + zap_leaf_chunk_free(l, chunk); + chunk = nextchunk; + *nchunkp = nchunk; + nchunkp = &nla->la_next; + } + *nchunkp = CHAIN_END; + return (new_chunk); +} + +static void +zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl) +{ + struct zap_leaf_entry *le, *nle; + uint16_t chunk; + + le = ZAP_LEAF_ENTRY(l, entry); + ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY); + + chunk = zap_leaf_chunk_alloc(nl); + nle = ZAP_LEAF_ENTRY(nl, chunk); + *nle = *le; /* structure assignment */ + + (void) zap_leaf_rehash_entry(nl, chunk); + + nle->le_name_chunk = zap_leaf_transfer_array(l, le->le_name_chunk, nl); + nle->le_value_chunk = + zap_leaf_transfer_array(l, le->le_value_chunk, nl); + + zap_leaf_chunk_free(l, entry); + + l->l_phys->l_hdr.lh_nentries--; + nl->l_phys->l_hdr.lh_nentries++; +} + +/* + * Transfer the entries whose hash prefix ends in 1 to the new leaf. 
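+ * Both leaves end up with lh_prefix_len incremented by one: l keeps the
+ * old prefix with a 0 appended, nl takes the old prefix with a 1
+ * appended.  Entries whose next hash bit is 1 are moved to nl with
+ * zap_leaf_transfer_entry(); the rest are rehashed into l's rebuilt
+ * hash table.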
+ */ +void +zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort) +{ + int i; + int bit = 64 - 1 - l->l_phys->l_hdr.lh_prefix_len; + + /* set new prefix and prefix_len */ + l->l_phys->l_hdr.lh_prefix <<= 1; + l->l_phys->l_hdr.lh_prefix_len++; + nl->l_phys->l_hdr.lh_prefix = l->l_phys->l_hdr.lh_prefix | 1; + nl->l_phys->l_hdr.lh_prefix_len = l->l_phys->l_hdr.lh_prefix_len; + + /* break existing hash chains */ + zap_memset(l->l_phys->l_hash, CHAIN_END, 2*ZAP_LEAF_HASH_NUMENTRIES(l)); + + if (sort) + l->l_phys->l_hdr.lh_flags |= ZLF_ENTRIES_CDSORTED; + + /* + * Transfer entries whose hash bit 'bit' is set to nl; rehash + * the remaining entries + * + * NB: We could find entries via the hashtable instead. That + * would be O(hashents+numents) rather than O(numblks+numents), + * but this accesses memory more sequentially, and when we're + * called, the block is usually pretty full. + */ + for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) { + struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, i); + if (le->le_type != ZAP_CHUNK_ENTRY) + continue; + + if (le->le_hash & (1ULL << bit)) + zap_leaf_transfer_entry(l, i, nl); + else + (void) zap_leaf_rehash_entry(l, i); + } +} + +void +zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t *zs) +{ + int i, n; + + n = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift - + l->l_phys->l_hdr.lh_prefix_len; + n = MIN(n, ZAP_HISTOGRAM_SIZE-1); + zs->zs_leafs_with_2n_pointers[n]++; + + + n = l->l_phys->l_hdr.lh_nentries/5; + n = MIN(n, ZAP_HISTOGRAM_SIZE-1); + zs->zs_blocks_with_n5_entries[n]++; + + n = ((1<l_phys->l_hdr.lh_nfree * (ZAP_LEAF_ARRAY_BYTES+1))*10 / + (1<zs_blocks_n_tenths_full[n]++; + + for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) { + int nentries = 0; + int chunk = l->l_phys->l_hash[i]; + + while (chunk != CHAIN_END) { + struct zap_leaf_entry *le = + ZAP_LEAF_ENTRY(l, chunk); + + n = 1 + ZAP_LEAF_ARRAY_NCHUNKS(le->le_name_length) + + ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_length * + le->le_int_size); + n = MIN(n, ZAP_HISTOGRAM_SIZE-1); + zs->zs_entries_using_n_chunks[n]++; + + chunk = le->le_next; + nentries++; + } + + n = nentries; + n = MIN(n, ZAP_HISTOGRAM_SIZE-1); + zs->zs_buckets_with_n_entries[n]++; + } +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zap_micro.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zap_micro.c new file mode 100644 index 000000000000..abba42775bb7 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zap_micro.c @@ -0,0 +1,1069 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _KERNEL +#include +#endif + +static int mzap_upgrade(zap_t **zapp, dmu_tx_t *tx); + + +static uint64_t +zap_hash(zap_t *zap, const char *normname) +{ + const uint8_t *cp; + uint8_t c; + uint64_t crc = zap->zap_salt; + + /* NB: name must already be normalized, if necessary */ + + ASSERT(crc != 0); + ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); + for (cp = (const uint8_t *)normname; (c = *cp) != '\0'; cp++) { + crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ c) & 0xFF]; + } + + /* + * Only use 28 bits, since we need 4 bits in the cookie for the + * collision differentiator. We MUST use the high bits, since + * those are the ones that we first pay attention to when + * chosing the bucket. + */ + crc &= ~((1ULL << (64 - ZAP_HASHBITS)) - 1); + + return (crc); +} + +static int +zap_normalize(zap_t *zap, const char *name, char *namenorm) +{ + size_t inlen, outlen; + int err; + + inlen = strlen(name) + 1; + outlen = ZAP_MAXNAMELEN; + + err = 0; + (void) u8_textprep_str((char *)name, &inlen, namenorm, &outlen, + zap->zap_normflags | U8_TEXTPREP_IGNORE_NULL, U8_UNICODE_LATEST, + &err); + + return (err); +} + +boolean_t +zap_match(zap_name_t *zn, const char *matchname) +{ + if (zn->zn_matchtype == MT_FIRST) { + char norm[ZAP_MAXNAMELEN]; + + if (zap_normalize(zn->zn_zap, matchname, norm) != 0) + return (B_FALSE); + + return (strcmp(zn->zn_name_norm, norm) == 0); + } else { + /* MT_BEST or MT_EXACT */ + return (strcmp(zn->zn_name_orij, matchname) == 0); + } +} + +void +zap_name_free(zap_name_t *zn) +{ + kmem_free(zn, sizeof (zap_name_t)); +} + +/* XXX combine this with zap_lockdir()? */ +zap_name_t * +zap_name_alloc(zap_t *zap, const char *name, matchtype_t mt) +{ + zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP); + + zn->zn_zap = zap; + zn->zn_name_orij = name; + zn->zn_matchtype = mt; + if (zap->zap_normflags) { + if (zap_normalize(zap, name, zn->zn_normbuf) != 0) { + zap_name_free(zn); + return (NULL); + } + zn->zn_name_norm = zn->zn_normbuf; + } else { + if (mt != MT_EXACT) { + zap_name_free(zn); + return (NULL); + } + zn->zn_name_norm = zn->zn_name_orij; + } + + zn->zn_hash = zap_hash(zap, zn->zn_name_norm); + return (zn); +} + +static void +mzap_byteswap(mzap_phys_t *buf, size_t size) +{ + int i, max; + buf->mz_block_type = BSWAP_64(buf->mz_block_type); + buf->mz_salt = BSWAP_64(buf->mz_salt); + buf->mz_normflags = BSWAP_64(buf->mz_normflags); + max = (size / MZAP_ENT_LEN) - 1; + for (i = 0; i < max; i++) { + buf->mz_chunk[i].mze_value = + BSWAP_64(buf->mz_chunk[i].mze_value); + buf->mz_chunk[i].mze_cd = + BSWAP_32(buf->mz_chunk[i].mze_cd); + } +} + +void +zap_byteswap(void *buf, size_t size) +{ + uint64_t block_type; + + block_type = *(uint64_t *)buf; + + if (block_type == ZBT_MICRO || block_type == BSWAP_64(ZBT_MICRO)) { + /* ASSERT(magic == ZAP_LEAF_MAGIC); */ + mzap_byteswap(buf, size); + } else { + fzap_byteswap(buf, size); + } +} + +static int +mze_compare(const void *arg1, const void *arg2) +{ + const mzap_ent_t *mze1 = arg1; + const mzap_ent_t *mze2 = arg2; + + if (mze1->mze_hash > mze2->mze_hash) + return (+1); + if (mze1->mze_hash < mze2->mze_hash) + return (-1); + if (mze1->mze_phys.mze_cd > mze2->mze_phys.mze_cd) + return (+1); + if (mze1->mze_phys.mze_cd < mze2->mze_phys.mze_cd) + return (-1); + return (0); +} + +static void +mze_insert(zap_t *zap, int chunkid, uint64_t hash, mzap_ent_phys_t *mzep) +{ + mzap_ent_t *mze; + + 
ASSERT(zap->zap_ismicro); + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + ASSERT(mzep->mze_cd < ZAP_MAXCD); + + mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP); + mze->mze_chunkid = chunkid; + mze->mze_hash = hash; + mze->mze_phys = *mzep; + avl_add(&zap->zap_m.zap_avl, mze); +} + +static mzap_ent_t * +mze_find(zap_name_t *zn) +{ + mzap_ent_t mze_tofind; + mzap_ent_t *mze; + avl_index_t idx; + avl_tree_t *avl = &zn->zn_zap->zap_m.zap_avl; + + ASSERT(zn->zn_zap->zap_ismicro); + ASSERT(RW_LOCK_HELD(&zn->zn_zap->zap_rwlock)); + + if (strlen(zn->zn_name_norm) >= sizeof (mze_tofind.mze_phys.mze_name)) + return (NULL); + + mze_tofind.mze_hash = zn->zn_hash; + mze_tofind.mze_phys.mze_cd = 0; + +again: + mze = avl_find(avl, &mze_tofind, &idx); + if (mze == NULL) + mze = avl_nearest(avl, idx, AVL_AFTER); + for (; mze && mze->mze_hash == zn->zn_hash; mze = AVL_NEXT(avl, mze)) { + if (zap_match(zn, mze->mze_phys.mze_name)) + return (mze); + } + if (zn->zn_matchtype == MT_BEST) { + zn->zn_matchtype = MT_FIRST; + goto again; + } + return (NULL); +} + +static uint32_t +mze_find_unused_cd(zap_t *zap, uint64_t hash) +{ + mzap_ent_t mze_tofind; + mzap_ent_t *mze; + avl_index_t idx; + avl_tree_t *avl = &zap->zap_m.zap_avl; + uint32_t cd; + + ASSERT(zap->zap_ismicro); + ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); + + mze_tofind.mze_hash = hash; + mze_tofind.mze_phys.mze_cd = 0; + + cd = 0; + for (mze = avl_find(avl, &mze_tofind, &idx); + mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) { + if (mze->mze_phys.mze_cd != cd) + break; + cd++; + } + + return (cd); +} + +static void +mze_remove(zap_t *zap, mzap_ent_t *mze) +{ + ASSERT(zap->zap_ismicro); + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + + avl_remove(&zap->zap_m.zap_avl, mze); + kmem_free(mze, sizeof (mzap_ent_t)); +} + +static void +mze_destroy(zap_t *zap) +{ + mzap_ent_t *mze; + void *avlcookie = NULL; + + while (mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie)) + kmem_free(mze, sizeof (mzap_ent_t)); + avl_destroy(&zap->zap_m.zap_avl); +} + +static zap_t * +mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db) +{ + zap_t *winner; + zap_t *zap; + int i; + + ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t)); + + zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP); + rw_init(&zap->zap_rwlock, 0, 0, 0); + rw_enter(&zap->zap_rwlock, RW_WRITER); + zap->zap_objset = os; + zap->zap_object = obj; + zap->zap_dbuf = db; + + if (*(uint64_t *)db->db_data != ZBT_MICRO) { + mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0); + zap->zap_f.zap_block_shift = highbit(db->db_size) - 1; + } else { + zap->zap_ismicro = TRUE; + } + + /* + * Make sure that zap_ismicro is set before we let others see + * it, because zap_lockdir() checks zap_ismicro without the lock + * held. 
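+	 * dmu_buf_set_user() below is the publication point: if another
+	 * thread already attached a zap_t to this dbuf we lose the race,
+	 * tear down the zap_t built here and return the winner instead.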
+ */ + winner = dmu_buf_set_user(db, zap, &zap->zap_m.zap_phys, zap_evict); + + if (winner != NULL) { + rw_exit(&zap->zap_rwlock); + rw_destroy(&zap->zap_rwlock); + if (!zap->zap_ismicro) + mutex_destroy(&zap->zap_f.zap_num_entries_mtx); + kmem_free(zap, sizeof (zap_t)); + return (winner); + } + + if (zap->zap_ismicro) { + zap->zap_salt = zap->zap_m.zap_phys->mz_salt; + zap->zap_normflags = zap->zap_m.zap_phys->mz_normflags; + zap->zap_m.zap_num_chunks = db->db_size / MZAP_ENT_LEN - 1; + avl_create(&zap->zap_m.zap_avl, mze_compare, + sizeof (mzap_ent_t), offsetof(mzap_ent_t, mze_node)); + + for (i = 0; i < zap->zap_m.zap_num_chunks; i++) { + mzap_ent_phys_t *mze = + &zap->zap_m.zap_phys->mz_chunk[i]; + if (mze->mze_name[0]) { + zap_name_t *zn; + + zap->zap_m.zap_num_entries++; + zn = zap_name_alloc(zap, mze->mze_name, + MT_EXACT); + mze_insert(zap, i, zn->zn_hash, mze); + zap_name_free(zn); + } + } + } else { + zap->zap_salt = zap->zap_f.zap_phys->zap_salt; + zap->zap_normflags = zap->zap_f.zap_phys->zap_normflags; + + ASSERT3U(sizeof (struct zap_leaf_header), ==, + 2*ZAP_LEAF_CHUNKSIZE); + + /* + * The embedded pointer table should not overlap the + * other members. + */ + ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), >, + &zap->zap_f.zap_phys->zap_salt); + + /* + * The embedded pointer table should end at the end of + * the block + */ + ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap, + 1<zap_f.zap_phys, ==, + zap->zap_dbuf->db_size); + } + rw_exit(&zap->zap_rwlock); + return (zap); +} + +int +zap_lockdir(objset_t *os, uint64_t obj, dmu_tx_t *tx, + krw_t lti, boolean_t fatreader, boolean_t adding, zap_t **zapp) +{ + zap_t *zap; + dmu_buf_t *db; + krw_t lt; + int err; + + *zapp = NULL; + + err = dmu_buf_hold(os, obj, 0, NULL, &db); + if (err) + return (err); + +#ifdef ZFS_DEBUG + { + dmu_object_info_t doi; + dmu_object_info_from_db(db, &doi); + ASSERT(dmu_ot[doi.doi_type].ot_byteswap == zap_byteswap); + } +#endif + + zap = dmu_buf_get_user(db); + if (zap == NULL) + zap = mzap_open(os, obj, db); + + /* + * We're checking zap_ismicro without the lock held, in order to + * tell what type of lock we want. Once we have some sort of + * lock, see if it really is the right type. In practice this + * can only be different if it was upgraded from micro to fat, + * and micro wanted WRITER but fat only needs READER. + */ + lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti; + rw_enter(&zap->zap_rwlock, lt); + if (lt != ((!zap->zap_ismicro && fatreader) ? RW_READER : lti)) { + /* it was upgraded, now we only need reader */ + ASSERT(lt == RW_WRITER); + ASSERT(RW_READER == + (!zap->zap_ismicro && fatreader) ? 
RW_READER : lti); + rw_downgrade(&zap->zap_rwlock); + lt = RW_READER; + } + + zap->zap_objset = os; + + if (lt == RW_WRITER) + dmu_buf_will_dirty(db, tx); + + ASSERT3P(zap->zap_dbuf, ==, db); + + ASSERT(!zap->zap_ismicro || + zap->zap_m.zap_num_entries <= zap->zap_m.zap_num_chunks); + if (zap->zap_ismicro && tx && adding && + zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) { + uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE; + if (newsz > MZAP_MAX_BLKSZ) { + dprintf("upgrading obj %llu: num_entries=%u\n", + obj, zap->zap_m.zap_num_entries); + *zapp = zap; + return (mzap_upgrade(zapp, tx)); + } + err = dmu_object_set_blocksize(os, obj, newsz, 0, tx); + ASSERT3U(err, ==, 0); + zap->zap_m.zap_num_chunks = + db->db_size / MZAP_ENT_LEN - 1; + } + + *zapp = zap; + return (0); +} + +void +zap_unlockdir(zap_t *zap) +{ + rw_exit(&zap->zap_rwlock); + dmu_buf_rele(zap->zap_dbuf, NULL); +} + +static int +mzap_upgrade(zap_t **zapp, dmu_tx_t *tx) +{ + mzap_phys_t *mzp; + int i, sz, nchunks, err; + zap_t *zap = *zapp; + + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + + sz = zap->zap_dbuf->db_size; + mzp = kmem_alloc(sz, KM_SLEEP); + bcopy(zap->zap_dbuf->db_data, mzp, sz); + nchunks = zap->zap_m.zap_num_chunks; + + err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object, + 1ULL << fzap_default_block_shift, 0, tx); + if (err) { + kmem_free(mzp, sz); + return (err); + } + + dprintf("upgrading obj=%llu with %u chunks\n", + zap->zap_object, nchunks); + /* XXX destroy the avl later, so we can use the stored hash value */ + mze_destroy(zap); + + fzap_upgrade(zap, tx); + + for (i = 0; i < nchunks; i++) { + int err; + mzap_ent_phys_t *mze = &mzp->mz_chunk[i]; + zap_name_t *zn; + if (mze->mze_name[0] == 0) + continue; + dprintf("adding %s=%llu\n", + mze->mze_name, mze->mze_value); + zn = zap_name_alloc(zap, mze->mze_name, MT_EXACT); + err = fzap_add_cd(zn, 8, 1, &mze->mze_value, mze->mze_cd, tx); + zap = zn->zn_zap; /* fzap_add_cd() may change zap */ + zap_name_free(zn); + if (err) + break; + } + kmem_free(mzp, sz); + *zapp = zap; + return (err); +} + +static void +mzap_create_impl(objset_t *os, uint64_t obj, int normflags, dmu_tx_t *tx) +{ + dmu_buf_t *db; + mzap_phys_t *zp; + + VERIFY(0 == dmu_buf_hold(os, obj, 0, FTAG, &db)); + +#ifdef ZFS_DEBUG + { + dmu_object_info_t doi; + dmu_object_info_from_db(db, &doi); + ASSERT(dmu_ot[doi.doi_type].ot_byteswap == zap_byteswap); + } +#endif + + dmu_buf_will_dirty(db, tx); + zp = db->db_data; + zp->mz_block_type = ZBT_MICRO; + zp->mz_salt = ((uintptr_t)db ^ (uintptr_t)tx ^ (obj << 1)) | 1ULL; + zp->mz_normflags = normflags; + dmu_buf_rele(db, FTAG); +} + +int +zap_create_claim(objset_t *os, uint64_t obj, dmu_object_type_t ot, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx) +{ + return (zap_create_claim_norm(os, obj, + 0, ot, bonustype, bonuslen, tx)); +} + +int +zap_create_claim_norm(objset_t *os, uint64_t obj, int normflags, + dmu_object_type_t ot, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx) +{ + int err; + + err = dmu_object_claim(os, obj, ot, 0, bonustype, bonuslen, tx); + if (err != 0) + return (err); + mzap_create_impl(os, obj, normflags, tx); + return (0); +} + +uint64_t +zap_create(objset_t *os, dmu_object_type_t ot, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx) +{ + return (zap_create_norm(os, 0, ot, bonustype, bonuslen, tx)); +} + +uint64_t +zap_create_norm(objset_t *os, int normflags, dmu_object_type_t ot, + dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx) +{ + uint64_t obj = dmu_object_alloc(os, ot, 
0, bonustype, bonuslen, tx); + + mzap_create_impl(os, obj, normflags, tx); + return (obj); +} + +int +zap_destroy(objset_t *os, uint64_t zapobj, dmu_tx_t *tx) +{ + /* + * dmu_object_free will free the object number and free the + * data. Freeing the data will cause our pageout function to be + * called, which will destroy our data (zap_leaf_t's and zap_t). + */ + + return (dmu_object_free(os, zapobj, tx)); +} + +_NOTE(ARGSUSED(0)) +void +zap_evict(dmu_buf_t *db, void *vzap) +{ + zap_t *zap = vzap; + + rw_destroy(&zap->zap_rwlock); + + if (zap->zap_ismicro) + mze_destroy(zap); + else + mutex_destroy(&zap->zap_f.zap_num_entries_mtx); + + kmem_free(zap, sizeof (zap_t)); +} + +int +zap_count(objset_t *os, uint64_t zapobj, uint64_t *count) +{ + zap_t *zap; + int err; + + err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap); + if (err) + return (err); + if (!zap->zap_ismicro) { + err = fzap_count(zap, count); + } else { + *count = zap->zap_m.zap_num_entries; + } + zap_unlockdir(zap); + return (err); +} + +/* + * zn may be NULL; if not specified, it will be computed if needed. + * See also the comment above zap_entry_normalization_conflict(). + */ +static boolean_t +mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze) +{ + mzap_ent_t *other; + int direction = AVL_BEFORE; + boolean_t allocdzn = B_FALSE; + + if (zap->zap_normflags == 0) + return (B_FALSE); + +again: + for (other = avl_walk(&zap->zap_m.zap_avl, mze, direction); + other && other->mze_hash == mze->mze_hash; + other = avl_walk(&zap->zap_m.zap_avl, other, direction)) { + + if (zn == NULL) { + zn = zap_name_alloc(zap, mze->mze_phys.mze_name, + MT_FIRST); + allocdzn = B_TRUE; + } + if (zap_match(zn, other->mze_phys.mze_name)) { + if (allocdzn) + zap_name_free(zn); + return (B_TRUE); + } + } + + if (direction == AVL_BEFORE) { + direction = AVL_AFTER; + goto again; + } + + if (allocdzn) + zap_name_free(zn); + return (B_FALSE); +} + +/* + * Routines for manipulating attributes. 
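+ *
+ * Each entry point follows the same pattern: zap_lockdir() to attach or
+ * find the zap_t, zap_name_alloc() to hash (and, if requested, normalize)
+ * the name, then dispatch either to the microzap code here or to the
+ * fat-zap code in zap.c, and finally zap_name_free()/zap_unlockdir().
+ * A consumer-side sketch (the object number is hypothetical):
+ *
+ *	uint64_t val;
+ *	error = zap_lookup(os, zapobj, "myattr", 8, 1, &val);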
+ */ + +int +zap_lookup(objset_t *os, uint64_t zapobj, const char *name, + uint64_t integer_size, uint64_t num_integers, void *buf) +{ + return (zap_lookup_norm(os, zapobj, name, integer_size, + num_integers, buf, MT_EXACT, NULL, 0, NULL)); +} + +int +zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name, + uint64_t integer_size, uint64_t num_integers, void *buf, + matchtype_t mt, char *realname, int rn_len, + boolean_t *ncp) +{ + zap_t *zap; + int err; + mzap_ent_t *mze; + zap_name_t *zn; + + err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap); + if (err) + return (err); + zn = zap_name_alloc(zap, name, mt); + if (zn == NULL) { + zap_unlockdir(zap); + return (ENOTSUP); + } + + if (!zap->zap_ismicro) { + err = fzap_lookup(zn, integer_size, num_integers, buf, + realname, rn_len, ncp); + } else { + mze = mze_find(zn); + if (mze == NULL) { + err = ENOENT; + } else { + if (num_integers < 1) { + err = EOVERFLOW; + } else if (integer_size != 8) { + err = EINVAL; + } else { + *(uint64_t *)buf = mze->mze_phys.mze_value; + (void) strlcpy(realname, + mze->mze_phys.mze_name, rn_len); + if (ncp) { + *ncp = mzap_normalization_conflict(zap, + zn, mze); + } + } + } + } + zap_name_free(zn); + zap_unlockdir(zap); + return (err); +} + +int +zap_length(objset_t *os, uint64_t zapobj, const char *name, + uint64_t *integer_size, uint64_t *num_integers) +{ + zap_t *zap; + int err; + mzap_ent_t *mze; + zap_name_t *zn; + + err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap); + if (err) + return (err); + zn = zap_name_alloc(zap, name, MT_EXACT); + if (zn == NULL) { + zap_unlockdir(zap); + return (ENOTSUP); + } + if (!zap->zap_ismicro) { + err = fzap_length(zn, integer_size, num_integers); + } else { + mze = mze_find(zn); + if (mze == NULL) { + err = ENOENT; + } else { + if (integer_size) + *integer_size = 8; + if (num_integers) + *num_integers = 1; + } + } + zap_name_free(zn); + zap_unlockdir(zap); + return (err); +} + +static void +mzap_addent(zap_name_t *zn, uint64_t value) +{ + int i; + zap_t *zap = zn->zn_zap; + int start = zap->zap_m.zap_alloc_next; + uint32_t cd; + + dprintf("obj=%llu %s=%llu\n", zap->zap_object, + zn->zn_name_orij, value); + ASSERT(RW_WRITE_HELD(&zap->zap_rwlock)); + +#ifdef ZFS_DEBUG + for (i = 0; i < zap->zap_m.zap_num_chunks; i++) { + mzap_ent_phys_t *mze = &zap->zap_m.zap_phys->mz_chunk[i]; + ASSERT(strcmp(zn->zn_name_orij, mze->mze_name) != 0); + } +#endif + + cd = mze_find_unused_cd(zap, zn->zn_hash); + /* given the limited size of the microzap, this can't happen */ + ASSERT(cd != ZAP_MAXCD); + +again: + for (i = start; i < zap->zap_m.zap_num_chunks; i++) { + mzap_ent_phys_t *mze = &zap->zap_m.zap_phys->mz_chunk[i]; + if (mze->mze_name[0] == 0) { + mze->mze_value = value; + mze->mze_cd = cd; + (void) strcpy(mze->mze_name, zn->zn_name_orij); + zap->zap_m.zap_num_entries++; + zap->zap_m.zap_alloc_next = i+1; + if (zap->zap_m.zap_alloc_next == + zap->zap_m.zap_num_chunks) + zap->zap_m.zap_alloc_next = 0; + mze_insert(zap, i, zn->zn_hash, mze); + return; + } + } + if (start != 0) { + start = 0; + goto again; + } + ASSERT(!"out of entries!"); +} + +int +zap_add(objset_t *os, uint64_t zapobj, const char *name, + int integer_size, uint64_t num_integers, + const void *val, dmu_tx_t *tx) +{ + zap_t *zap; + int err; + mzap_ent_t *mze; + const uint64_t *intval = val; + zap_name_t *zn; + + err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, &zap); + if (err) + return (err); + zn = zap_name_alloc(zap, name, MT_EXACT); + if (zn == NULL) { + 
zap_unlockdir(zap); + return (ENOTSUP); + } + if (!zap->zap_ismicro) { + err = fzap_add(zn, integer_size, num_integers, val, tx); + zap = zn->zn_zap; /* fzap_add() may change zap */ + } else if (integer_size != 8 || num_integers != 1 || + strlen(name) >= MZAP_NAME_LEN) { + dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n", + zapobj, integer_size, num_integers, name); + err = mzap_upgrade(&zn->zn_zap, tx); + if (err == 0) + err = fzap_add(zn, integer_size, num_integers, val, tx); + zap = zn->zn_zap; /* fzap_add() may change zap */ + } else { + mze = mze_find(zn); + if (mze != NULL) { + err = EEXIST; + } else { + mzap_addent(zn, *intval); + } + } + ASSERT(zap == zn->zn_zap); + zap_name_free(zn); + if (zap != NULL) /* may be NULL if fzap_add() failed */ + zap_unlockdir(zap); + return (err); +} + +int +zap_update(objset_t *os, uint64_t zapobj, const char *name, + int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx) +{ + zap_t *zap; + mzap_ent_t *mze; + const uint64_t *intval = val; + zap_name_t *zn; + int err; + + err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, &zap); + if (err) + return (err); + zn = zap_name_alloc(zap, name, MT_EXACT); + if (zn == NULL) { + zap_unlockdir(zap); + return (ENOTSUP); + } + if (!zap->zap_ismicro) { + err = fzap_update(zn, integer_size, num_integers, val, tx); + zap = zn->zn_zap; /* fzap_update() may change zap */ + } else if (integer_size != 8 || num_integers != 1 || + strlen(name) >= MZAP_NAME_LEN) { + dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n", + zapobj, integer_size, num_integers, name); + err = mzap_upgrade(&zn->zn_zap, tx); + if (err == 0) + err = fzap_update(zn, integer_size, num_integers, + val, tx); + zap = zn->zn_zap; /* fzap_update() may change zap */ + } else { + mze = mze_find(zn); + if (mze != NULL) { + mze->mze_phys.mze_value = *intval; + zap->zap_m.zap_phys->mz_chunk + [mze->mze_chunkid].mze_value = *intval; + } else { + mzap_addent(zn, *intval); + } + } + ASSERT(zap == zn->zn_zap); + zap_name_free(zn); + if (zap != NULL) /* may be NULL if fzap_upgrade() failed */ + zap_unlockdir(zap); + return (err); +} + +int +zap_remove(objset_t *os, uint64_t zapobj, const char *name, dmu_tx_t *tx) +{ + return (zap_remove_norm(os, zapobj, name, MT_EXACT, tx)); +} + +int +zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name, + matchtype_t mt, dmu_tx_t *tx) +{ + zap_t *zap; + int err; + mzap_ent_t *mze; + zap_name_t *zn; + + err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, &zap); + if (err) + return (err); + zn = zap_name_alloc(zap, name, mt); + if (zn == NULL) { + zap_unlockdir(zap); + return (ENOTSUP); + } + if (!zap->zap_ismicro) { + err = fzap_remove(zn, tx); + } else { + mze = mze_find(zn); + if (mze == NULL) { + err = ENOENT; + } else { + zap->zap_m.zap_num_entries--; + bzero(&zap->zap_m.zap_phys->mz_chunk[mze->mze_chunkid], + sizeof (mzap_ent_phys_t)); + mze_remove(zap, mze); + } + } + zap_name_free(zn); + zap_unlockdir(zap); + return (err); +} + +/* + * Routines for iterating over the attributes. + */ + +/* + * We want to keep the high 32 bits of the cursor zero if we can, so + * that 32-bit programs can access this. So use a small hash value so + * we can fit 4 bits of cd into the 32-bit cursor. 
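+ * zap_cursor_serialize() packs the state as
+ * (zc_hash >> (64 - ZAP_HASHBITS)) | ((uint64_t)zc_cd << ZAP_HASHBITS),
+ * and zap_cursor_init_serialized() undoes exactly that shift, so a
+ * cursor can be handed out to a caller and re-established later.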
+ * + * [ 4 zero bits | 32-bit collision differentiator | 28-bit hash value ] + */ +void +zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj, + uint64_t serialized) +{ + zc->zc_objset = os; + zc->zc_zap = NULL; + zc->zc_leaf = NULL; + zc->zc_zapobj = zapobj; + if (serialized == -1ULL) { + zc->zc_hash = -1ULL; + zc->zc_cd = 0; + } else { + zc->zc_hash = serialized << (64-ZAP_HASHBITS); + zc->zc_cd = serialized >> ZAP_HASHBITS; + if (zc->zc_cd >= ZAP_MAXCD) /* corrupt serialized */ + zc->zc_cd = 0; + } +} + +void +zap_cursor_init(zap_cursor_t *zc, objset_t *os, uint64_t zapobj) +{ + zap_cursor_init_serialized(zc, os, zapobj, 0); +} + +void +zap_cursor_fini(zap_cursor_t *zc) +{ + if (zc->zc_zap) { + rw_enter(&zc->zc_zap->zap_rwlock, RW_READER); + zap_unlockdir(zc->zc_zap); + zc->zc_zap = NULL; + } + if (zc->zc_leaf) { + rw_enter(&zc->zc_leaf->l_rwlock, RW_READER); + zap_put_leaf(zc->zc_leaf); + zc->zc_leaf = NULL; + } + zc->zc_objset = NULL; +} + +uint64_t +zap_cursor_serialize(zap_cursor_t *zc) +{ + if (zc->zc_hash == -1ULL) + return (-1ULL); + ASSERT((zc->zc_hash & (ZAP_MAXCD-1)) == 0); + ASSERT(zc->zc_cd < ZAP_MAXCD); + return ((zc->zc_hash >> (64-ZAP_HASHBITS)) | + ((uint64_t)zc->zc_cd << ZAP_HASHBITS)); +} + +int +zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za) +{ + int err; + avl_index_t idx; + mzap_ent_t mze_tofind; + mzap_ent_t *mze; + + if (zc->zc_hash == -1ULL) + return (ENOENT); + + if (zc->zc_zap == NULL) { + err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL, + RW_READER, TRUE, FALSE, &zc->zc_zap); + if (err) + return (err); + } else { + rw_enter(&zc->zc_zap->zap_rwlock, RW_READER); + } + if (!zc->zc_zap->zap_ismicro) { + err = fzap_cursor_retrieve(zc->zc_zap, zc, za); + } else { + err = ENOENT; + + mze_tofind.mze_hash = zc->zc_hash; + mze_tofind.mze_phys.mze_cd = zc->zc_cd; + + mze = avl_find(&zc->zc_zap->zap_m.zap_avl, &mze_tofind, &idx); + if (mze == NULL) { + mze = avl_nearest(&zc->zc_zap->zap_m.zap_avl, + idx, AVL_AFTER); + } + if (mze) { + ASSERT(0 == bcmp(&mze->mze_phys, + &zc->zc_zap->zap_m.zap_phys->mz_chunk + [mze->mze_chunkid], sizeof (mze->mze_phys))); + + za->za_normalization_conflict = + mzap_normalization_conflict(zc->zc_zap, NULL, mze); + za->za_integer_length = 8; + za->za_num_integers = 1; + za->za_first_integer = mze->mze_phys.mze_value; + (void) strcpy(za->za_name, mze->mze_phys.mze_name); + zc->zc_hash = mze->mze_hash; + zc->zc_cd = mze->mze_phys.mze_cd; + err = 0; + } else { + zc->zc_hash = -1ULL; + } + } + rw_exit(&zc->zc_zap->zap_rwlock); + return (err); +} + +void +zap_cursor_advance(zap_cursor_t *zc) +{ + if (zc->zc_hash == -1ULL) + return; + zc->zc_cd++; + if (zc->zc_cd >= ZAP_MAXCD) { + zc->zc_cd = 0; + zc->zc_hash += 1ULL<<(64-ZAP_HASHBITS); + if (zc->zc_hash == 0) /* EOF */ + zc->zc_hash = -1ULL; + } +} + +int +zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs) +{ + int err; + zap_t *zap; + + err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, &zap); + if (err) + return (err); + + bzero(zs, sizeof (zap_stats_t)); + + if (zap->zap_ismicro) { + zs->zs_blocksize = zap->zap_dbuf->db_size; + zs->zs_num_entries = zap->zap_m.zap_num_entries; + zs->zs_num_blocks = 1; + } else { + fzap_get_stats(zap, zs); + } + zap_unlockdir(zap); + return (0); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_acl.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_acl.c new file mode 100644 index 000000000000..341dc4dfe743 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_acl.c @@ -0,0 
+1,2680 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fs/fs_subr.h" +#include + +#define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE +#define DENY ACE_ACCESS_DENIED_ACE_TYPE +#define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE +#define MIN_ACE_TYPE ALLOW + +#define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP) +#define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \ + ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE) +#define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \ + ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS) +#define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \ + ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS) +#define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS) + +#define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \ + ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \ + ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \ + ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE) + +#define WRITE_MASK (WRITE_MASK_DATA|ACE_WRITE_ATTRIBUTES|ACE_WRITE_ACL|\ + ACE_WRITE_OWNER|ACE_DELETE|ACE_DELETE_CHILD) + +#define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \ + ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE) + +#define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \ + ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE) + +#define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \ + ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE) + +#define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER) + +#define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\ + ZFS_ACL_PROTECTED) + +#define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\ + ZFS_ACL_OBJ_ACE) + +static uint16_t +zfs_ace_v0_get_type(void *acep) +{ + return (((zfs_oldace_t *)acep)->z_type); +} + +static uint16_t +zfs_ace_v0_get_flags(void *acep) +{ + return (((zfs_oldace_t *)acep)->z_flags); +} + +static uint32_t +zfs_ace_v0_get_mask(void *acep) +{ + return (((zfs_oldace_t *)acep)->z_access_mask); +} + +static uint64_t +zfs_ace_v0_get_who(void *acep) +{ + return (((zfs_oldace_t *)acep)->z_fuid); +} + +static void +zfs_ace_v0_set_type(void *acep, uint16_t type) +{ + ((zfs_oldace_t *)acep)->z_type = type; +} + +static void +zfs_ace_v0_set_flags(void 
*acep, uint16_t flags) +{ + ((zfs_oldace_t *)acep)->z_flags = flags; +} + +static void +zfs_ace_v0_set_mask(void *acep, uint32_t mask) +{ + ((zfs_oldace_t *)acep)->z_access_mask = mask; +} + +static void +zfs_ace_v0_set_who(void *acep, uint64_t who) +{ + ((zfs_oldace_t *)acep)->z_fuid = who; +} + +/*ARGSUSED*/ +static size_t +zfs_ace_v0_size(void *acep) +{ + return (sizeof (zfs_oldace_t)); +} + +static size_t +zfs_ace_v0_abstract_size(void) +{ + return (sizeof (zfs_oldace_t)); +} + +static int +zfs_ace_v0_mask_off(void) +{ + return (offsetof(zfs_oldace_t, z_access_mask)); +} + +/*ARGSUSED*/ +static int +zfs_ace_v0_data(void *acep, void **datap) +{ + *datap = NULL; + return (0); +} + +static acl_ops_t zfs_acl_v0_ops = { + zfs_ace_v0_get_mask, + zfs_ace_v0_set_mask, + zfs_ace_v0_get_flags, + zfs_ace_v0_set_flags, + zfs_ace_v0_get_type, + zfs_ace_v0_set_type, + zfs_ace_v0_get_who, + zfs_ace_v0_set_who, + zfs_ace_v0_size, + zfs_ace_v0_abstract_size, + zfs_ace_v0_mask_off, + zfs_ace_v0_data +}; + +static uint16_t +zfs_ace_fuid_get_type(void *acep) +{ + return (((zfs_ace_hdr_t *)acep)->z_type); +} + +static uint16_t +zfs_ace_fuid_get_flags(void *acep) +{ + return (((zfs_ace_hdr_t *)acep)->z_flags); +} + +static uint32_t +zfs_ace_fuid_get_mask(void *acep) +{ + return (((zfs_ace_hdr_t *)acep)->z_access_mask); +} + +static uint64_t +zfs_ace_fuid_get_who(void *args) +{ + uint16_t entry_type; + zfs_ace_t *acep = args; + + entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS; + + if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || + entry_type == ACE_EVERYONE) + return (-1); + return (((zfs_ace_t *)acep)->z_fuid); +} + +static void +zfs_ace_fuid_set_type(void *acep, uint16_t type) +{ + ((zfs_ace_hdr_t *)acep)->z_type = type; +} + +static void +zfs_ace_fuid_set_flags(void *acep, uint16_t flags) +{ + ((zfs_ace_hdr_t *)acep)->z_flags = flags; +} + +static void +zfs_ace_fuid_set_mask(void *acep, uint32_t mask) +{ + ((zfs_ace_hdr_t *)acep)->z_access_mask = mask; +} + +static void +zfs_ace_fuid_set_who(void *arg, uint64_t who) +{ + zfs_ace_t *acep = arg; + + uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS; + + if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP || + entry_type == ACE_EVERYONE) + return; + acep->z_fuid = who; +} + +static size_t +zfs_ace_fuid_size(void *acep) +{ + zfs_ace_hdr_t *zacep = acep; + uint16_t entry_type; + + switch (zacep->z_type) { + case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: + case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: + case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: + case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: + return (sizeof (zfs_object_ace_t)); + case ALLOW: + case DENY: + entry_type = + (((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS); + if (entry_type == ACE_OWNER || + entry_type == OWNING_GROUP || + entry_type == ACE_EVERYONE) + return (sizeof (zfs_ace_hdr_t)); + /*FALLTHROUGH*/ + default: + return (sizeof (zfs_ace_t)); + } +} + +static size_t +zfs_ace_fuid_abstract_size(void) +{ + return (sizeof (zfs_ace_hdr_t)); +} + +static int +zfs_ace_fuid_mask_off(void) +{ + return (offsetof(zfs_ace_hdr_t, z_access_mask)); +} + +static int +zfs_ace_fuid_data(void *acep, void **datap) +{ + zfs_ace_t *zacep = acep; + zfs_object_ace_t *zobjp; + + switch (zacep->z_hdr.z_type) { + case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: + case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: + case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: + case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: + zobjp = acep; + *datap = (caddr_t)zobjp + sizeof (zfs_ace_t); + return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t)); + default: + 
*datap = NULL; + return (0); + } +} + +static acl_ops_t zfs_acl_fuid_ops = { + zfs_ace_fuid_get_mask, + zfs_ace_fuid_set_mask, + zfs_ace_fuid_get_flags, + zfs_ace_fuid_set_flags, + zfs_ace_fuid_get_type, + zfs_ace_fuid_set_type, + zfs_ace_fuid_get_who, + zfs_ace_fuid_set_who, + zfs_ace_fuid_size, + zfs_ace_fuid_abstract_size, + zfs_ace_fuid_mask_off, + zfs_ace_fuid_data +}; + +static int +zfs_acl_version(int version) +{ + if (version < ZPL_VERSION_FUID) + return (ZFS_ACL_VERSION_INITIAL); + else + return (ZFS_ACL_VERSION_FUID); +} + +static int +zfs_acl_version_zp(znode_t *zp) +{ + return (zfs_acl_version(zp->z_zfsvfs->z_version)); +} + +static zfs_acl_t * +zfs_acl_alloc(int vers) +{ + zfs_acl_t *aclp; + + aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP); + list_create(&aclp->z_acl, sizeof (zfs_acl_node_t), + offsetof(zfs_acl_node_t, z_next)); + aclp->z_version = vers; + if (vers == ZFS_ACL_VERSION_FUID) + aclp->z_ops = zfs_acl_fuid_ops; + else + aclp->z_ops = zfs_acl_v0_ops; + return (aclp); +} + +static zfs_acl_node_t * +zfs_acl_node_alloc(size_t bytes) +{ + zfs_acl_node_t *aclnode; + + aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP); + if (bytes) { + aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP); + aclnode->z_allocdata = aclnode->z_acldata; + aclnode->z_allocsize = bytes; + aclnode->z_size = bytes; + } + + return (aclnode); +} + +static void +zfs_acl_node_free(zfs_acl_node_t *aclnode) +{ + if (aclnode->z_allocsize) + kmem_free(aclnode->z_allocdata, aclnode->z_allocsize); + kmem_free(aclnode, sizeof (zfs_acl_node_t)); +} + +static void +zfs_acl_release_nodes(zfs_acl_t *aclp) +{ + zfs_acl_node_t *aclnode; + + while (aclnode = list_head(&aclp->z_acl)) { + list_remove(&aclp->z_acl, aclnode); + zfs_acl_node_free(aclnode); + } + aclp->z_acl_count = 0; + aclp->z_acl_bytes = 0; +} + +void +zfs_acl_free(zfs_acl_t *aclp) +{ + zfs_acl_release_nodes(aclp); + list_destroy(&aclp->z_acl); + kmem_free(aclp, sizeof (zfs_acl_t)); +} + +static boolean_t +zfs_acl_valid_ace_type(uint_t type, uint_t flags) +{ + uint16_t entry_type; + + switch (type) { + case ALLOW: + case DENY: + case ACE_SYSTEM_AUDIT_ACE_TYPE: + case ACE_SYSTEM_ALARM_ACE_TYPE: + entry_type = flags & ACE_TYPE_FLAGS; + return (entry_type == ACE_OWNER || + entry_type == OWNING_GROUP || + entry_type == ACE_EVERYONE || entry_type == 0 || + entry_type == ACE_IDENTIFIER_GROUP); + default: + if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE) + return (B_TRUE); + } + return (B_FALSE); +} + +static boolean_t +zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags) +{ + /* + * first check type of entry + */ + + if (!zfs_acl_valid_ace_type(type, iflags)) + return (B_FALSE); + + switch (type) { + case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: + case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: + case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: + case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: + if (aclp->z_version < ZFS_ACL_VERSION_FUID) + return (B_FALSE); + aclp->z_hints |= ZFS_ACL_OBJ_ACE; + } + + /* + * next check inheritance level flags + */ + + if (obj_type == VDIR && + (iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE))) + aclp->z_hints |= ZFS_INHERIT_ACE; + + if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) { + if ((iflags & (ACE_FILE_INHERIT_ACE| + ACE_DIRECTORY_INHERIT_ACE)) == 0) { + return (B_FALSE); + } + } + + return (B_TRUE); +} + +static void * +zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who, + uint32_t *access_mask, uint16_t *iflags, uint16_t *type) +{ + zfs_acl_node_t *aclnode; + + if (start == 
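[Editorial note: both accessor tables above are only reached through aclp->z_ops, which zfs_acl_alloc() selects from the ACL version, so the common ACL code never needs to know whether an entry is a zfs_oldace_t or a zfs_ace_hdr_t. A minimal sketch of that indirection; the helper name is invented for illustration.]

static boolean_t
example_ace_is_allow(zfs_acl_t *aclp, void *acep)
{
	/* works for both the v0 and the FUID on-disk ACE layouts */
	return (aclp->z_ops.ace_type_get(acep) == ALLOW);
}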
NULL) { + aclnode = list_head(&aclp->z_acl); + if (aclnode == NULL) + return (NULL); + + aclp->z_next_ace = aclnode->z_acldata; + aclp->z_curr_node = aclnode; + aclnode->z_ace_idx = 0; + } + + aclnode = aclp->z_curr_node; + + if (aclnode == NULL) + return (NULL); + + if (aclnode->z_ace_idx >= aclnode->z_ace_count) { + aclnode = list_next(&aclp->z_acl, aclnode); + if (aclnode == NULL) + return (NULL); + else { + aclp->z_curr_node = aclnode; + aclnode->z_ace_idx = 0; + aclp->z_next_ace = aclnode->z_acldata; + } + } + + if (aclnode->z_ace_idx < aclnode->z_ace_count) { + void *acep = aclp->z_next_ace; + size_t ace_size; + + /* + * Make sure we don't overstep our bounds + */ + ace_size = aclp->z_ops.ace_size(acep); + + if (((caddr_t)acep + ace_size) > + ((caddr_t)aclnode->z_acldata + aclnode->z_size)) { + return (NULL); + } + + *iflags = aclp->z_ops.ace_flags_get(acep); + *type = aclp->z_ops.ace_type_get(acep); + *access_mask = aclp->z_ops.ace_mask_get(acep); + *who = aclp->z_ops.ace_who_get(acep); + aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size; + aclnode->z_ace_idx++; + return ((void *)acep); + } + return (NULL); +} + +/*ARGSUSED*/ +static uint64_t +zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt, + uint16_t *flags, uint16_t *type, uint32_t *mask) +{ + zfs_acl_t *aclp = datap; + zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie; + uint64_t who; + + acep = zfs_acl_next_ace(aclp, acep, &who, mask, + flags, type); + return ((uint64_t)(uintptr_t)acep); +} + +static zfs_acl_node_t * +zfs_acl_curr_node(zfs_acl_t *aclp) +{ + ASSERT(aclp->z_curr_node); + return (aclp->z_curr_node); +} + +/* + * Copy ACE to internal ZFS format. + * While processing the ACL each ACE will be validated for correctness. + * ACE FUIDs will be created later. + */ +int +zfs_copy_ace_2_fuid(vtype_t obj_type, zfs_acl_t *aclp, void *datap, + zfs_ace_t *z_acl, int aclcnt, size_t *size) +{ + int i; + uint16_t entry_type; + zfs_ace_t *aceptr = z_acl; + ace_t *acep = datap; + zfs_object_ace_t *zobjacep; + ace_object_t *aceobjp; + + for (i = 0; i != aclcnt; i++) { + aceptr->z_hdr.z_access_mask = acep->a_access_mask; + aceptr->z_hdr.z_flags = acep->a_flags; + aceptr->z_hdr.z_type = acep->a_type; + entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS; + if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP && + entry_type != ACE_EVERYONE) { + if (!aclp->z_has_fuids) + aclp->z_has_fuids = IS_EPHEMERAL(acep->a_who); + aceptr->z_fuid = (uint64_t)acep->a_who; + } + + /* + * Make sure ACE is valid + */ + if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type, + aceptr->z_hdr.z_flags) != B_TRUE) + return (EINVAL); + + switch (acep->a_type) { + case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: + case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: + case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: + case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: + zobjacep = (zfs_object_ace_t *)aceptr; + aceobjp = (ace_object_t *)acep; + + bcopy(aceobjp->a_obj_type, zobjacep->z_object_type, + sizeof (aceobjp->a_obj_type)); + bcopy(aceobjp->a_inherit_obj_type, + zobjacep->z_inherit_type, + sizeof (aceobjp->a_inherit_obj_type)); + acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t)); + break; + default: + acep = (ace_t *)((caddr_t)acep + sizeof (ace_t)); + } + + aceptr = (zfs_ace_t *)((caddr_t)aceptr + + aclp->z_ops.ace_size(aceptr)); + } + + *size = (caddr_t)aceptr - (caddr_t)z_acl; + + return (0); +} + +/* + * Copy ZFS ACEs to fixed size ace_t layout + */ +static void +zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr, + void *datap, int filter) +{ + 
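[Editorial note: zfs_acl_next_ace() above is the single iterator used throughout this file — passing NULL starts the walk and NULL comes back at the end. An illustrative stand-alone loop using that idiom; the helper is hypothetical.]

static int
example_count_denies(zfs_acl_t *aclp)
{
	void *acep = NULL;
	uint64_t who;
	uint32_t mask;
	uint16_t iflags, type;
	int denies = 0;

	while ((acep = zfs_acl_next_ace(aclp, acep, &who, &mask,
	    &iflags, &type)) != NULL) {
		if (type == DENY)
			denies++;
	}
	return (denies);
}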
uint64_t who; + uint32_t access_mask; + uint16_t iflags, type; + zfs_ace_hdr_t *zacep = NULL; + ace_t *acep = datap; + ace_object_t *objacep; + zfs_object_ace_t *zobjacep; + size_t ace_size; + uint16_t entry_type; + + while (zacep = zfs_acl_next_ace(aclp, zacep, + &who, &access_mask, &iflags, &type)) { + + switch (type) { + case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: + case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: + case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: + case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: + if (filter) { + continue; + } + zobjacep = (zfs_object_ace_t *)zacep; + objacep = (ace_object_t *)acep; + bcopy(zobjacep->z_object_type, + objacep->a_obj_type, + sizeof (zobjacep->z_object_type)); + bcopy(zobjacep->z_inherit_type, + objacep->a_inherit_obj_type, + sizeof (zobjacep->z_inherit_type)); + ace_size = sizeof (ace_object_t); + break; + default: + ace_size = sizeof (ace_t); + break; + } + + entry_type = (iflags & ACE_TYPE_FLAGS); + if ((entry_type != ACE_OWNER && + entry_type != OWNING_GROUP && + entry_type != ACE_EVERYONE)) { + acep->a_who = zfs_fuid_map_id(zfsvfs, who, + cr, (entry_type & ACE_IDENTIFIER_GROUP) ? + ZFS_ACE_GROUP : ZFS_ACE_USER); + } else { + acep->a_who = (uid_t)(int64_t)who; + } + acep->a_access_mask = access_mask; + acep->a_flags = iflags; + acep->a_type = type; + acep = (ace_t *)((caddr_t)acep + ace_size); + } +} + +static int +zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep, + zfs_oldace_t *z_acl, int aclcnt, size_t *size) +{ + int i; + zfs_oldace_t *aceptr = z_acl; + + for (i = 0; i != aclcnt; i++, aceptr++) { + aceptr->z_access_mask = acep[i].a_access_mask; + aceptr->z_type = acep[i].a_type; + aceptr->z_flags = acep[i].a_flags; + aceptr->z_fuid = acep[i].a_who; + /* + * Make sure ACE is valid + */ + if (zfs_ace_valid(obj_type, aclp, aceptr->z_type, + aceptr->z_flags) != B_TRUE) + return (EINVAL); + } + *size = (caddr_t)aceptr - (caddr_t)z_acl; + return (0); +} + +/* + * convert old ACL format to new + */ +void +zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp) +{ + zfs_oldace_t *oldaclp; + int i; + uint16_t type, iflags; + uint32_t access_mask; + uint64_t who; + void *cookie = NULL; + zfs_acl_node_t *newaclnode; + + ASSERT(aclp->z_version == ZFS_ACL_VERSION_INITIAL); + /* + * First create the ACE in a contiguous piece of memory + * for zfs_copy_ace_2_fuid(). + * + * We only convert an ACL once, so this won't happen + * everytime. 
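[Editorial note: the conversion routines above only run a_who through the FUID mapping for real user or group entries; for the owner@/group@/everyone@ entry types the who field is ignored. A sketch of building one such external ace_t, purely for illustration — the helper name is invented.]

static void
example_fill_everyone_ace(ace_t *acep, uint32_t mask)
{
	acep->a_who = (uid_t)-1;		/* ignored for everyone@ */
	acep->a_access_mask = mask;
	acep->a_flags = ACE_EVERYONE;		/* entry type lives in the flags */
	acep->a_type = ACE_ACCESS_ALLOWED_ACE_TYPE;
}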
+ */ + oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count, + KM_SLEEP); + i = 0; + while (cookie = zfs_acl_next_ace(aclp, cookie, &who, + &access_mask, &iflags, &type)) { + oldaclp[i].z_flags = iflags; + oldaclp[i].z_type = type; + oldaclp[i].z_fuid = who; + oldaclp[i++].z_access_mask = access_mask; + } + + newaclnode = zfs_acl_node_alloc(aclp->z_acl_count * + sizeof (zfs_object_ace_t)); + aclp->z_ops = zfs_acl_fuid_ops; + VERIFY(zfs_copy_ace_2_fuid(ZTOV(zp)->v_type, aclp, oldaclp, + newaclnode->z_acldata, aclp->z_acl_count, + &newaclnode->z_size) == 0); + newaclnode->z_ace_count = aclp->z_acl_count; + aclp->z_version = ZFS_ACL_VERSION; + kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t)); + + /* + * Release all previous ACL nodes + */ + + zfs_acl_release_nodes(aclp); + + list_insert_head(&aclp->z_acl, newaclnode); + + aclp->z_acl_bytes = newaclnode->z_size; + aclp->z_acl_count = newaclnode->z_ace_count; + +} + +/* + * Convert unix access mask to v4 access mask + */ +static uint32_t +zfs_unix_to_v4(uint32_t access_mask) +{ + uint32_t new_mask = 0; + + if (access_mask & S_IXOTH) + new_mask |= ACE_EXECUTE; + if (access_mask & S_IWOTH) + new_mask |= ACE_WRITE_DATA; + if (access_mask & S_IROTH) + new_mask |= ACE_READ_DATA; + return (new_mask); +} + +static void +zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask, + uint16_t access_type, uint64_t fuid, uint16_t entry_type) +{ + uint16_t type = entry_type & ACE_TYPE_FLAGS; + + aclp->z_ops.ace_mask_set(acep, access_mask); + aclp->z_ops.ace_type_set(acep, access_type); + aclp->z_ops.ace_flags_set(acep, entry_type); + if ((type != ACE_OWNER && type != OWNING_GROUP && + type != ACE_EVERYONE)) + aclp->z_ops.ace_who_set(acep, fuid); +} + +/* + * Determine mode of file based on ACL. 
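[Editorial note: zfs_unix_to_v4() above maps one low-order rwx triplet onto the corresponding NFSv4 access bits, so a caller interested in a particular class shifts it down first. An illustrative helper, not part of the original code:]

static uint32_t
example_owner_bits_to_v4(mode_t mode)
{
	/* e.g. a 0750 mode yields ACE_READ_DATA|ACE_WRITE_DATA|ACE_EXECUTE */
	return (zfs_unix_to_v4((mode & 0700) >> 6));
}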
+ * Also, create FUIDs for any User/Group ACEs + */ +static uint64_t +zfs_mode_fuid_compute(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, + zfs_fuid_info_t **fuidp, dmu_tx_t *tx) +{ + int entry_type; + mode_t mode; + mode_t seen = 0; + zfs_ace_hdr_t *acep = NULL; + uint64_t who; + uint16_t iflags, type; + uint32_t access_mask; + + mode = (zp->z_phys->zp_mode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX)); + + while (acep = zfs_acl_next_ace(aclp, acep, &who, + &access_mask, &iflags, &type)) { + + if (!zfs_acl_valid_ace_type(type, iflags)) + continue; + + entry_type = (iflags & ACE_TYPE_FLAGS); + + /* + * Skip over owner@, group@ or everyone@ inherit only ACEs + */ + if ((iflags & ACE_INHERIT_ONLY_ACE) && + (entry_type == ACE_OWNER || entry_type == ACE_EVERYONE || + entry_type == OWNING_GROUP)) + continue; + + if (entry_type == ACE_OWNER) { + if ((access_mask & ACE_READ_DATA) && + (!(seen & S_IRUSR))) { + seen |= S_IRUSR; + if (type == ALLOW) { + mode |= S_IRUSR; + } + } + if ((access_mask & ACE_WRITE_DATA) && + (!(seen & S_IWUSR))) { + seen |= S_IWUSR; + if (type == ALLOW) { + mode |= S_IWUSR; + } + } + if ((access_mask & ACE_EXECUTE) && + (!(seen & S_IXUSR))) { + seen |= S_IXUSR; + if (type == ALLOW) { + mode |= S_IXUSR; + } + } + } else if (entry_type == OWNING_GROUP) { + if ((access_mask & ACE_READ_DATA) && + (!(seen & S_IRGRP))) { + seen |= S_IRGRP; + if (type == ALLOW) { + mode |= S_IRGRP; + } + } + if ((access_mask & ACE_WRITE_DATA) && + (!(seen & S_IWGRP))) { + seen |= S_IWGRP; + if (type == ALLOW) { + mode |= S_IWGRP; + } + } + if ((access_mask & ACE_EXECUTE) && + (!(seen & S_IXGRP))) { + seen |= S_IXGRP; + if (type == ALLOW) { + mode |= S_IXGRP; + } + } + } else if (entry_type == ACE_EVERYONE) { + if ((access_mask & ACE_READ_DATA)) { + if (!(seen & S_IRUSR)) { + seen |= S_IRUSR; + if (type == ALLOW) { + mode |= S_IRUSR; + } + } + if (!(seen & S_IRGRP)) { + seen |= S_IRGRP; + if (type == ALLOW) { + mode |= S_IRGRP; + } + } + if (!(seen & S_IROTH)) { + seen |= S_IROTH; + if (type == ALLOW) { + mode |= S_IROTH; + } + } + } + if ((access_mask & ACE_WRITE_DATA)) { + if (!(seen & S_IWUSR)) { + seen |= S_IWUSR; + if (type == ALLOW) { + mode |= S_IWUSR; + } + } + if (!(seen & S_IWGRP)) { + seen |= S_IWGRP; + if (type == ALLOW) { + mode |= S_IWGRP; + } + } + if (!(seen & S_IWOTH)) { + seen |= S_IWOTH; + if (type == ALLOW) { + mode |= S_IWOTH; + } + } + } + if ((access_mask & ACE_EXECUTE)) { + if (!(seen & S_IXUSR)) { + seen |= S_IXUSR; + if (type == ALLOW) { + mode |= S_IXUSR; + } + } + if (!(seen & S_IXGRP)) { + seen |= S_IXGRP; + if (type == ALLOW) { + mode |= S_IXGRP; + } + } + if (!(seen & S_IXOTH)) { + seen |= S_IXOTH; + if (type == ALLOW) { + mode |= S_IXOTH; + } + } + } + } + /* + * Now handle FUID create for user/group ACEs + */ + if (entry_type == 0 || entry_type == ACE_IDENTIFIER_GROUP) { + aclp->z_ops.ace_who_set(acep, + zfs_fuid_create(zp->z_zfsvfs, who, cr, + (entry_type == 0) ? ZFS_ACE_USER : ZFS_ACE_GROUP, + tx, fuidp)); + } + } + return (mode); +} + +static zfs_acl_t * +zfs_acl_node_read_internal(znode_t *zp, boolean_t will_modify) +{ + zfs_acl_t *aclp; + zfs_acl_node_t *aclnode; + + aclp = zfs_acl_alloc(zp->z_phys->zp_acl.z_acl_version); + + /* + * Version 0 to 1 znode_acl_phys has the size/count fields swapped. + * Version 0 didn't have a size field, only a count. 
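[Editorial note: the mode derivation above applies a strict first-match rule — the `seen' mask records which rwx bits have already been decided, and only an ALLOW ACE may actually set a bit. Reduced to a single bit, the rule looks like this illustrative helper:]

static void
example_apply_bit(mode_t *modep, mode_t *seenp, mode_t bit, uint16_t type)
{
	if (*seenp & bit)
		return;			/* already decided by an earlier ACE */
	*seenp |= bit;
	if (type == ALLOW)
		*modep |= bit;
}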
+ */ + if (zp->z_phys->zp_acl.z_acl_version == ZFS_ACL_VERSION_INITIAL) { + aclp->z_acl_count = zp->z_phys->zp_acl.z_acl_size; + aclp->z_acl_bytes = ZFS_ACL_SIZE(aclp->z_acl_count); + } else { + aclp->z_acl_count = zp->z_phys->zp_acl.z_acl_count; + aclp->z_acl_bytes = zp->z_phys->zp_acl.z_acl_size; + } + + aclnode = zfs_acl_node_alloc(will_modify ? aclp->z_acl_bytes : 0); + aclnode->z_ace_count = aclp->z_acl_count; + if (will_modify) { + bcopy(zp->z_phys->zp_acl.z_ace_data, aclnode->z_acldata, + aclp->z_acl_bytes); + } else { + aclnode->z_size = aclp->z_acl_bytes; + aclnode->z_acldata = &zp->z_phys->zp_acl.z_ace_data[0]; + } + + list_insert_head(&aclp->z_acl, aclnode); + + return (aclp); +} + +/* + * Read an external acl object. + */ +static int +zfs_acl_node_read(znode_t *zp, zfs_acl_t **aclpp, boolean_t will_modify) +{ + uint64_t extacl = zp->z_phys->zp_acl.z_acl_extern_obj; + zfs_acl_t *aclp; + size_t aclsize; + size_t acl_count; + zfs_acl_node_t *aclnode; + int error; + + ASSERT(MUTEX_HELD(&zp->z_acl_lock)); + + if (zp->z_phys->zp_acl.z_acl_extern_obj == 0) { + *aclpp = zfs_acl_node_read_internal(zp, will_modify); + return (0); + } + + aclp = zfs_acl_alloc(zp->z_phys->zp_acl.z_acl_version); + if (zp->z_phys->zp_acl.z_acl_version == ZFS_ACL_VERSION_INITIAL) { + zfs_acl_phys_v0_t *zacl0 = + (zfs_acl_phys_v0_t *)&zp->z_phys->zp_acl; + + aclsize = ZFS_ACL_SIZE(zacl0->z_acl_count); + acl_count = zacl0->z_acl_count; + } else { + aclsize = zp->z_phys->zp_acl.z_acl_size; + acl_count = zp->z_phys->zp_acl.z_acl_count; + if (aclsize == 0) + aclsize = acl_count * sizeof (zfs_ace_t); + } + aclnode = zfs_acl_node_alloc(aclsize); + list_insert_head(&aclp->z_acl, aclnode); + error = dmu_read(zp->z_zfsvfs->z_os, extacl, 0, + aclsize, aclnode->z_acldata); + aclnode->z_ace_count = acl_count; + aclp->z_acl_count = acl_count; + aclp->z_acl_bytes = aclsize; + + if (error != 0) { + zfs_acl_free(aclp); + /* convert checksum errors into IO errors */ + if (error == ECKSUM) + error = EIO; + return (error); + } + + *aclpp = aclp; + return (0); +} + +/* + * common code for setting ACLs. + * + * This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl. + * zfs_setacl passes a non-NULL inherit pointer (ihp) to indicate that it's + * already checked the acl and knows whether to inherit. + */ +int +zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, + zfs_fuid_info_t **fuidp, dmu_tx_t *tx) +{ + int error; + znode_phys_t *zphys = zp->z_phys; + zfs_acl_phys_t *zacl = &zphys->zp_acl; + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + uint64_t aoid = zphys->zp_acl.z_acl_extern_obj; + uint64_t off = 0; + dmu_object_type_t otype; + zfs_acl_node_t *aclnode; + + ASSERT(MUTEX_HELD(&zp->z_lock)); + ASSERT(MUTEX_HELD(&zp->z_acl_lock)); + + dmu_buf_will_dirty(zp->z_dbuf, tx); + + zphys->zp_mode = zfs_mode_fuid_compute(zp, aclp, cr, fuidp, tx); + + /* + * Decide which opbject type to use. If we are forced to + * use old ACL format than transform ACL into zfs_oldace_t + * layout. + */ + if (!zfsvfs->z_use_fuids) { + otype = DMU_OT_OLDACL; + } else { + if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) && + (zfsvfs->z_version >= ZPL_VERSION_FUID)) + zfs_acl_xform(zp, aclp); + ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID); + otype = DMU_OT_ACL; + } + + if (aclp->z_acl_bytes > ZFS_ACE_SPACE) { + /* + * If ACL was previously external and we are now + * converting to new ACL format then release old + * ACL object and create a new one. 
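[Editorial note: zfs_acl_node_read() above dispatches on where the ACL lives — a zero z_acl_extern_obj means the ACEs are embedded in the znode bonus buffer, anything else names a separate DMU object fetched with dmu_read(). As a predicate (illustrative helper):]

static boolean_t
example_acl_is_embedded(znode_t *zp)
{
	/* 0 selects the zfs_acl_node_read_internal() path above */
	return (zp->z_phys->zp_acl.z_acl_extern_obj == 0);
}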
+ */ + if (aoid && aclp->z_version != zacl->z_acl_version) { + error = dmu_object_free(zfsvfs->z_os, + zp->z_phys->zp_acl.z_acl_extern_obj, tx); + if (error) + return (error); + aoid = 0; + } + if (aoid == 0) { + aoid = dmu_object_alloc(zfsvfs->z_os, + otype, aclp->z_acl_bytes, + otype == DMU_OT_ACL ? DMU_OT_SYSACL : DMU_OT_NONE, + otype == DMU_OT_ACL ? DN_MAX_BONUSLEN : 0, tx); + } else { + (void) dmu_object_set_blocksize(zfsvfs->z_os, aoid, + aclp->z_acl_bytes, 0, tx); + } + zphys->zp_acl.z_acl_extern_obj = aoid; + for (aclnode = list_head(&aclp->z_acl); aclnode; + aclnode = list_next(&aclp->z_acl, aclnode)) { + if (aclnode->z_ace_count == 0) + continue; + dmu_write(zfsvfs->z_os, aoid, off, + aclnode->z_size, aclnode->z_acldata, tx); + off += aclnode->z_size; + } + } else { + void *start = zacl->z_ace_data; + /* + * Migrating back embedded? + */ + if (zphys->zp_acl.z_acl_extern_obj) { + error = dmu_object_free(zfsvfs->z_os, + zp->z_phys->zp_acl.z_acl_extern_obj, tx); + if (error) + return (error); + zphys->zp_acl.z_acl_extern_obj = 0; + } + + for (aclnode = list_head(&aclp->z_acl); aclnode; + aclnode = list_next(&aclp->z_acl, aclnode)) { + if (aclnode->z_ace_count == 0) + continue; + bcopy(aclnode->z_acldata, start, aclnode->z_size); + start = (caddr_t)start + aclnode->z_size; + } + } + + /* + * If Old version then swap count/bytes to match old + * layout of znode_acl_phys_t. + */ + if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) { + zphys->zp_acl.z_acl_size = aclp->z_acl_count; + zphys->zp_acl.z_acl_count = aclp->z_acl_bytes; + } else { + zphys->zp_acl.z_acl_size = aclp->z_acl_bytes; + zphys->zp_acl.z_acl_count = aclp->z_acl_count; + } + + zphys->zp_acl.z_acl_version = aclp->z_version; + + /* + * Replace ACL wide bits, but first clear them. + */ + zp->z_phys->zp_flags &= ~ZFS_ACL_WIDE_FLAGS; + + zp->z_phys->zp_flags |= aclp->z_hints; + + if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0) + zp->z_phys->zp_flags |= ZFS_ACL_TRIVIAL; + + zfs_time_stamper_locked(zp, STATE_CHANGED, tx); + return (0); +} + +/* + * Update access mask for prepended ACE + * + * This applies the "groupmask" value for aclmode property. + */ +static void +zfs_acl_prepend_fixup(zfs_acl_t *aclp, void *acep, void *origacep, + mode_t mode, uint64_t owner) +{ + int rmask, wmask, xmask; + int user_ace; + uint16_t aceflags; + uint32_t origmask, acepmask; + uint64_t fuid; + + aceflags = aclp->z_ops.ace_flags_get(acep); + fuid = aclp->z_ops.ace_who_get(acep); + origmask = aclp->z_ops.ace_mask_get(origacep); + acepmask = aclp->z_ops.ace_mask_get(acep); + + user_ace = (!(aceflags & + (ACE_OWNER|ACE_GROUP|ACE_IDENTIFIER_GROUP))); + + if (user_ace && (fuid == owner)) { + rmask = S_IRUSR; + wmask = S_IWUSR; + xmask = S_IXUSR; + } else { + rmask = S_IRGRP; + wmask = S_IWGRP; + xmask = S_IXGRP; + } + + if (origmask & ACE_READ_DATA) { + if (mode & rmask) { + acepmask &= ~ACE_READ_DATA; + } else { + acepmask |= ACE_READ_DATA; + } + } + + if (origmask & ACE_WRITE_DATA) { + if (mode & wmask) { + acepmask &= ~ACE_WRITE_DATA; + } else { + acepmask |= ACE_WRITE_DATA; + } + } + + if (origmask & ACE_APPEND_DATA) { + if (mode & wmask) { + acepmask &= ~ACE_APPEND_DATA; + } else { + acepmask |= ACE_APPEND_DATA; + } + } + + if (origmask & ACE_EXECUTE) { + if (mode & xmask) { + acepmask &= ~ACE_EXECUTE; + } else { + acepmask |= ACE_EXECUTE; + } + } + aclp->z_ops.ace_mask_set(acep, acepmask); +} + +/* + * Apply mode to canonical six ACEs. 
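[Editorial note: zfs_acl_prepend_fixup() above recomputes the prepended DENY ACE's mask from the original ACE and the new mode. Per permission bit the rule collapses to the following sketch; the helper and its parameters are illustrative only.]

static uint32_t
example_deny_bit(uint32_t origmask, mode_t mode, mode_t classbit,
    uint32_t acebit)
{
	/* carried into the DENY ACE only if originally present but not in mode */
	if ((origmask & acebit) && !(mode & classbit))
		return (acebit);
	return (0);
}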
+ */ +static void +zfs_acl_fixup_canonical_six(zfs_acl_t *aclp, mode_t mode) +{ + zfs_acl_node_t *aclnode = list_tail(&aclp->z_acl); + void *acep; + int maskoff = aclp->z_ops.ace_mask_off(); + size_t abstract_size = aclp->z_ops.ace_abstract_size(); + + ASSERT(aclnode != NULL); + + acep = (void *)((caddr_t)aclnode->z_acldata + + aclnode->z_size - (abstract_size * 6)); + + /* + * Fixup final ACEs to match the mode + */ + + adjust_ace_pair_common(acep, maskoff, abstract_size, + (mode & 0700) >> 6); /* owner@ */ + + acep = (caddr_t)acep + (abstract_size * 2); + + adjust_ace_pair_common(acep, maskoff, abstract_size, + (mode & 0070) >> 3); /* group@ */ + + acep = (caddr_t)acep + (abstract_size * 2); + adjust_ace_pair_common(acep, maskoff, + abstract_size, mode); /* everyone@ */ +} + + +static int +zfs_acl_ace_match(zfs_acl_t *aclp, void *acep, int allow_deny, + int entry_type, int accessmask) +{ + uint32_t mask = aclp->z_ops.ace_mask_get(acep); + uint16_t type = aclp->z_ops.ace_type_get(acep); + uint16_t flags = aclp->z_ops.ace_flags_get(acep); + + return (mask == accessmask && type == allow_deny && + ((flags & ACE_TYPE_FLAGS) == entry_type)); +} + +/* + * Can prepended ACE be reused? + */ +static int +zfs_reuse_deny(zfs_acl_t *aclp, void *acep, void *prevacep) +{ + int okay_masks; + uint16_t prevtype; + uint16_t prevflags; + uint16_t flags; + uint32_t mask, prevmask; + + if (prevacep == NULL) + return (B_FALSE); + + prevtype = aclp->z_ops.ace_type_get(prevacep); + prevflags = aclp->z_ops.ace_flags_get(prevacep); + flags = aclp->z_ops.ace_flags_get(acep); + mask = aclp->z_ops.ace_mask_get(acep); + prevmask = aclp->z_ops.ace_mask_get(prevacep); + + if (prevtype != DENY) + return (B_FALSE); + + if (prevflags != (flags & ACE_IDENTIFIER_GROUP)) + return (B_FALSE); + + okay_masks = (mask & OKAY_MASK_BITS); + + if (prevmask & ~okay_masks) + return (B_FALSE); + + return (B_TRUE); +} + + +/* + * Insert new ACL node into chain of zfs_acl_node_t's + * + * This will result in two possible results. + * 1. If the ACL is currently just a single zfs_acl_node and + * we are prepending the entry then current acl node will have + * a new node inserted above it. + * + * 2. If we are inserting in the middle of current acl node then + * the current node will be split in two and new node will be inserted + * in between the two split nodes. 
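[Editorial note: the canonical six ACEs fixed up above always occupy the tail of the last ACL node as three deny/allow pairs — owner@, group@, everyone@ — each entry of abstract size. The base-address arithmetic used there, as an illustrative helper:]

static void *
example_canonical_six_base(zfs_acl_node_t *aclnode, size_t abstract_size)
{
	/* six abstract ACEs: owner@ pair, group@ pair, everyone@ pair */
	return ((caddr_t)aclnode->z_acldata + aclnode->z_size -
	    (abstract_size * 6));
}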
+ */ +static zfs_acl_node_t * +zfs_acl_ace_insert(zfs_acl_t *aclp, void *acep) +{ + zfs_acl_node_t *newnode; + zfs_acl_node_t *trailernode = NULL; + zfs_acl_node_t *currnode = zfs_acl_curr_node(aclp); + int curr_idx = aclp->z_curr_node->z_ace_idx; + int trailer_count; + size_t oldsize; + + newnode = zfs_acl_node_alloc(aclp->z_ops.ace_size(acep)); + newnode->z_ace_count = 1; + + oldsize = currnode->z_size; + + if (curr_idx != 1) { + trailernode = zfs_acl_node_alloc(0); + trailernode->z_acldata = acep; + + trailer_count = currnode->z_ace_count - curr_idx + 1; + currnode->z_ace_count = curr_idx - 1; + currnode->z_size = (caddr_t)acep - (caddr_t)currnode->z_acldata; + trailernode->z_size = oldsize - currnode->z_size; + trailernode->z_ace_count = trailer_count; + } + + aclp->z_acl_count += 1; + aclp->z_acl_bytes += aclp->z_ops.ace_size(acep); + + if (curr_idx == 1) + list_insert_before(&aclp->z_acl, currnode, newnode); + else + list_insert_after(&aclp->z_acl, currnode, newnode); + if (trailernode) { + list_insert_after(&aclp->z_acl, newnode, trailernode); + aclp->z_curr_node = trailernode; + trailernode->z_ace_idx = 1; + } + + return (newnode); +} + +/* + * Prepend deny ACE + */ +static void * +zfs_acl_prepend_deny(znode_t *zp, zfs_acl_t *aclp, void *acep, + mode_t mode) +{ + zfs_acl_node_t *aclnode; + void *newacep; + uint64_t fuid; + uint16_t flags; + + aclnode = zfs_acl_ace_insert(aclp, acep); + newacep = aclnode->z_acldata; + fuid = aclp->z_ops.ace_who_get(acep); + flags = aclp->z_ops.ace_flags_get(acep); + zfs_set_ace(aclp, newacep, 0, DENY, fuid, (flags & ACE_TYPE_FLAGS)); + zfs_acl_prepend_fixup(aclp, newacep, acep, mode, zp->z_phys->zp_uid); + + return (newacep); +} + +/* + * Split an inherited ACE into inherit_only ACE + * and original ACE with inheritance flags stripped off. + */ +static void +zfs_acl_split_ace(zfs_acl_t *aclp, zfs_ace_hdr_t *acep) +{ + zfs_acl_node_t *aclnode; + zfs_acl_node_t *currnode; + void *newacep; + uint16_t type, flags; + uint32_t mask; + uint64_t fuid; + + type = aclp->z_ops.ace_type_get(acep); + flags = aclp->z_ops.ace_flags_get(acep); + mask = aclp->z_ops.ace_mask_get(acep); + fuid = aclp->z_ops.ace_who_get(acep); + + aclnode = zfs_acl_ace_insert(aclp, acep); + newacep = aclnode->z_acldata; + + aclp->z_ops.ace_type_set(newacep, type); + aclp->z_ops.ace_flags_set(newacep, flags | ACE_INHERIT_ONLY_ACE); + aclp->z_ops.ace_mask_set(newacep, mask); + aclp->z_ops.ace_type_set(newacep, type); + aclp->z_ops.ace_who_set(newacep, fuid); + aclp->z_next_ace = acep; + flags &= ~ALL_INHERIT; + aclp->z_ops.ace_flags_set(acep, flags); + currnode = zfs_acl_curr_node(aclp); + ASSERT(currnode->z_ace_idx >= 1); + currnode->z_ace_idx -= 1; +} + +/* + * Are ACES started at index i, the canonical six ACES? 
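[Editorial note: zfs_acl_split_ace() above leaves two entries behind — an inherit-only copy that keeps propagating to children, and the effective entry with every inheritance flag removed. The flag arithmetic in isolation (illustrative helper):]

static void
example_split_flags(uint16_t flags, uint16_t *inherit_copy,
    uint16_t *effective_copy)
{
	*inherit_copy = flags | ACE_INHERIT_ONLY_ACE;
	*effective_copy = flags & ~ALL_INHERIT;
}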
+ */ +static int +zfs_have_canonical_six(zfs_acl_t *aclp) +{ + void *acep; + zfs_acl_node_t *aclnode = list_tail(&aclp->z_acl); + int i = 0; + size_t abstract_size = aclp->z_ops.ace_abstract_size(); + + ASSERT(aclnode != NULL); + + if (aclnode->z_ace_count < 6) + return (0); + + acep = (void *)((caddr_t)aclnode->z_acldata + + aclnode->z_size - (aclp->z_ops.ace_abstract_size() * 6)); + + if ((zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++), + DENY, ACE_OWNER, 0) && + zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++), + ALLOW, ACE_OWNER, OWNER_ALLOW_MASK) && + zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++), DENY, + OWNING_GROUP, 0) && zfs_acl_ace_match(aclp, (caddr_t)acep + + (abstract_size * i++), + ALLOW, OWNING_GROUP, 0) && + zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++), + DENY, ACE_EVERYONE, EVERYONE_DENY_MASK) && + zfs_acl_ace_match(aclp, (caddr_t)acep + (abstract_size * i++), + ALLOW, ACE_EVERYONE, EVERYONE_ALLOW_MASK))) { + return (1); + } else { + return (0); + } +} + + +/* + * Apply step 1g, to group entries + * + * Need to deal with corner case where group may have + * greater permissions than owner. If so then limit + * group permissions, based on what extra permissions + * group has. + */ +static void +zfs_fixup_group_entries(zfs_acl_t *aclp, void *acep, void *prevacep, + mode_t mode) +{ + uint32_t prevmask = aclp->z_ops.ace_mask_get(prevacep); + uint32_t mask = aclp->z_ops.ace_mask_get(acep); + uint16_t prevflags = aclp->z_ops.ace_flags_get(prevacep); + mode_t extramode = (mode >> 3) & 07; + mode_t ownermode = (mode >> 6); + + if (prevflags & ACE_IDENTIFIER_GROUP) { + + extramode &= ~ownermode; + + if (extramode) { + if (extramode & S_IROTH) { + prevmask &= ~ACE_READ_DATA; + mask &= ~ACE_READ_DATA; + } + if (extramode & S_IWOTH) { + prevmask &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA); + mask &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA); + } + if (extramode & S_IXOTH) { + prevmask &= ~ACE_EXECUTE; + mask &= ~ACE_EXECUTE; + } + } + } + aclp->z_ops.ace_mask_set(acep, mask); + aclp->z_ops.ace_mask_set(prevacep, prevmask); +} + +/* + * Apply the chmod algorithm as described + * in PSARC/2002/240 + */ +static void +zfs_acl_chmod(znode_t *zp, uint64_t mode, zfs_acl_t *aclp) +{ + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + void *acep = NULL, *prevacep = NULL; + uint64_t who; + int i; + int entry_type; + int reuse_deny; + int need_canonical_six = 1; + uint16_t iflags, type; + uint32_t access_mask; + + ASSERT(MUTEX_HELD(&zp->z_acl_lock)); + ASSERT(MUTEX_HELD(&zp->z_lock)); + + aclp->z_hints = (zp->z_phys->zp_flags & V4_ACL_WIDE_FLAGS); + + /* + * If discard then just discard all ACL nodes which + * represent the ACEs. + * + * New owner@/group@/everone@ ACEs will be added + * later. + */ + if (zfsvfs->z_acl_mode == ZFS_ACL_DISCARD) + zfs_acl_release_nodes(aclp); + + while (acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask, + &iflags, &type)) { + + entry_type = (iflags & ACE_TYPE_FLAGS); + iflags = (iflags & ALL_INHERIT); + + if ((type != ALLOW && type != DENY) || + (iflags & ACE_INHERIT_ONLY_ACE)) { + if (iflags) + aclp->z_hints |= ZFS_INHERIT_ACE; + switch (type) { + case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: + case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: + case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: + case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: + aclp->z_hints |= ZFS_ACL_OBJ_ACE; + break; + } + goto nextace; + } + + /* + * Need to split ace into two? 
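[Editorial note: the step-1g fixup above first computes which permission bits the group class would gain over the owner, and only those excess bits are then stripped from the group ACEs. The excess computation, as an illustrative helper:]

static mode_t
example_group_excess(mode_t mode)
{
	mode_t ownermode = (mode >> 6) & 07;
	mode_t groupmode = (mode >> 3) & 07;

	/* bits the group would get beyond what the owner has */
	return (groupmode & ~ownermode);
}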
+ */ + if ((iflags & (ACE_FILE_INHERIT_ACE| + ACE_DIRECTORY_INHERIT_ACE)) && + (!(iflags & ACE_INHERIT_ONLY_ACE))) { + zfs_acl_split_ace(aclp, acep); + aclp->z_hints |= ZFS_INHERIT_ACE; + goto nextace; + } + + if (entry_type == ACE_OWNER || entry_type == ACE_EVERYONE || + (entry_type == OWNING_GROUP)) { + access_mask &= ~OGE_CLEAR; + aclp->z_ops.ace_mask_set(acep, access_mask); + goto nextace; + } else { + reuse_deny = B_TRUE; + if (type == ALLOW) { + + /* + * Check preceding ACE if any, to see + * if we need to prepend a DENY ACE. + * This is only applicable when the acl_mode + * property == groupmask. + */ + if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK) { + + reuse_deny = zfs_reuse_deny(aclp, acep, + prevacep); + + if (!reuse_deny) { + prevacep = + zfs_acl_prepend_deny(zp, + aclp, acep, mode); + } else { + zfs_acl_prepend_fixup( + aclp, prevacep, + acep, mode, + zp->z_phys->zp_uid); + } + zfs_fixup_group_entries(aclp, acep, + prevacep, mode); + + } + } + } +nextace: + prevacep = acep; + } + + /* + * Check out last six aces, if we have six. + */ + + if (aclp->z_acl_count >= 6) { + if (zfs_have_canonical_six(aclp)) { + need_canonical_six = 0; + } + } + + if (need_canonical_six) { + size_t abstract_size = aclp->z_ops.ace_abstract_size(); + void *zacep; + zfs_acl_node_t *aclnode = + zfs_acl_node_alloc(abstract_size * 6); + + aclnode->z_size = abstract_size * 6; + aclnode->z_ace_count = 6; + aclp->z_acl_bytes += aclnode->z_size; + list_insert_tail(&aclp->z_acl, aclnode); + + zacep = aclnode->z_acldata; + + i = 0; + zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++), + 0, DENY, -1, ACE_OWNER); + zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++), + OWNER_ALLOW_MASK, ALLOW, -1, ACE_OWNER); + zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++), 0, + DENY, -1, OWNING_GROUP); + zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++), 0, + ALLOW, -1, OWNING_GROUP); + zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++), + EVERYONE_DENY_MASK, DENY, -1, ACE_EVERYONE); + zfs_set_ace(aclp, (caddr_t)zacep + (abstract_size * i++), + EVERYONE_ALLOW_MASK, ALLOW, -1, ACE_EVERYONE); + aclp->z_acl_count += 6; + } + + zfs_acl_fixup_canonical_six(aclp, mode); +} + +int +zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode) +{ + int error; + + mutex_enter(&zp->z_lock); + mutex_enter(&zp->z_acl_lock); + *aclp = NULL; + error = zfs_acl_node_read(zp, aclp, B_TRUE); + if (error == 0) + zfs_acl_chmod(zp, mode, *aclp); + mutex_exit(&zp->z_acl_lock); + mutex_exit(&zp->z_lock); + return (error); +} + +/* + * strip off write_owner and write_acl + */ +static void +zfs_restricted_update(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, void *acep) +{ + uint32_t mask = aclp->z_ops.ace_mask_get(acep); + + if ((zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) && + (aclp->z_ops.ace_type_get(acep) == ALLOW)) { + mask &= ~RESTRICTED_CLEAR; + aclp->z_ops.ace_mask_set(acep, mask); + } +} + +/* + * Should ACE be inherited? 
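[Editorial note: with aclinherit=restricted, zfs_restricted_update() above strips the two ownership-transfer permissions from any inherited ALLOW entry. The mask arithmetic alone, as an illustrative helper:]

static uint32_t
example_restrict_inherited_allow(uint32_t mask)
{
	/* RESTRICTED_CLEAR: no taking ownership, no rewriting the ACL */
	return (mask & ~(ACE_WRITE_ACL | ACE_WRITE_OWNER));
}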
+ */ +static int +zfs_ace_can_use(znode_t *zp, uint16_t acep_flags) +{ + int vtype = ZTOV(zp)->v_type; + int iflags = (acep_flags & 0xf); + + if ((vtype == VDIR) && (iflags & ACE_DIRECTORY_INHERIT_ACE)) + return (1); + else if (iflags & ACE_FILE_INHERIT_ACE) + return (!((vtype == VDIR) && + (iflags & ACE_NO_PROPAGATE_INHERIT_ACE))); + return (0); +} + +/* + * inherit inheritable ACEs from parent + */ +static zfs_acl_t * +zfs_acl_inherit(znode_t *zp, zfs_acl_t *paclp, uint64_t mode, + boolean_t *need_chmod) +{ + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + void *pacep; + void *acep, *acep2; + zfs_acl_node_t *aclnode, *aclnode2; + zfs_acl_t *aclp = NULL; + uint64_t who; + uint32_t access_mask; + uint16_t iflags, newflags, type; + size_t ace_size; + void *data1, *data2; + size_t data1sz, data2sz; + boolean_t vdir = ZTOV(zp)->v_type == VDIR; + boolean_t vreg = ZTOV(zp)->v_type == VREG; + boolean_t passthrough, passthrough_x, noallow; + + passthrough_x = + zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X; + passthrough = passthrough_x || + zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH; + noallow = + zfsvfs->z_acl_inherit == ZFS_ACL_NOALLOW; + + *need_chmod = B_TRUE; + pacep = NULL; + aclp = zfs_acl_alloc(paclp->z_version); + if (zfsvfs->z_acl_inherit == ZFS_ACL_DISCARD) + return (aclp); + while (pacep = zfs_acl_next_ace(paclp, pacep, &who, + &access_mask, &iflags, &type)) { + + /* + * don't inherit bogus ACEs + */ + if (!zfs_acl_valid_ace_type(type, iflags)) + continue; + + if (noallow && type == ALLOW) + continue; + + ace_size = aclp->z_ops.ace_size(pacep); + + if (!zfs_ace_can_use(zp, iflags)) + continue; + + /* + * If owner@, group@, or everyone@ inheritable + * then zfs_acl_chmod() isn't needed. + */ + if (passthrough && + ((iflags & (ACE_OWNER|ACE_EVERYONE)) || + ((iflags & OWNING_GROUP) == + OWNING_GROUP)) && (vreg || (vdir && (iflags & + ACE_DIRECTORY_INHERIT_ACE)))) { + *need_chmod = B_FALSE; + + if (!vdir && passthrough_x && + ((mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)) { + access_mask &= ~ACE_EXECUTE; + } + } + + aclnode = zfs_acl_node_alloc(ace_size); + list_insert_tail(&aclp->z_acl, aclnode); + acep = aclnode->z_acldata; + + zfs_set_ace(aclp, acep, access_mask, type, + who, iflags|ACE_INHERITED_ACE); + + /* + * Copy special opaque data if any + */ + if ((data1sz = paclp->z_ops.ace_data(pacep, &data1)) != 0) { + VERIFY((data2sz = aclp->z_ops.ace_data(acep, + &data2)) == data1sz); + bcopy(data1, data2, data2sz); + } + aclp->z_acl_count++; + aclnode->z_ace_count++; + aclp->z_acl_bytes += aclnode->z_size; + newflags = aclp->z_ops.ace_flags_get(acep); + + if (vdir) + aclp->z_hints |= ZFS_INHERIT_ACE; + + if ((iflags & ACE_NO_PROPAGATE_INHERIT_ACE) || !vdir) { + newflags &= ~ALL_INHERIT; + aclp->z_ops.ace_flags_set(acep, + newflags|ACE_INHERITED_ACE); + zfs_restricted_update(zfsvfs, aclp, acep); + continue; + } + + ASSERT(vdir); + + newflags = aclp->z_ops.ace_flags_get(acep); + if ((iflags & (ACE_FILE_INHERIT_ACE | + ACE_DIRECTORY_INHERIT_ACE)) != + ACE_FILE_INHERIT_ACE) { + aclnode2 = zfs_acl_node_alloc(ace_size); + list_insert_tail(&aclp->z_acl, aclnode2); + acep2 = aclnode2->z_acldata; + zfs_set_ace(aclp, acep2, + access_mask, type, who, + iflags|ACE_INHERITED_ACE); + newflags |= ACE_INHERIT_ONLY_ACE; + aclp->z_ops.ace_flags_set(acep, newflags); + newflags &= ~ALL_INHERIT; + aclp->z_ops.ace_flags_set(acep2, + newflags|ACE_INHERITED_ACE); + + /* + * Copy special opaque data if any + */ + if ((data1sz = aclp->z_ops.ace_data(acep, + &data1)) != 0) { + VERIFY((data2sz = + aclp->z_ops.ace_data(acep2, 
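[Editorial note: inside the inherit loop above, aclinherit=passthrough-x only lets the execute bit through to a regular file when the requested create mode itself carries an execute bit. The adjustment in isolation — the surrounding passthrough and owner@/group@/everyone@ checks are omitted from this illustrative helper:]

static uint32_t
example_passthrough_x_mask(uint32_t access_mask, mode_t mode,
    boolean_t isdir)
{
	if (!isdir && (mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)
		access_mask &= ~ACE_EXECUTE;
	return (access_mask);
}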
+ &data2)) == data1sz); + bcopy(data1, data2, data1sz); + } + aclp->z_acl_count++; + aclnode2->z_ace_count++; + aclp->z_acl_bytes += aclnode->z_size; + zfs_restricted_update(zfsvfs, aclp, acep2); + } else { + newflags |= ACE_INHERIT_ONLY_ACE; + aclp->z_ops.ace_flags_set(acep, + newflags|ACE_INHERITED_ACE); + } + } + return (aclp); +} + +/* + * Create file system object initial permissions + * including inheritable ACEs. + */ +void +zfs_perm_init(znode_t *zp, znode_t *parent, int flag, + vattr_t *vap, dmu_tx_t *tx, cred_t *cr, + zfs_acl_t *setaclp, zfs_fuid_info_t **fuidp) +{ + uint64_t mode, fuid, fgid; + int error; + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + zfs_acl_t *aclp = NULL; + zfs_acl_t *paclp; + xvattr_t *xvap = (xvattr_t *)vap; + gid_t gid; + boolean_t need_chmod = B_TRUE; + + if (setaclp) + aclp = setaclp; + + mode = MAKEIMODE(vap->va_type, vap->va_mode); + + /* + * Determine uid and gid. + */ + if ((flag & (IS_ROOT_NODE | IS_REPLAY)) || + ((flag & IS_XATTR) && (vap->va_type == VDIR))) { + fuid = zfs_fuid_create(zfsvfs, vap->va_uid, cr, + ZFS_OWNER, tx, fuidp); + fgid = zfs_fuid_create(zfsvfs, vap->va_gid, cr, + ZFS_GROUP, tx, fuidp); + gid = vap->va_gid; + } else { + fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER, tx, cr, fuidp); + fgid = 0; + if (vap->va_mask & AT_GID) { + fgid = zfs_fuid_create(zfsvfs, vap->va_gid, cr, + ZFS_GROUP, tx, fuidp); + gid = vap->va_gid; + if (fgid != parent->z_phys->zp_gid && + !groupmember(vap->va_gid, cr) && + secpolicy_vnode_create_gid(cr) != 0) + fgid = 0; + } + if (fgid == 0) { + if (parent->z_phys->zp_mode & S_ISGID) { + fgid = parent->z_phys->zp_gid; + gid = zfs_fuid_map_id(zfsvfs, fgid, + cr, ZFS_GROUP); + } else { + fgid = zfs_fuid_create_cred(zfsvfs, + ZFS_GROUP, tx, cr, fuidp); + gid = crgetgid(cr); + } + } + } + + /* + * If we're creating a directory, and the parent directory has the + * set-GID bit set, set in on the new directory. + * Otherwise, if the user is neither privileged nor a member of the + * file's new group, clear the file's set-GID bit. 
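[Editorial note: when no explicit, permitted group was requested, the fallback in zfs_perm_init() above follows the usual set-GID directory rule. Reduced to its core — FUID mapping and credential handling omitted — as an illustrative helper:]

static uint64_t
example_default_group(uint64_t parent_mode, uint64_t parent_gid,
    uint64_t cred_gid)
{
	return ((parent_mode & S_ISGID) ? parent_gid : cred_gid);
}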
+ */ + + if ((parent->z_phys->zp_mode & S_ISGID) && (vap->va_type == VDIR)) { + mode |= S_ISGID; + } else { + if ((mode & S_ISGID) && + secpolicy_vnode_setids_setgids(cr, gid) != 0) + mode &= ~S_ISGID; + } + + zp->z_phys->zp_uid = fuid; + zp->z_phys->zp_gid = fgid; + zp->z_phys->zp_mode = mode; + + if (aclp == NULL) { + mutex_enter(&parent->z_lock); + if ((ZTOV(parent)->v_type == VDIR && + (parent->z_phys->zp_flags & ZFS_INHERIT_ACE)) && + !(zp->z_phys->zp_flags & ZFS_XATTR)) { + mutex_enter(&parent->z_acl_lock); + VERIFY(0 == zfs_acl_node_read(parent, &paclp, B_FALSE)); + mutex_exit(&parent->z_acl_lock); + aclp = zfs_acl_inherit(zp, paclp, mode, &need_chmod); + zfs_acl_free(paclp); + } else { + aclp = zfs_acl_alloc(zfs_acl_version_zp(zp)); + } + mutex_exit(&parent->z_lock); + mutex_enter(&zp->z_lock); + mutex_enter(&zp->z_acl_lock); + if (need_chmod) + zfs_acl_chmod(zp, mode, aclp); + } else { + mutex_enter(&zp->z_lock); + mutex_enter(&zp->z_acl_lock); + } + + /* Force auto_inherit on all new directory objects */ + if (vap->va_type == VDIR) + aclp->z_hints |= ZFS_ACL_AUTO_INHERIT; + + error = zfs_aclset_common(zp, aclp, cr, fuidp, tx); + + /* Set optional attributes if any */ + if (vap->va_mask & AT_XVATTR) + zfs_xvattr_set(zp, xvap); + + mutex_exit(&zp->z_lock); + mutex_exit(&zp->z_acl_lock); + ASSERT3U(error, ==, 0); + + if (aclp != setaclp) + zfs_acl_free(aclp); +} + +/* + * Retrieve a files ACL + */ +int +zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr) +{ + zfs_acl_t *aclp; + ulong_t mask; + int error; + int count = 0; + int largeace = 0; + + mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT | + VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES); + + if (error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr)) + return (error); + + if (mask == 0) + return (ENOSYS); + + mutex_enter(&zp->z_acl_lock); + + error = zfs_acl_node_read(zp, &aclp, B_FALSE); + if (error != 0) { + mutex_exit(&zp->z_acl_lock); + return (error); + } + + /* + * Scan ACL to determine number of ACEs + */ + if ((zp->z_phys->zp_flags & ZFS_ACL_OBJ_ACE) && + !(mask & VSA_ACE_ALLTYPES)) { + void *zacep = NULL; + uint64_t who; + uint32_t access_mask; + uint16_t type, iflags; + + while (zacep = zfs_acl_next_ace(aclp, zacep, + &who, &access_mask, &iflags, &type)) { + switch (type) { + case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: + case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: + case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: + case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: + largeace++; + continue; + default: + count++; + } + } + vsecp->vsa_aclcnt = count; + } else + count = aclp->z_acl_count; + + if (mask & VSA_ACECNT) { + vsecp->vsa_aclcnt = count; + } + + if (mask & VSA_ACE) { + size_t aclsz; + + zfs_acl_node_t *aclnode = list_head(&aclp->z_acl); + + aclsz = count * sizeof (ace_t) + + sizeof (ace_object_t) * largeace; + + vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP); + vsecp->vsa_aclentsz = aclsz; + + if (aclp->z_version == ZFS_ACL_VERSION_FUID) + zfs_copy_fuid_2_ace(zp->z_zfsvfs, aclp, cr, + vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES)); + else { + bcopy(aclnode->z_acldata, vsecp->vsa_aclentp, + count * sizeof (ace_t)); + } + } + if (mask & VSA_ACE_ACLFLAGS) { + vsecp->vsa_aclflags = 0; + if (zp->z_phys->zp_flags & ZFS_ACL_DEFAULTED) + vsecp->vsa_aclflags |= ACL_DEFAULTED; + if (zp->z_phys->zp_flags & ZFS_ACL_PROTECTED) + vsecp->vsa_aclflags |= ACL_PROTECTED; + if (zp->z_phys->zp_flags & ZFS_ACL_AUTO_INHERIT) + vsecp->vsa_aclflags |= ACL_AUTO_INHERIT; + } + + mutex_exit(&zp->z_acl_lock); + + zfs_acl_free(aclp); + + return (0); 
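[Editorial note: a caller of zfs_getacl() above selects what it wants through vsa_mask. A minimal sketch requesting both the entry count and the entries; the helper is hypothetical, and locking plus the lifetime of the returned entry buffer are left to the caller.]

static int
example_read_acl(znode_t *zp, cred_t *cr, vsecattr_t *vsp)
{
	vsp->vsa_mask = VSA_ACE | VSA_ACECNT;
	return (zfs_getacl(zp, vsp, B_FALSE, cr));
}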
+} + +int +zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, vtype_t obj_type, + vsecattr_t *vsecp, zfs_acl_t **zaclp) +{ + zfs_acl_t *aclp; + zfs_acl_node_t *aclnode; + int aclcnt = vsecp->vsa_aclcnt; + int error; + + if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0) + return (EINVAL); + + aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version)); + + aclp->z_hints = 0; + aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t)); + if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) { + if ((error = zfs_copy_ace_2_oldace(obj_type, aclp, + (ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata, + aclcnt, &aclnode->z_size)) != 0) { + zfs_acl_free(aclp); + zfs_acl_node_free(aclnode); + return (error); + } + } else { + if ((error = zfs_copy_ace_2_fuid(obj_type, aclp, + vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt, + &aclnode->z_size)) != 0) { + zfs_acl_free(aclp); + zfs_acl_node_free(aclnode); + return (error); + } + } + aclp->z_acl_bytes = aclnode->z_size; + aclnode->z_ace_count = aclcnt; + aclp->z_acl_count = aclcnt; + list_insert_head(&aclp->z_acl, aclnode); + + /* + * If flags are being set then add them to z_hints + */ + if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) { + if (vsecp->vsa_aclflags & ACL_PROTECTED) + aclp->z_hints |= ZFS_ACL_PROTECTED; + if (vsecp->vsa_aclflags & ACL_DEFAULTED) + aclp->z_hints |= ZFS_ACL_DEFAULTED; + if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT) + aclp->z_hints |= ZFS_ACL_AUTO_INHERIT; + } + + *zaclp = aclp; + + return (0); +} + +/* + * Set a files ACL + */ +int +zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr) +{ + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + zilog_t *zilog = zfsvfs->z_log; + ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT); + dmu_tx_t *tx; + int error; + zfs_acl_t *aclp; + zfs_fuid_info_t *fuidp = NULL; + + if (mask == 0) + return (ENOSYS); + + if (zp->z_phys->zp_flags & ZFS_IMMUTABLE) + return (EPERM); + + if (error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)) + return (error); + + error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, &aclp); + if (error) + return (error); + + /* + * If ACL wide flags aren't being set then preserve any + * existing flags. + */ + if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) { + aclp->z_hints |= (zp->z_phys->zp_flags & V4_ACL_WIDE_FLAGS); + } +top: + if (error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)) { + zfs_acl_free(aclp); + return (error); + } + + mutex_enter(&zp->z_lock); + mutex_enter(&zp->z_acl_lock); + + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_bonus(tx, zp->z_id); + + if (zp->z_phys->zp_acl.z_acl_extern_obj) { + /* Are we upgrading ACL? 
*/ + if (zfsvfs->z_version <= ZPL_VERSION_FUID && + zp->z_phys->zp_acl.z_acl_version == + ZFS_ACL_VERSION_INITIAL) { + dmu_tx_hold_free(tx, + zp->z_phys->zp_acl.z_acl_extern_obj, + 0, DMU_OBJECT_END); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, + 0, aclp->z_acl_bytes); + } else { + dmu_tx_hold_write(tx, + zp->z_phys->zp_acl.z_acl_extern_obj, + 0, aclp->z_acl_bytes); + } + } else if (aclp->z_acl_bytes > ZFS_ACE_SPACE) { + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes); + } + if (aclp->z_has_fuids) { + if (zfsvfs->z_fuid_obj == 0) { + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); + } else { + dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); + dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + } + } + + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + mutex_exit(&zp->z_acl_lock); + mutex_exit(&zp->z_lock); + + if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + dmu_tx_abort(tx); + zfs_acl_free(aclp); + return (error); + } + + error = zfs_aclset_common(zp, aclp, cr, &fuidp, tx); + ASSERT(error == 0); + + zfs_log_acl(zilog, tx, zp, vsecp, fuidp); + + if (fuidp) + zfs_fuid_info_free(fuidp); + zfs_acl_free(aclp); + dmu_tx_commit(tx); +done: + mutex_exit(&zp->z_acl_lock); + mutex_exit(&zp->z_lock); + + return (error); +} + +/* + * working_mode returns the permissions that were not granted + */ +static int +zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode, + boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr) +{ + zfs_acl_t *aclp; + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + int error; + uid_t uid = crgetuid(cr); + uint64_t who; + uint16_t type, iflags; + uint16_t entry_type; + uint32_t access_mask; + uint32_t deny_mask = 0; + zfs_ace_hdr_t *acep = NULL; + boolean_t checkit; + uid_t fowner; + uid_t gowner; + + /* + * Short circuit empty requests + */ + if (v4_mode == 0) + return (0); + + *check_privs = B_TRUE; + + if (zfsvfs->z_assign >= TXG_INITIAL) { /* ZIL replay */ + *working_mode = 0; + return (0); + } + + *working_mode = v4_mode; + + if ((v4_mode & WRITE_MASK) && + (zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) && + (!IS_DEVVP(ZTOV(zp)))) { + *check_privs = B_FALSE; + return (EROFS); + } + + /* + * Only check for READONLY on non-directories. + */ + if ((v4_mode & WRITE_MASK_DATA) && + (((ZTOV(zp)->v_type != VDIR) && + (zp->z_phys->zp_flags & (ZFS_READONLY | ZFS_IMMUTABLE))) || + (ZTOV(zp)->v_type == VDIR && + (zp->z_phys->zp_flags & ZFS_IMMUTABLE)))) { + *check_privs = B_FALSE; + return (EPERM); + } + + if ((v4_mode & (ACE_DELETE | ACE_DELETE_CHILD)) && + (zp->z_phys->zp_flags & ZFS_NOUNLINK)) { + *check_privs = B_FALSE; + return (EPERM); + } + + if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) && + (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED))) { + *check_privs = B_FALSE; + return (EACCES); + } + + /* + * The caller requested that the ACL check be skipped. This + * would only happen if the caller checked VOP_ACCESS() with a + * 32 bit ACE mask and already had the appropriate permissions. 
+ */ + if (skipaclchk) { + *working_mode = 0; + return (0); + } + + zfs_fuid_map_ids(zp, cr, &fowner, &gowner); + + mutex_enter(&zp->z_acl_lock); + + error = zfs_acl_node_read(zp, &aclp, B_FALSE); + if (error != 0) { + mutex_exit(&zp->z_acl_lock); + return (error); + } + + while (acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask, + &iflags, &type)) { + + if (!zfs_acl_valid_ace_type(type, iflags)) + continue; + + if (ZTOV(zp)->v_type == VDIR && (iflags & ACE_INHERIT_ONLY_ACE)) + continue; + + entry_type = (iflags & ACE_TYPE_FLAGS); + + checkit = B_FALSE; + + switch (entry_type) { + case ACE_OWNER: + if (uid == fowner) + checkit = B_TRUE; + break; + case OWNING_GROUP: + who = gowner; + /*FALLTHROUGH*/ + case ACE_IDENTIFIER_GROUP: + checkit = zfs_groupmember(zfsvfs, who, cr); + break; + case ACE_EVERYONE: + checkit = B_TRUE; + break; + + /* USER Entry */ + default: + if (entry_type == 0) { + uid_t newid; + + newid = zfs_fuid_map_id(zfsvfs, who, cr, + ZFS_ACE_USER); + if (newid != IDMAP_WK_CREATOR_OWNER_UID && + uid == newid) + checkit = B_TRUE; + break; + } else { + zfs_acl_free(aclp); + mutex_exit(&zp->z_acl_lock); + return (EIO); + } + } + + if (checkit) { + uint32_t mask_matched = (access_mask & *working_mode); + + if (mask_matched) { + if (type == DENY) + deny_mask |= mask_matched; + + *working_mode &= ~mask_matched; + } + } + + /* Are we done? */ + if (*working_mode == 0) + break; + } + + mutex_exit(&zp->z_acl_lock); + zfs_acl_free(aclp); + + /* Put the found 'denies' back on the working mode */ + if (deny_mask) { + *working_mode |= deny_mask; + return (EACCES); + } else if (*working_mode) { + return (-1); + } + + return (0); +} + +static int +zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs, + cred_t *cr) +{ + if (*working_mode != ACE_WRITE_DATA) + return (EACCES); + + return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode, + check_privs, B_FALSE, cr)); +} + +/* + * Determine whether Access should be granted/denied, invoking least + * priv subsytem when a deny is determined. 
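+ *
+ * To illustrate the working_mode bookkeeping performed by
+ * zfs_zaccess_common() above: a request for
+ * (ACE_READ_DATA|ACE_WRITE_DATA) evaluated against
+ *	user:joe:read_data:allow,user:joe:write_data:deny
+ * clears ACE_READ_DATA from *working_mode when the allow ACE matches,
+ * then records ACE_WRITE_DATA in deny_mask when the deny ACE matches.
+ * The denied bit is folded back into *working_mode and EACCES is
+ * returned, so zfs_zaccess() below can see exactly which permission was
+ * explicitly denied and ask the privilege framework whether the denial
+ * may be overridden.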
+ */ +int +zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr) +{ + uint32_t working_mode; + int error; + int is_attr; + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + boolean_t check_privs; + znode_t *xzp; + znode_t *check_zp = zp; + + is_attr = ((zp->z_phys->zp_flags & ZFS_XATTR) && + (ZTOV(zp)->v_type == VDIR)); + + /* + * If attribute then validate against base file + */ + if (is_attr) { + if ((error = zfs_zget(zp->z_zfsvfs, + zp->z_phys->zp_parent, &xzp)) != 0) { + return (error); + } + + check_zp = xzp; + + /* + * fixup mode to map to xattr perms + */ + + if (mode & (ACE_WRITE_DATA|ACE_APPEND_DATA)) { + mode &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA); + mode |= ACE_WRITE_NAMED_ATTRS; + } + + if (mode & (ACE_READ_DATA|ACE_EXECUTE)) { + mode &= ~(ACE_READ_DATA|ACE_EXECUTE); + mode |= ACE_READ_NAMED_ATTRS; + } + } + + if ((error = zfs_zaccess_common(check_zp, mode, &working_mode, + &check_privs, skipaclchk, cr)) == 0) { + if (is_attr) + VN_RELE(ZTOV(xzp)); + return (0); + } + + if (error && !check_privs) { + if (is_attr) + VN_RELE(ZTOV(xzp)); + return (error); + } + + if (error && (flags & V_APPEND)) { + error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr); + } + + if (error && check_privs) { + uid_t owner; + mode_t checkmode = 0; + + owner = zfs_fuid_map_id(zfsvfs, check_zp->z_phys->zp_uid, cr, + ZFS_OWNER); + + /* + * First check for implicit owner permission on + * read_acl/read_attributes + */ + + error = 0; + ASSERT(working_mode != 0); + + if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) && + owner == crgetuid(cr))) + working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES); + + if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS| + ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE)) + checkmode |= VREAD; + if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS| + ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE)) + checkmode |= VWRITE; + if (working_mode & ACE_EXECUTE) + checkmode |= VEXEC; + + if (checkmode) + error = secpolicy_vnode_access(cr, ZTOV(check_zp), + owner, checkmode); + + if (error == 0 && (working_mode & ACE_WRITE_OWNER)) + error = secpolicy_vnode_chown(cr, B_TRUE); + if (error == 0 && (working_mode & ACE_WRITE_ACL)) + error = secpolicy_vnode_setdac(cr, owner); + + if (error == 0 && (working_mode & + (ACE_DELETE|ACE_DELETE_CHILD))) + error = secpolicy_vnode_remove(cr); + + if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) { + error = secpolicy_vnode_chown(cr, B_FALSE); + } + if (error == 0) { + /* + * See if any bits other than those already checked + * for are still present. 
If so then return EACCES + */ + if (working_mode & ~(ZFS_CHECKED_MASKS)) { + error = EACCES; + } + } + } + + if (is_attr) + VN_RELE(ZTOV(xzp)); + + return (error); +} + +/* + * Translate traditional unix VREAD/VWRITE/VEXEC mode into + * native ACL format and call zfs_zaccess() + */ +int +zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr) +{ + return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr)); +} + +/* + * Access function for secpolicy_vnode_setattr + */ +int +zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr) +{ + int v4_mode = zfs_unix_to_v4(mode >> 6); + + return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr)); +} + +static int +zfs_delete_final_check(znode_t *zp, znode_t *dzp, + mode_t missing_perms, cred_t *cr) +{ + int error; + uid_t downer; + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + + downer = zfs_fuid_map_id(zfsvfs, dzp->z_phys->zp_uid, cr, ZFS_OWNER); + + error = secpolicy_vnode_access(cr, ZTOV(dzp), downer, missing_perms); + + if (error == 0) + error = zfs_sticky_remove_access(dzp, zp, cr); + + return (error); +} + +/* + * Determine whether Access should be granted/deny, without + * consulting least priv subsystem. + * + * + * The following chart is the recommended NFSv4 enforcement for + * ability to delete an object. + * + * ------------------------------------------------------- + * | Parent Dir | Target Object Permissions | + * | permissions | | + * ------------------------------------------------------- + * | | ACL Allows | ACL Denies| Delete | + * | | Delete | Delete | unspecified| + * ------------------------------------------------------- + * | ACL Allows | Permit | Permit | Permit | + * | DELETE_CHILD | | + * ------------------------------------------------------- + * | ACL Denies | Permit | Deny | Deny | + * | DELETE_CHILD | | | | + * ------------------------------------------------------- + * | ACL specifies | | | | + * | only allow | Permit | Permit | Permit | + * | write and | | | | + * | execute | | | | + * ------------------------------------------------------- + * | ACL denies | | | | + * | write and | Permit | Deny | Deny | + * | execute | | | | + * ------------------------------------------------------- + * ^ + * | + * No search privilege, can't even look up file? + * + */ +int +zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr) +{ + uint32_t dzp_working_mode = 0; + uint32_t zp_working_mode = 0; + int dzp_error, zp_error; + mode_t missing_perms; + boolean_t dzpcheck_privs = B_TRUE; + boolean_t zpcheck_privs = B_TRUE; + + /* + * We want specific DELETE permissions to + * take precedence over WRITE/EXECUTE. We don't + * want an ACL such as this to mess us up. + * user:joe:write_data:deny,user:joe:delete:allow + * + * However, deny permissions may ultimately be overridden + * by secpolicy_vnode_access(). + * + * We will ask for all of the necessary permissions and then + * look at the working modes from the directory and target object + * to determine what was found. + */ + + if (zp->z_phys->zp_flags & (ZFS_IMMUTABLE | ZFS_NOUNLINK)) + return (EPERM); + + /* + * First row + * If the directory permissions allow the delete, we are done. 
+ */ + if ((dzp_error = zfs_zaccess_common(dzp, ACE_DELETE_CHILD, + &dzp_working_mode, &dzpcheck_privs, B_FALSE, cr)) == 0) + return (0); + + /* + * If target object has delete permission then we are done + */ + if ((zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode, + &zpcheck_privs, B_FALSE, cr)) == 0) + return (0); + + ASSERT(dzp_error && zp_error); + + if (!dzpcheck_privs) + return (dzp_error); + if (!zpcheck_privs) + return (zp_error); + + /* + * Second row + * + * If directory returns EACCES then delete_child was denied + * due to deny delete_child. In this case send the request through + * secpolicy_vnode_remove(). We don't use zfs_delete_final_check() + * since that *could* allow the delete based on write/execute permission + * and we want delete permissions to override write/execute. + */ + + if (dzp_error == EACCES) + return (secpolicy_vnode_remove(cr)); + + /* + * Third Row + * only need to see if we have write/execute on directory. + */ + + if ((dzp_error = zfs_zaccess_common(dzp, ACE_EXECUTE|ACE_WRITE_DATA, + &dzp_working_mode, &dzpcheck_privs, B_FALSE, cr)) == 0) + return (zfs_sticky_remove_access(dzp, zp, cr)); + + if (!dzpcheck_privs) + return (dzp_error); + + /* + * Fourth row + */ + + missing_perms = (dzp_working_mode & ACE_WRITE_DATA) ? VWRITE : 0; + missing_perms |= (dzp_working_mode & ACE_EXECUTE) ? VEXEC : 0; + + ASSERT(missing_perms); + + return (zfs_delete_final_check(zp, dzp, missing_perms, cr)); + +} + +int +zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp, + znode_t *tzp, cred_t *cr) +{ + int add_perm; + int error; + + if (szp->z_phys->zp_flags & ZFS_AV_QUARANTINED) + return (EACCES); + + add_perm = (ZTOV(szp)->v_type == VDIR) ? + ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE; + + /* + * Rename permissions are combination of delete permission + + * add file/subdir permission. + */ + + /* + * first make sure we do the delete portion. + * + * If that succeeds then check for add_file/add_subdir permissions + */ + + if (error = zfs_zaccess_delete(sdzp, szp, cr)) + return (error); + + /* + * If we have a tzp, see if we can delete it? + */ + if (tzp) { + if (error = zfs_zaccess_delete(tdzp, tzp, cr)) + return (error); + } + + /* + * Now check for add permissions + */ + error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr); + + return (error); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_byteswap.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_byteswap.c new file mode 100644 index 000000000000..ab97f83eb0af --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_byteswap.c @@ -0,0 +1,175 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include + +void +zfs_oldace_byteswap(ace_t *ace, int ace_cnt) +{ + int i; + + for (i = 0; i != ace_cnt; i++, ace++) { + ace->a_who = BSWAP_32(ace->a_who); + ace->a_access_mask = BSWAP_32(ace->a_access_mask); + ace->a_flags = BSWAP_16(ace->a_flags); + ace->a_type = BSWAP_16(ace->a_type); + } +} + +/* + * swap ace_t and ace_oject_t + */ +void +zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout) +{ + caddr_t end; + caddr_t ptr; + zfs_ace_t *zacep; + ace_t *acep; + uint16_t entry_type; + size_t entry_size; + int ace_type; + + end = (caddr_t)buf + size; + ptr = buf; + + while (ptr < end) { + if (zfs_layout) { + zacep = (zfs_ace_t *)ptr; + zacep->z_hdr.z_access_mask = + BSWAP_32(zacep->z_hdr.z_access_mask); + zacep->z_hdr.z_flags = BSWAP_16(zacep->z_hdr.z_flags); + ace_type = zacep->z_hdr.z_type = + BSWAP_16(zacep->z_hdr.z_type); + entry_type = zacep->z_hdr.z_flags & ACE_TYPE_FLAGS; + } else { + acep = (ace_t *)ptr; + acep->a_access_mask = BSWAP_32(acep->a_access_mask); + acep->a_flags = BSWAP_16(acep->a_flags); + ace_type = acep->a_type = BSWAP_16(acep->a_type); + acep->a_who = BSWAP_32(acep->a_who); + entry_type = acep->a_flags & ACE_TYPE_FLAGS; + } + switch (entry_type) { + case ACE_OWNER: + case ACE_EVERYONE: + case (ACE_IDENTIFIER_GROUP | ACE_GROUP): + entry_size = zfs_layout ? + sizeof (zfs_ace_hdr_t) : sizeof (ace_t); + break; + case ACE_IDENTIFIER_GROUP: + default: + if (zfs_layout) { + zacep->z_fuid = BSWAP_64(zacep->z_fuid); + } + switch (ace_type) { + case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE: + case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE: + case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE: + case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE: + entry_size = zfs_layout ? + sizeof (zfs_object_ace_t) : + sizeof (ace_object_t); + break; + default: + entry_size = zfs_layout ? 
sizeof (zfs_ace_t) : + sizeof (ace_t); + break; + } + } + ptr = ptr + entry_size; + } +} + +/* ARGSUSED */ +void +zfs_oldacl_byteswap(void *buf, size_t size) +{ + int cnt; + + /* + * Arggh, since we don't know how many ACEs are in + * the array, we have to swap the entire block + */ + + cnt = size / sizeof (ace_t); + + zfs_oldace_byteswap((ace_t *)buf, cnt); +} + +/* ARGSUSED */ +void +zfs_acl_byteswap(void *buf, size_t size) +{ + zfs_ace_byteswap(buf, size, B_TRUE); +} + +void +zfs_znode_byteswap(void *buf, size_t size) +{ + znode_phys_t *zp = buf; + + ASSERT(size >= sizeof (znode_phys_t)); + + zp->zp_crtime[0] = BSWAP_64(zp->zp_crtime[0]); + zp->zp_crtime[1] = BSWAP_64(zp->zp_crtime[1]); + zp->zp_atime[0] = BSWAP_64(zp->zp_atime[0]); + zp->zp_atime[1] = BSWAP_64(zp->zp_atime[1]); + zp->zp_mtime[0] = BSWAP_64(zp->zp_mtime[0]); + zp->zp_mtime[1] = BSWAP_64(zp->zp_mtime[1]); + zp->zp_ctime[0] = BSWAP_64(zp->zp_ctime[0]); + zp->zp_ctime[1] = BSWAP_64(zp->zp_ctime[1]); + zp->zp_gen = BSWAP_64(zp->zp_gen); + zp->zp_mode = BSWAP_64(zp->zp_mode); + zp->zp_size = BSWAP_64(zp->zp_size); + zp->zp_parent = BSWAP_64(zp->zp_parent); + zp->zp_links = BSWAP_64(zp->zp_links); + zp->zp_xattr = BSWAP_64(zp->zp_xattr); + zp->zp_rdev = BSWAP_64(zp->zp_rdev); + zp->zp_flags = BSWAP_64(zp->zp_flags); + zp->zp_uid = BSWAP_64(zp->zp_uid); + zp->zp_gid = BSWAP_64(zp->zp_gid); + zp->zp_zap = BSWAP_64(zp->zp_zap); + zp->zp_pad[0] = BSWAP_64(zp->zp_pad[0]); + zp->zp_pad[1] = BSWAP_64(zp->zp_pad[1]); + zp->zp_pad[2] = BSWAP_64(zp->zp_pad[2]); + + zp->zp_acl.z_acl_extern_obj = BSWAP_64(zp->zp_acl.z_acl_extern_obj); + zp->zp_acl.z_acl_size = BSWAP_32(zp->zp_acl.z_acl_size); + zp->zp_acl.z_acl_version = BSWAP_16(zp->zp_acl.z_acl_version); + zp->zp_acl.z_acl_count = BSWAP_16(zp->zp_acl.z_acl_count); + if (zp->zp_acl.z_acl_version == ZFS_ACL_VERSION) { + zfs_acl_byteswap((void *)&zp->zp_acl.z_ace_data[0], + ZFS_ACE_SPACE); + } else + zfs_oldace_byteswap((ace_t *)&zp->zp_acl.z_ace_data[0], + ACE_SLOT_CNT); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ctldir.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ctldir.c new file mode 100644 index 000000000000..208fc36295d0 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ctldir.c @@ -0,0 +1,1159 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * ZFS control directory (a.k.a. ".zfs") + * + * This directory provides a common location for all ZFS meta-objects. + * Currently, this is only the 'snapshot' directory, but this may expand in the + * future. 
The elements are built using the GFS primitives, as the hierarchy + * does not actually exist on disk. + * + * For 'snapshot', we don't want to have all snapshots always mounted, because + * this would take up a huge amount of space in /etc/mnttab. We have three + * types of objects: + * + * ctldir ------> snapshotdir -------> snapshot + * | + * | + * V + * mounted fs + * + * The 'snapshot' node contains just enough information to lookup '..' and act + * as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we + * perform an automount of the underlying filesystem and return the + * corresponding vnode. + * + * All mounts are handled automatically by the kernel, but unmounts are + * (currently) handled from user land. The main reason is that there is no + * reliable way to auto-unmount the filesystem when it's "no longer in use". + * When the user unmounts a filesystem, we call zfsctl_unmount(), which + * unmounts any snapshots within the snapshot directory. + * + * The '.zfs', '.zfs/snapshot', and all directories created under + * '.zfs/snapshot' (ie: '.zfs/snapshot/') are all GFS nodes and + * share the same vfs_t as the head filesystem (what '.zfs' lives under). + * + * File systems mounted ontop of the GFS nodes '.zfs/snapshot/' + * (ie: snapshots) are ZFS nodes and have their own unique vfs_t. + * However, vnodes within these mounted on file systems have their v_vfsp + * fields set to the head filesystem to make NFS happy (see + * zfsctl_snapdir_lookup()). We VFS_HOLD the head filesystem's vfs_t + * so that it cannot be freed until all snapshots have been unmounted. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "zfs_namecheck.h" + +typedef struct zfsctl_node { + gfs_dir_t zc_gfs_private; + uint64_t zc_id; + timestruc_t zc_cmtime; /* ctime and mtime, always the same */ +} zfsctl_node_t; + +typedef struct zfsctl_snapdir { + zfsctl_node_t sd_node; + kmutex_t sd_lock; + avl_tree_t sd_snaps; +} zfsctl_snapdir_t; + +typedef struct { + char *se_name; + vnode_t *se_root; + avl_node_t se_node; +} zfs_snapentry_t; + +static int +snapentry_compare(const void *a, const void *b) +{ + const zfs_snapentry_t *sa = a; + const zfs_snapentry_t *sb = b; + int ret = strcmp(sa->se_name, sb->se_name); + + if (ret < 0) + return (-1); + else if (ret > 0) + return (1); + else + return (0); +} + +vnodeops_t *zfsctl_ops_root; +vnodeops_t *zfsctl_ops_snapdir; +vnodeops_t *zfsctl_ops_snapshot; + +static const fs_operation_def_t zfsctl_tops_root[]; +static const fs_operation_def_t zfsctl_tops_snapdir[]; +static const fs_operation_def_t zfsctl_tops_snapshot[]; + +static vnode_t *zfsctl_mknode_snapdir(vnode_t *); +static vnode_t *zfsctl_snapshot_mknode(vnode_t *, uint64_t objset); +static int zfsctl_unmount_snap(zfs_snapentry_t *, int, cred_t *); + +static gfs_opsvec_t zfsctl_opsvec[] = { + { ".zfs", zfsctl_tops_root, &zfsctl_ops_root }, + { ".zfs/snapshot", zfsctl_tops_snapdir, &zfsctl_ops_snapdir }, + { ".zfs/snapshot/vnode", zfsctl_tops_snapshot, &zfsctl_ops_snapshot }, + { NULL } +}; + +/* + * Root directory elements. We have only a single static entry, 'snapshot'. + */ +static gfs_dirent_t zfsctl_root_entries[] = { + { "snapshot", zfsctl_mknode_snapdir, GFS_CACHE_VNODE }, + { NULL } +}; + +/* include . and .. 
in the calculation */ +#define NROOT_ENTRIES ((sizeof (zfsctl_root_entries) / \ + sizeof (gfs_dirent_t)) + 1) + + +/* + * Initialize the various GFS pieces we'll need to create and manipulate .zfs + * directories. This is called from the ZFS init routine, and initializes the + * vnode ops vectors that we'll be using. + */ +void +zfsctl_init(void) +{ + VERIFY(gfs_make_opsvec(zfsctl_opsvec) == 0); +} + +void +zfsctl_fini(void) +{ + /* + * Remove vfsctl vnode ops + */ + if (zfsctl_ops_root) + vn_freevnodeops(zfsctl_ops_root); + if (zfsctl_ops_snapdir) + vn_freevnodeops(zfsctl_ops_snapdir); + if (zfsctl_ops_snapshot) + vn_freevnodeops(zfsctl_ops_snapshot); + + zfsctl_ops_root = NULL; + zfsctl_ops_snapdir = NULL; + zfsctl_ops_snapshot = NULL; +} + +/* + * Return the inode number associated with the 'snapshot' directory. + */ +/* ARGSUSED */ +static ino64_t +zfsctl_root_inode_cb(vnode_t *vp, int index) +{ + ASSERT(index == 0); + return (ZFSCTL_INO_SNAPDIR); +} + +/* + * Create the '.zfs' directory. This directory is cached as part of the VFS + * structure. This results in a hold on the vfs_t. The code in zfs_umount() + * therefore checks against a vfs_count of 2 instead of 1. This reference + * is removed when the ctldir is destroyed in the unmount. + */ +void +zfsctl_create(zfsvfs_t *zfsvfs) +{ + vnode_t *vp, *rvp; + zfsctl_node_t *zcp; + + ASSERT(zfsvfs->z_ctldir == NULL); + + vp = gfs_root_create(sizeof (zfsctl_node_t), zfsvfs->z_vfs, + zfsctl_ops_root, ZFSCTL_INO_ROOT, zfsctl_root_entries, + zfsctl_root_inode_cb, MAXNAMELEN, NULL, NULL); + zcp = vp->v_data; + zcp->zc_id = ZFSCTL_INO_ROOT; + + VERIFY(VFS_ROOT(zfsvfs->z_vfs, &rvp) == 0); + ZFS_TIME_DECODE(&zcp->zc_cmtime, VTOZ(rvp)->z_phys->zp_crtime); + VN_RELE(rvp); + + /* + * We're only faking the fact that we have a root of a filesystem for + * the sake of the GFS interfaces. Undo the flag manipulation it did + * for us. + */ + vp->v_flag &= ~(VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT); + + zfsvfs->z_ctldir = vp; +} + +/* + * Destroy the '.zfs' directory. Only called when the filesystem is unmounted. + * There might still be more references if we were force unmounted, but only + * new zfs_inactive() calls can occur and they don't reference .zfs + */ +void +zfsctl_destroy(zfsvfs_t *zfsvfs) +{ + VN_RELE(zfsvfs->z_ctldir); + zfsvfs->z_ctldir = NULL; +} + +/* + * Given a root znode, retrieve the associated .zfs directory. + * Add a hold to the vnode and return it. + */ +vnode_t * +zfsctl_root(znode_t *zp) +{ + ASSERT(zfs_has_ctldir(zp)); + VN_HOLD(zp->z_zfsvfs->z_ctldir); + return (zp->z_zfsvfs->z_ctldir); +} + +/* + * Common open routine. Disallow any write access. + */ +/* ARGSUSED */ +static int +zfsctl_common_open(vnode_t **vpp, int flags, cred_t *cr, caller_context_t *ct) +{ + if (flags & FWRITE) + return (EACCES); + + return (0); +} + +/* + * Common close routine. Nothing to do here. + */ +/* ARGSUSED */ +static int +zfsctl_common_close(vnode_t *vpp, int flags, int count, offset_t off, + cred_t *cr, caller_context_t *ct) +{ + return (0); +} + +/* + * Common access routine. Disallow writes. + */ +/* ARGSUSED */ +static int +zfsctl_common_access(vnode_t *vp, int mode, int flags, cred_t *cr, + caller_context_t *ct) +{ + if (mode & VWRITE) + return (EACCES); + + return (0); +} + +/* + * Common getattr function. Fill in basic information. 
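+ *
+ * Everything reported here is synthetic: '.zfs' objects are presented
+ * as read-only directories owned by root (mode 0555, type VDIR), with
+ * atime set to "now" and mtime/ctime taken from the creation time of
+ * the filesystem root.  A hypothetical "ls -ld /tank/fs/.zfs" would
+ * therefore show dr-xr-xr-x owned by root ("tank/fs" is only an
+ * example dataset name).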
+ */ +static void +zfsctl_common_getattr(vnode_t *vp, vattr_t *vap) +{ + zfsctl_node_t *zcp = vp->v_data; + timestruc_t now; + + vap->va_uid = 0; + vap->va_gid = 0; + vap->va_rdev = 0; + /* + * We are a purly virtual object, so we have no + * blocksize or allocated blocks. + */ + vap->va_blksize = 0; + vap->va_nblocks = 0; + vap->va_seq = 0; + vap->va_fsid = vp->v_vfsp->vfs_dev; + vap->va_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | + S_IROTH | S_IXOTH; + vap->va_type = VDIR; + /* + * We live in the now (for atime). + */ + gethrestime(&now); + vap->va_atime = now; + vap->va_mtime = vap->va_ctime = zcp->zc_cmtime; +} + +/*ARGSUSED*/ +static int +zfsctl_common_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct) +{ + zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; + zfsctl_node_t *zcp = vp->v_data; + uint64_t object = zcp->zc_id; + zfid_short_t *zfid; + int i; + + ZFS_ENTER(zfsvfs); + + if (fidp->fid_len < SHORT_FID_LEN) { + fidp->fid_len = SHORT_FID_LEN; + ZFS_EXIT(zfsvfs); + return (ENOSPC); + } + + zfid = (zfid_short_t *)fidp; + + zfid->zf_len = SHORT_FID_LEN; + + for (i = 0; i < sizeof (zfid->zf_object); i++) + zfid->zf_object[i] = (uint8_t)(object >> (8 * i)); + + /* .zfs znodes always have a generation number of 0 */ + for (i = 0; i < sizeof (zfid->zf_gen); i++) + zfid->zf_gen[i] = 0; + + ZFS_EXIT(zfsvfs); + return (0); +} + +/* + * .zfs inode namespace + * + * We need to generate unique inode numbers for all files and directories + * within the .zfs pseudo-filesystem. We use the following scheme: + * + * ENTRY ZFSCTL_INODE + * .zfs 1 + * .zfs/snapshot 2 + * .zfs/snapshot/ objectid(snap) + */ + +#define ZFSCTL_INO_SNAP(id) (id) + +/* + * Get root directory attributes. + */ +/* ARGSUSED */ +static int +zfsctl_root_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, + caller_context_t *ct) +{ + zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; + + ZFS_ENTER(zfsvfs); + vap->va_nodeid = ZFSCTL_INO_ROOT; + vap->va_nlink = vap->va_size = NROOT_ENTRIES; + + zfsctl_common_getattr(vp, vap); + ZFS_EXIT(zfsvfs); + + return (0); +} + +/* + * Special case the handling of "..". 
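+ *
+ * '.zfs' has no real directory entry for "..", so it cannot be resolved
+ * through the normal GFS lookup; instead the code below hands it to
+ * VFS_ROOT().  For example, "cd .zfs/.." from the root of a mounted
+ * filesystem simply lands back at that root.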
+ */ +/* ARGSUSED */ +int +zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp, + int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, + int *direntflags, pathname_t *realpnp) +{ + zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data; + int err; + + /* + * No extended attributes allowed under .zfs + */ + if (flags & LOOKUP_XATTR) + return (EINVAL); + + ZFS_ENTER(zfsvfs); + + if (strcmp(nm, "..") == 0) { + err = VFS_ROOT(dvp->v_vfsp, vpp); + } else { + err = gfs_vop_lookup(dvp, nm, vpp, pnp, flags, rdir, + cr, ct, direntflags, realpnp); + } + + ZFS_EXIT(zfsvfs); + + return (err); +} + +static const fs_operation_def_t zfsctl_tops_root[] = { + { VOPNAME_OPEN, { .vop_open = zfsctl_common_open } }, + { VOPNAME_CLOSE, { .vop_close = zfsctl_common_close } }, + { VOPNAME_IOCTL, { .error = fs_inval } }, + { VOPNAME_GETATTR, { .vop_getattr = zfsctl_root_getattr } }, + { VOPNAME_ACCESS, { .vop_access = zfsctl_common_access } }, + { VOPNAME_READDIR, { .vop_readdir = gfs_vop_readdir } }, + { VOPNAME_LOOKUP, { .vop_lookup = zfsctl_root_lookup } }, + { VOPNAME_SEEK, { .vop_seek = fs_seek } }, + { VOPNAME_INACTIVE, { .vop_inactive = gfs_vop_inactive } }, + { VOPNAME_FID, { .vop_fid = zfsctl_common_fid } }, + { NULL } +}; + +static int +zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname) +{ + objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os; + + if (snapshot_namecheck(name, NULL, NULL) != 0) + return (EILSEQ); + dmu_objset_name(os, zname); + if (strlen(zname) + 1 + strlen(name) >= len) + return (ENAMETOOLONG); + (void) strcat(zname, "@"); + (void) strcat(zname, name); + return (0); +} + +static int +zfsctl_unmount_snap(zfs_snapentry_t *sep, int fflags, cred_t *cr) +{ + vnode_t *svp = sep->se_root; + int error; + + ASSERT(vn_ismntpt(svp)); + + /* this will be dropped by dounmount() */ + if ((error = vn_vfswlock(svp)) != 0) + return (error); + + VN_HOLD(svp); + error = dounmount(vn_mountedvfs(svp), fflags, cr); + if (error) { + VN_RELE(svp); + return (error); + } + VFS_RELE(svp->v_vfsp); + /* + * We can't use VN_RELE(), as that will try to invoke + * zfsctl_snapdir_inactive(), which would cause us to destroy + * the sd_lock mutex held by our caller. + */ + ASSERT(svp->v_count == 1); + gfs_vop_inactive(svp, cr, NULL); + + kmem_free(sep->se_name, strlen(sep->se_name) + 1); + kmem_free(sep, sizeof (zfs_snapentry_t)); + + return (0); +} + +static void +zfsctl_rename_snap(zfsctl_snapdir_t *sdp, zfs_snapentry_t *sep, const char *nm) +{ + avl_index_t where; + vfs_t *vfsp; + refstr_t *pathref; + char newpath[MAXNAMELEN]; + char *tail; + + ASSERT(MUTEX_HELD(&sdp->sd_lock)); + ASSERT(sep != NULL); + + vfsp = vn_mountedvfs(sep->se_root); + ASSERT(vfsp != NULL); + + vfs_lock_wait(vfsp); + + /* + * Change the name in the AVL tree. 
+ */ + avl_remove(&sdp->sd_snaps, sep); + kmem_free(sep->se_name, strlen(sep->se_name) + 1); + sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP); + (void) strcpy(sep->se_name, nm); + VERIFY(avl_find(&sdp->sd_snaps, sep, &where) == NULL); + avl_insert(&sdp->sd_snaps, sep, where); + + /* + * Change the current mountpoint info: + * - update the tail of the mntpoint path + * - update the tail of the resource path + */ + pathref = vfs_getmntpoint(vfsp); + (void) strncpy(newpath, refstr_value(pathref), sizeof (newpath)); + VERIFY((tail = strrchr(newpath, '/')) != NULL); + *(tail+1) = '\0'; + ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath)); + (void) strcat(newpath, nm); + refstr_rele(pathref); + vfs_setmntpoint(vfsp, newpath); + + pathref = vfs_getresource(vfsp); + (void) strncpy(newpath, refstr_value(pathref), sizeof (newpath)); + VERIFY((tail = strrchr(newpath, '@')) != NULL); + *(tail+1) = '\0'; + ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath)); + (void) strcat(newpath, nm); + refstr_rele(pathref); + vfs_setresource(vfsp, newpath); + + vfs_unlock(vfsp); +} + +/*ARGSUSED*/ +static int +zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, + cred_t *cr, caller_context_t *ct, int flags) +{ + zfsctl_snapdir_t *sdp = sdvp->v_data; + zfs_snapentry_t search, *sep; + zfsvfs_t *zfsvfs; + avl_index_t where; + char from[MAXNAMELEN], to[MAXNAMELEN]; + char real[MAXNAMELEN]; + int err; + + zfsvfs = sdvp->v_vfsp->vfs_data; + ZFS_ENTER(zfsvfs); + + if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) { + err = dmu_snapshot_realname(zfsvfs->z_os, snm, real, + MAXNAMELEN, NULL); + if (err == 0) { + snm = real; + } else if (err != ENOTSUP) { + ZFS_EXIT(zfsvfs); + return (err); + } + } + + ZFS_EXIT(zfsvfs); + + err = zfsctl_snapshot_zname(sdvp, snm, MAXNAMELEN, from); + if (!err) + err = zfsctl_snapshot_zname(tdvp, tnm, MAXNAMELEN, to); + if (!err) + err = zfs_secpolicy_rename_perms(from, to, cr); + if (err) + return (err); + + /* + * Cannot move snapshots out of the snapdir. 
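+ *
+ * (In other words, a rename within the snapdir such as
+ * "mv .zfs/snapshot/mon .zfs/snapshot/tue" becomes
+ * dmu_objset_rename("pool/fs@mon", "pool/fs@tue") on the names built
+ * above, followed by zfsctl_rename_snap() fixing up the mounted
+ * snapshot's mountpoint and resource strings; "pool/fs" is only an
+ * illustrative dataset name.)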
+ */ + if (sdvp != tdvp) + return (EINVAL); + + if (strcmp(snm, tnm) == 0) + return (0); + + mutex_enter(&sdp->sd_lock); + + search.se_name = (char *)snm; + if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) == NULL) { + mutex_exit(&sdp->sd_lock); + return (ENOENT); + } + + err = dmu_objset_rename(from, to, B_FALSE); + if (err == 0) + zfsctl_rename_snap(sdp, sep, tnm); + + mutex_exit(&sdp->sd_lock); + + return (err); +} + +/* ARGSUSED */ +static int +zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr, + caller_context_t *ct, int flags) +{ + zfsctl_snapdir_t *sdp = dvp->v_data; + zfs_snapentry_t *sep; + zfs_snapentry_t search; + zfsvfs_t *zfsvfs; + char snapname[MAXNAMELEN]; + char real[MAXNAMELEN]; + int err; + + zfsvfs = dvp->v_vfsp->vfs_data; + ZFS_ENTER(zfsvfs); + + if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) { + + err = dmu_snapshot_realname(zfsvfs->z_os, name, real, + MAXNAMELEN, NULL); + if (err == 0) { + name = real; + } else if (err != ENOTSUP) { + ZFS_EXIT(zfsvfs); + return (err); + } + } + + ZFS_EXIT(zfsvfs); + + err = zfsctl_snapshot_zname(dvp, name, MAXNAMELEN, snapname); + if (!err) + err = zfs_secpolicy_destroy_perms(snapname, cr); + if (err) + return (err); + + mutex_enter(&sdp->sd_lock); + + search.se_name = name; + sep = avl_find(&sdp->sd_snaps, &search, NULL); + if (sep) { + avl_remove(&sdp->sd_snaps, sep); + err = zfsctl_unmount_snap(sep, MS_FORCE, cr); + if (err) + avl_add(&sdp->sd_snaps, sep); + else + err = dmu_objset_destroy(snapname); + } else { + err = ENOENT; + } + + mutex_exit(&sdp->sd_lock); + + return (err); +} + +/* + * This creates a snapshot under '.zfs/snapshot'. + */ +/* ARGSUSED */ +static int +zfsctl_snapdir_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, + cred_t *cr, caller_context_t *cc, int flags, vsecattr_t *vsecp) +{ + zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data; + char name[MAXNAMELEN]; + int err; + static enum symfollow follow = NO_FOLLOW; + static enum uio_seg seg = UIO_SYSSPACE; + + if (snapshot_namecheck(dirname, NULL, NULL) != 0) + return (EILSEQ); + + dmu_objset_name(zfsvfs->z_os, name); + + *vpp = NULL; + + err = zfs_secpolicy_snapshot_perms(name, cr); + if (err) + return (err); + + if (err == 0) { + err = dmu_objset_snapshot(name, dirname, B_FALSE); + if (err) + return (err); + err = lookupnameat(dirname, seg, follow, NULL, vpp, dvp); + } + + return (err); +} + +/* + * Lookup entry point for the 'snapshot' directory. Try to open the + * snapshot if it exist, creating the pseudo filesystem vnode as necessary. + * Perform a mount of the associated dataset on top of the vnode. + */ +/* ARGSUSED */ +static int +zfsctl_snapdir_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp, + int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, + int *direntflags, pathname_t *realpnp) +{ + zfsctl_snapdir_t *sdp = dvp->v_data; + objset_t *snap; + char snapname[MAXNAMELEN]; + char real[MAXNAMELEN]; + char *mountpoint; + zfs_snapentry_t *sep, search; + struct mounta margs; + vfs_t *vfsp; + size_t mountpoint_len; + avl_index_t where; + zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data; + int err; + + /* + * No extended attributes allowed under .zfs + */ + if (flags & LOOKUP_XATTR) + return (EINVAL); + + ASSERT(dvp->v_type == VDIR); + + if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) + return (0); + + /* + * If we get a recursive call, that means we got called + * from the domount() code while it was trying to look up the + * spec (which looks like a local path for zfs). 
We need to + * add some flag to domount() to tell it not to do this lookup. + */ + if (MUTEX_HELD(&sdp->sd_lock)) + return (ENOENT); + + ZFS_ENTER(zfsvfs); + + if (flags & FIGNORECASE) { + boolean_t conflict = B_FALSE; + + err = dmu_snapshot_realname(zfsvfs->z_os, nm, real, + MAXNAMELEN, &conflict); + if (err == 0) { + nm = real; + } else if (err != ENOTSUP) { + ZFS_EXIT(zfsvfs); + return (err); + } + if (realpnp) + (void) strlcpy(realpnp->pn_buf, nm, + realpnp->pn_bufsize); + if (conflict && direntflags) + *direntflags = ED_CASE_CONFLICT; + } + + mutex_enter(&sdp->sd_lock); + search.se_name = (char *)nm; + if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) { + *vpp = sep->se_root; + VN_HOLD(*vpp); + err = traverse(vpp); + if (err) { + VN_RELE(*vpp); + *vpp = NULL; + } else if (*vpp == sep->se_root) { + /* + * The snapshot was unmounted behind our backs, + * try to remount it. + */ + goto domount; + } else { + /* + * VROOT was set during the traverse call. We need + * to clear it since we're pretending to be part + * of our parent's vfs. + */ + (*vpp)->v_flag &= ~VROOT; + } + mutex_exit(&sdp->sd_lock); + ZFS_EXIT(zfsvfs); + return (err); + } + + /* + * The requested snapshot is not currently mounted, look it up. + */ + err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname); + if (err) { + mutex_exit(&sdp->sd_lock); + ZFS_EXIT(zfsvfs); + /* + * handle "ls *" or "?" in a graceful manner, + * forcing EILSEQ to ENOENT. + * Since shell ultimately passes "*" or "?" as name to lookup + */ + return (err == EILSEQ ? ENOENT : err); + } + if (dmu_objset_open(snapname, DMU_OST_ZFS, + DS_MODE_USER | DS_MODE_READONLY, &snap) != 0) { + mutex_exit(&sdp->sd_lock); + ZFS_EXIT(zfsvfs); + return (ENOENT); + } + + sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP); + sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP); + (void) strcpy(sep->se_name, nm); + *vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap)); + avl_insert(&sdp->sd_snaps, sep, where); + + dmu_objset_close(snap); +domount: + mountpoint_len = strlen(refstr_value(dvp->v_vfsp->vfs_mntpt)) + + strlen("/.zfs/snapshot/") + strlen(nm) + 1; + mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP); + (void) snprintf(mountpoint, mountpoint_len, "%s/.zfs/snapshot/%s", + refstr_value(dvp->v_vfsp->vfs_mntpt), nm); + + margs.spec = snapname; + margs.dir = mountpoint; + margs.flags = MS_SYSSPACE | MS_NOMNTTAB; + margs.fstype = "zfs"; + margs.dataptr = NULL; + margs.datalen = 0; + margs.optptr = NULL; + margs.optlen = 0; + + err = domount("zfs", &margs, *vpp, kcred, &vfsp); + kmem_free(mountpoint, mountpoint_len); + + if (err == 0) { + /* + * Return the mounted root rather than the covered mount point. + * Takes the GFS vnode at .zfs/snapshot/ and returns + * the ZFS vnode mounted on top of the GFS node. This ZFS + * vnode is the root the newly created vfsp. + */ + VFS_RELE(vfsp); + err = traverse(vpp); + } + + if (err == 0) { + /* + * Fix up the root vnode mounted on .zfs/snapshot/. + * + * This is where we lie about our v_vfsp in order to + * make .zfs/snapshot/ accessible over NFS + * without requiring manual mounts of . + */ + ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs); + VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs; + (*vpp)->v_vfsp = zfsvfs->z_vfs; + (*vpp)->v_flag &= ~VROOT; + } + mutex_exit(&sdp->sd_lock); + ZFS_EXIT(zfsvfs); + + /* + * If we had an error, drop our hold on the vnode and + * zfsctl_snapshot_inactive() will clean up. 
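+ *
+ * (The overall effect of a successful lookup is an automount: for a
+ * filesystem mounted at /tank/fs, looking up "mon" here mounts the
+ * dataset "tank/fs@mon" on "/tank/fs/.zfs/snapshot/mon" and returns
+ * the root vnode of that new mount; "tank/fs" and "mon" are only
+ * example names.)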
+ */ + if (err) { + VN_RELE(*vpp); + *vpp = NULL; + } + return (err); +} + +/* ARGSUSED */ +static int +zfsctl_snapdir_readdir_cb(vnode_t *vp, void *dp, int *eofp, + offset_t *offp, offset_t *nextp, void *data, int flags) +{ + zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; + char snapname[MAXNAMELEN]; + uint64_t id, cookie; + boolean_t case_conflict; + int error; + + ZFS_ENTER(zfsvfs); + + cookie = *offp; + error = dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN, snapname, &id, + &cookie, &case_conflict); + if (error) { + ZFS_EXIT(zfsvfs); + if (error == ENOENT) { + *eofp = 1; + return (0); + } + return (error); + } + + if (flags & V_RDDIR_ENTFLAGS) { + edirent_t *eodp = dp; + + (void) strcpy(eodp->ed_name, snapname); + eodp->ed_ino = ZFSCTL_INO_SNAP(id); + eodp->ed_eflags = case_conflict ? ED_CASE_CONFLICT : 0; + } else { + struct dirent64 *odp = dp; + + (void) strcpy(odp->d_name, snapname); + odp->d_ino = ZFSCTL_INO_SNAP(id); + } + *nextp = cookie; + + ZFS_EXIT(zfsvfs); + + return (0); +} + +/* + * pvp is the '.zfs' directory (zfsctl_node_t). + * Creates vp, which is '.zfs/snapshot' (zfsctl_snapdir_t). + * + * This function is the callback to create a GFS vnode for '.zfs/snapshot' + * when a lookup is performed on .zfs for "snapshot". + */ +vnode_t * +zfsctl_mknode_snapdir(vnode_t *pvp) +{ + vnode_t *vp; + zfsctl_snapdir_t *sdp; + + vp = gfs_dir_create(sizeof (zfsctl_snapdir_t), pvp, + zfsctl_ops_snapdir, NULL, NULL, MAXNAMELEN, + zfsctl_snapdir_readdir_cb, NULL); + sdp = vp->v_data; + sdp->sd_node.zc_id = ZFSCTL_INO_SNAPDIR; + sdp->sd_node.zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime; + mutex_init(&sdp->sd_lock, NULL, MUTEX_DEFAULT, NULL); + avl_create(&sdp->sd_snaps, snapentry_compare, + sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node)); + return (vp); +} + +/* ARGSUSED */ +static int +zfsctl_snapdir_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, + caller_context_t *ct) +{ + zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; + zfsctl_snapdir_t *sdp = vp->v_data; + + ZFS_ENTER(zfsvfs); + zfsctl_common_getattr(vp, vap); + vap->va_nodeid = gfs_file_inode(vp); + vap->va_nlink = vap->va_size = avl_numnodes(&sdp->sd_snaps) + 2; + ZFS_EXIT(zfsvfs); + + return (0); +} + +/* ARGSUSED */ +static void +zfsctl_snapdir_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) +{ + zfsctl_snapdir_t *sdp = vp->v_data; + void *private; + + private = gfs_dir_inactive(vp); + if (private != NULL) { + ASSERT(avl_numnodes(&sdp->sd_snaps) == 0); + mutex_destroy(&sdp->sd_lock); + avl_destroy(&sdp->sd_snaps); + kmem_free(private, sizeof (zfsctl_snapdir_t)); + } +} + +static const fs_operation_def_t zfsctl_tops_snapdir[] = { + { VOPNAME_OPEN, { .vop_open = zfsctl_common_open } }, + { VOPNAME_CLOSE, { .vop_close = zfsctl_common_close } }, + { VOPNAME_IOCTL, { .error = fs_inval } }, + { VOPNAME_GETATTR, { .vop_getattr = zfsctl_snapdir_getattr } }, + { VOPNAME_ACCESS, { .vop_access = zfsctl_common_access } }, + { VOPNAME_RENAME, { .vop_rename = zfsctl_snapdir_rename } }, + { VOPNAME_RMDIR, { .vop_rmdir = zfsctl_snapdir_remove } }, + { VOPNAME_MKDIR, { .vop_mkdir = zfsctl_snapdir_mkdir } }, + { VOPNAME_READDIR, { .vop_readdir = gfs_vop_readdir } }, + { VOPNAME_LOOKUP, { .vop_lookup = zfsctl_snapdir_lookup } }, + { VOPNAME_SEEK, { .vop_seek = fs_seek } }, + { VOPNAME_INACTIVE, { .vop_inactive = zfsctl_snapdir_inactive } }, + { VOPNAME_FID, { .vop_fid = zfsctl_common_fid } }, + { NULL } +}; + +/* + * pvp is the GFS vnode '.zfs/snapshot'. 
+ * + * This creates a GFS node under '.zfs/snapshot' representing each + * snapshot. This newly created GFS node is what we mount snapshot + * vfs_t's ontop of. + */ +static vnode_t * +zfsctl_snapshot_mknode(vnode_t *pvp, uint64_t objset) +{ + vnode_t *vp; + zfsctl_node_t *zcp; + + vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp, + zfsctl_ops_snapshot, NULL, NULL, MAXNAMELEN, NULL, NULL); + zcp = vp->v_data; + zcp->zc_id = objset; + VFS_HOLD(vp->v_vfsp); + + return (vp); +} + +static void +zfsctl_snapshot_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) +{ + zfsctl_snapdir_t *sdp; + zfs_snapentry_t *sep, *next; + vnode_t *dvp; + + VERIFY(gfs_dir_lookup(vp, "..", &dvp, cr, 0, NULL, NULL) == 0); + sdp = dvp->v_data; + + mutex_enter(&sdp->sd_lock); + + if (vp->v_count > 1) { + mutex_exit(&sdp->sd_lock); + return; + } + ASSERT(!vn_ismntpt(vp)); + + sep = avl_first(&sdp->sd_snaps); + while (sep != NULL) { + next = AVL_NEXT(&sdp->sd_snaps, sep); + + if (sep->se_root == vp) { + avl_remove(&sdp->sd_snaps, sep); + kmem_free(sep->se_name, strlen(sep->se_name) + 1); + kmem_free(sep, sizeof (zfs_snapentry_t)); + break; + } + sep = next; + } + ASSERT(sep != NULL); + + mutex_exit(&sdp->sd_lock); + VN_RELE(dvp); + VFS_RELE(vp->v_vfsp); + + /* + * Dispose of the vnode for the snapshot mount point. + * This is safe to do because once this entry has been removed + * from the AVL tree, it can't be found again, so cannot become + * "active". If we lookup the same name again we will end up + * creating a new vnode. + */ + gfs_vop_inactive(vp, cr, ct); +} + + +/* + * These VP's should never see the light of day. They should always + * be covered. + */ +static const fs_operation_def_t zfsctl_tops_snapshot[] = { + VOPNAME_INACTIVE, { .vop_inactive = zfsctl_snapshot_inactive }, + NULL, NULL +}; + +int +zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp) +{ + zfsvfs_t *zfsvfs = vfsp->vfs_data; + vnode_t *dvp, *vp; + zfsctl_snapdir_t *sdp; + zfsctl_node_t *zcp; + zfs_snapentry_t *sep; + int error; + + ASSERT(zfsvfs->z_ctldir != NULL); + error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp, + NULL, 0, NULL, kcred, NULL, NULL, NULL); + if (error != 0) + return (error); + sdp = dvp->v_data; + + mutex_enter(&sdp->sd_lock); + sep = avl_first(&sdp->sd_snaps); + while (sep != NULL) { + vp = sep->se_root; + zcp = vp->v_data; + if (zcp->zc_id == objsetid) + break; + + sep = AVL_NEXT(&sdp->sd_snaps, sep); + } + + if (sep != NULL) { + VN_HOLD(vp); + /* + * Return the mounted root rather than the covered mount point. + * Takes the GFS vnode at .zfs/snapshot/ + * and returns the ZFS vnode mounted on top of the GFS node. + * This ZFS vnode is the root of the vfs for objset 'objsetid'. + */ + error = traverse(&vp); + if (error == 0) { + if (vp == sep->se_root) + error = EINVAL; + else + *zfsvfsp = VTOZ(vp)->z_zfsvfs; + } + mutex_exit(&sdp->sd_lock); + VN_RELE(vp); + } else { + error = EINVAL; + mutex_exit(&sdp->sd_lock); + } + + VN_RELE(dvp); + + return (error); +} + +/* + * Unmount any snapshots for the given filesystem. This is called from + * zfs_umount() - if we have a ctldir, then go through and unmount all the + * snapshots. 
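+ *
+ * A rough sketch of the caller's side (the actual logic lives in
+ * zfs_umount(), which is not part of this file, so the details here
+ * are approximate):
+ *
+ *	if (zfsvfs->z_ctldir != NULL &&
+ *	    (error = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
+ *		return (error);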
+ */ +int +zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr) +{ + zfsvfs_t *zfsvfs = vfsp->vfs_data; + vnode_t *dvp; + zfsctl_snapdir_t *sdp; + zfs_snapentry_t *sep, *next; + int error; + + ASSERT(zfsvfs->z_ctldir != NULL); + error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp, + NULL, 0, NULL, cr, NULL, NULL, NULL); + if (error != 0) + return (error); + sdp = dvp->v_data; + + mutex_enter(&sdp->sd_lock); + + sep = avl_first(&sdp->sd_snaps); + while (sep != NULL) { + next = AVL_NEXT(&sdp->sd_snaps, sep); + + /* + * If this snapshot is not mounted, then it must + * have just been unmounted by somebody else, and + * will be cleaned up by zfsctl_snapdir_inactive(). + */ + if (vn_ismntpt(sep->se_root)) { + avl_remove(&sdp->sd_snaps, sep); + error = zfsctl_unmount_snap(sep, fflags, cr); + if (error) { + avl_add(&sdp->sd_snaps, sep); + break; + } + } + sep = next; + } + + mutex_exit(&sdp->sd_lock); + VN_RELE(dvp); + + return (error); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_dir.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_dir.c new file mode 100644 index 000000000000..1ec4932646e9 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_dir.c @@ -0,0 +1,977 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fs/fs_subr.h" +#include +#include +#include +#include +#include +#include +#include + +/* + * zfs_match_find() is used by zfs_dirent_lock() to peform zap lookups + * of names after deciding which is the appropriate lookup interface. + */ +static int +zfs_match_find(zfsvfs_t *zfsvfs, znode_t *dzp, char *name, boolean_t exact, + boolean_t update, int *deflags, pathname_t *rpnp, uint64_t *zoid) +{ + int error; + + if (zfsvfs->z_norm) { + matchtype_t mt = MT_FIRST; + boolean_t conflict = B_FALSE; + size_t bufsz = 0; + char *buf = NULL; + + if (rpnp) { + buf = rpnp->pn_buf; + bufsz = rpnp->pn_bufsize; + } + if (exact) + mt = MT_EXACT; + /* + * In the non-mixed case we only expect there would ever + * be one match, but we need to use the normalizing lookup. + */ + error = zap_lookup_norm(zfsvfs->z_os, dzp->z_id, name, 8, 1, + zoid, mt, buf, bufsz, &conflict); + if (!error && deflags) + *deflags = conflict ? 
ED_CASE_CONFLICT : 0; + } else { + error = zap_lookup(zfsvfs->z_os, dzp->z_id, name, 8, 1, zoid); + } + *zoid = ZFS_DIRENT_OBJ(*zoid); + + if (error == ENOENT && update) + dnlc_update(ZTOV(dzp), name, DNLC_NO_VNODE); + + return (error); +} + +/* + * Lock a directory entry. A dirlock on protects that name + * in dzp's directory zap object. As long as you hold a dirlock, you can + * assume two things: (1) dzp cannot be reaped, and (2) no other thread + * can change the zap entry for (i.e. link or unlink) this name. + * + * Input arguments: + * dzp - znode for directory + * name - name of entry to lock + * flag - ZNEW: if the entry already exists, fail with EEXIST. + * ZEXISTS: if the entry does not exist, fail with ENOENT. + * ZSHARED: allow concurrent access with other ZSHARED callers. + * ZXATTR: we want dzp's xattr directory + * ZCILOOK: On a mixed sensitivity file system, + * this lookup should be case-insensitive. + * ZCIEXACT: On a purely case-insensitive file system, + * this lookup should be case-sensitive. + * ZRENAMING: we are locking for renaming, force narrow locks + * + * Output arguments: + * zpp - pointer to the znode for the entry (NULL if there isn't one) + * dlpp - pointer to the dirlock for this entry (NULL on error) + * direntflags - (case-insensitive lookup only) + * flags if multiple case-sensitive matches exist in directory + * realpnp - (case-insensitive lookup only) + * actual name matched within the directory + * + * Return value: 0 on success or errno on failure. + * + * NOTE: Always checks for, and rejects, '.' and '..'. + * NOTE: For case-insensitive file systems we take wide locks (see below), + * but return znode pointers to a single match. + */ +int +zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp, + int flag, int *direntflags, pathname_t *realpnp) +{ + zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zfs_dirlock_t *dl; + boolean_t update; + boolean_t exact; + uint64_t zoid; + vnode_t *vp = NULL; + int error = 0; + int cmpflags; + + *zpp = NULL; + *dlpp = NULL; + + /* + * Verify that we are not trying to lock '.', '..', or '.zfs' + */ + if (name[0] == '.' && + (name[1] == '\0' || (name[1] == '.' && name[2] == '\0')) || + zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0) + return (EEXIST); + + /* + * Case sensitivity and normalization preferences are set when + * the file system is created. These are stored in the + * zfsvfs->z_case and zfsvfs->z_norm fields. These choices + * affect what vnodes can be cached in the DNLC, how we + * perform zap lookups, and the "width" of our dirlocks. + * + * A normal dirlock locks a single name. Note that with + * normalization a name can be composed multiple ways, but + * when normalized, these names all compare equal. A wide + * dirlock locks multiple names. We need these when the file + * system is supporting mixed-mode access. It is sometimes + * necessary to lock all case permutations of file name at + * once so that simultaneous case-insensitive/case-sensitive + * behaves as rationally as possible. + */ + + /* + * Decide if exact matches should be requested when performing + * a zap lookup on file systems supporting case-insensitive + * access. + */ + exact = + ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE) && (flag & ZCIEXACT)) || + ((zfsvfs->z_case == ZFS_CASE_MIXED) && !(flag & ZCILOOK)); + + /* + * Only look in or update the DNLC if we are looking for the + * name on a file system that does not require normalization + * or case folding. 
We can also look there if we happen to be + * on a non-normalizing, mixed sensitivity file system IF we + * are looking for the exact name. + * + * Maybe can add TO-UPPERed version of name to dnlc in ci-only + * case for performance improvement? + */ + update = !zfsvfs->z_norm || + ((zfsvfs->z_case == ZFS_CASE_MIXED) && + !(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK)); + + /* + * ZRENAMING indicates we are in a situation where we should + * take narrow locks regardless of the file system's + * preferences for normalizing and case folding. This will + * prevent us deadlocking trying to grab the same wide lock + * twice if the two names happen to be case-insensitive + * matches. + */ + if (flag & ZRENAMING) + cmpflags = 0; + else + cmpflags = zfsvfs->z_norm; + + /* + * Wait until there are no locks on this name. + */ + rw_enter(&dzp->z_name_lock, RW_READER); + mutex_enter(&dzp->z_lock); + for (;;) { + if (dzp->z_unlinked) { + mutex_exit(&dzp->z_lock); + rw_exit(&dzp->z_name_lock); + return (ENOENT); + } + for (dl = dzp->z_dirlocks; dl != NULL; dl = dl->dl_next) { + if ((u8_strcmp(name, dl->dl_name, 0, cmpflags, + U8_UNICODE_LATEST, &error) == 0) || error != 0) + break; + } + if (error != 0) { + mutex_exit(&dzp->z_lock); + rw_exit(&dzp->z_name_lock); + return (ENOENT); + } + if (dl == NULL) { + /* + * Allocate a new dirlock and add it to the list. + */ + dl = kmem_alloc(sizeof (zfs_dirlock_t), KM_SLEEP); + cv_init(&dl->dl_cv, NULL, CV_DEFAULT, NULL); + dl->dl_name = name; + dl->dl_sharecnt = 0; + dl->dl_namesize = 0; + dl->dl_dzp = dzp; + dl->dl_next = dzp->z_dirlocks; + dzp->z_dirlocks = dl; + break; + } + if ((flag & ZSHARED) && dl->dl_sharecnt != 0) + break; + cv_wait(&dl->dl_cv, &dzp->z_lock); + } + + if ((flag & ZSHARED) && ++dl->dl_sharecnt > 1 && dl->dl_namesize == 0) { + /* + * We're the second shared reference to dl. Make a copy of + * dl_name in case the first thread goes away before we do. + * Note that we initialize the new name before storing its + * pointer into dl_name, because the first thread may load + * dl->dl_name at any time. He'll either see the old value, + * which is his, or the new shared copy; either is OK. + */ + dl->dl_namesize = strlen(dl->dl_name) + 1; + name = kmem_alloc(dl->dl_namesize, KM_SLEEP); + bcopy(dl->dl_name, name, dl->dl_namesize); + dl->dl_name = name; + } + + mutex_exit(&dzp->z_lock); + + /* + * We have a dirlock on the name. (Note that it is the dirlock, + * not the dzp's z_lock, that protects the name in the zap object.) + * See if there's an object by this name; if so, put a hold on it. + */ + if (flag & ZXATTR) { + zoid = dzp->z_phys->zp_xattr; + error = (zoid == 0 ? ENOENT : 0); + } else { + if (update) + vp = dnlc_lookup(ZTOV(dzp), name); + if (vp == DNLC_NO_VNODE) { + VN_RELE(vp); + error = ENOENT; + } else if (vp) { + if (flag & ZNEW) { + zfs_dirent_unlock(dl); + VN_RELE(vp); + return (EEXIST); + } + *dlpp = dl; + *zpp = VTOZ(vp); + return (0); + } else { + error = zfs_match_find(zfsvfs, dzp, name, exact, + update, direntflags, realpnp, &zoid); + } + } + if (error) { + if (error != ENOENT || (flag & ZEXISTS)) { + zfs_dirent_unlock(dl); + return (error); + } + } else { + if (flag & ZNEW) { + zfs_dirent_unlock(dl); + return (EEXIST); + } + error = zfs_zget(zfsvfs, zoid, zpp); + if (error) { + zfs_dirent_unlock(dl); + return (error); + } + if (!(flag & ZXATTR) && update) + dnlc_update(ZTOV(dzp), name, ZTOV(*zpp)); + } + + *dlpp = dl; + + return (0); +} + +/* + * Unlock this directory entry and wake anyone who was waiting for it. 
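+ *
+ * The usual pairing with zfs_dirent_lock() looks roughly like the
+ * following (an illustrative sketch rather than a caller from this
+ * file):
+ *
+ *	error = zfs_dirent_lock(&dl, dzp, name, &zp, ZEXISTS, NULL, NULL);
+ *	if (error == 0) {
+ *		... operate on zp; 'name' cannot be linked or unlinked ...
+ *		zfs_dirent_unlock(dl);
+ *		VN_RELE(ZTOV(zp));
+ *	}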
+ */ +void +zfs_dirent_unlock(zfs_dirlock_t *dl) +{ + znode_t *dzp = dl->dl_dzp; + zfs_dirlock_t **prev_dl, *cur_dl; + + mutex_enter(&dzp->z_lock); + rw_exit(&dzp->z_name_lock); + if (dl->dl_sharecnt > 1) { + dl->dl_sharecnt--; + mutex_exit(&dzp->z_lock); + return; + } + prev_dl = &dzp->z_dirlocks; + while ((cur_dl = *prev_dl) != dl) + prev_dl = &cur_dl->dl_next; + *prev_dl = dl->dl_next; + cv_broadcast(&dl->dl_cv); + mutex_exit(&dzp->z_lock); + + if (dl->dl_namesize != 0) + kmem_free(dl->dl_name, dl->dl_namesize); + cv_destroy(&dl->dl_cv); + kmem_free(dl, sizeof (*dl)); +} + +/* + * Look up an entry in a directory. + * + * NOTE: '.' and '..' are handled as special cases because + * no directory entries are actually stored for them. If this is + * the root of a filesystem, then '.zfs' is also treated as a + * special pseudo-directory. + */ +int +zfs_dirlook(znode_t *dzp, char *name, vnode_t **vpp, int flags, + int *deflg, pathname_t *rpnp) +{ + zfs_dirlock_t *dl; + znode_t *zp; + int error = 0; + + if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) { + *vpp = ZTOV(dzp); + VN_HOLD(*vpp); + } else if (name[0] == '.' && name[1] == '.' && name[2] == 0) { + zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + /* + * If we are a snapshot mounted under .zfs, return + * the vp for the snapshot directory. + */ + if (dzp->z_phys->zp_parent == dzp->z_id && + zfsvfs->z_parent != zfsvfs) { + error = zfsctl_root_lookup(zfsvfs->z_parent->z_ctldir, + "snapshot", vpp, NULL, 0, NULL, kcred, + NULL, NULL, NULL); + return (error); + } + rw_enter(&dzp->z_parent_lock, RW_READER); + error = zfs_zget(zfsvfs, dzp->z_phys->zp_parent, &zp); + if (error == 0) + *vpp = ZTOV(zp); + rw_exit(&dzp->z_parent_lock); + } else if (zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0) { + *vpp = zfsctl_root(dzp); + } else { + int zf; + + zf = ZEXISTS | ZSHARED; + if (flags & FIGNORECASE) + zf |= ZCILOOK; + + error = zfs_dirent_lock(&dl, dzp, name, &zp, zf, deflg, rpnp); + if (error == 0) { + *vpp = ZTOV(zp); + zfs_dirent_unlock(dl); + dzp->z_zn_prefetch = B_TRUE; /* enable prefetching */ + } + rpnp = NULL; + } + + if ((flags & FIGNORECASE) && rpnp && !error) + (void) strlcpy(rpnp->pn_buf, name, rpnp->pn_bufsize); + + return (error); +} + +/* + * unlinked Set (formerly known as the "delete queue") Error Handling + * + * When dealing with the unlinked set, we dmu_tx_hold_zap(), but we + * don't specify the name of the entry that we will be manipulating. We + * also fib and say that we won't be adding any new entries to the + * unlinked set, even though we might (this is to lower the minimum file + * size that can be deleted in a full filesystem). So on the small + * chance that the nlink list is using a fat zap (ie. has more than + * 2000 entries), we *may* not pre-read a block that's needed. + * Therefore it is remotely possible for some of the assertions + * regarding the unlinked set below to fail due to i/o error. On a + * nondebug system, this will result in the space being leaked. + */ +void +zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx) +{ + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + + ASSERT(zp->z_unlinked); + ASSERT3U(zp->z_phys->zp_links, ==, 0); + + VERIFY3U(0, ==, + zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx)); +} + +/* + * Clean up any znodes that had no links when we either crashed or + * (force) umounted the file system. 
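+ *
+ * These are the znodes parked in the unlinked set by zfs_unlinked_add()
+ * above; re-marking them with z_unlinked here lets the normal inactive
+ * path finish the deletion via zfs_rmnode() once the final hold is
+ * released.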
+ */ +void +zfs_unlinked_drain(zfsvfs_t *zfsvfs) +{ + zap_cursor_t zc; + zap_attribute_t zap; + dmu_object_info_t doi; + znode_t *zp; + int error; + + /* + * Interate over the contents of the unlinked set. + */ + for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_unlinkedobj); + zap_cursor_retrieve(&zc, &zap) == 0; + zap_cursor_advance(&zc)) { + + /* + * See what kind of object we have in list + */ + + error = dmu_object_info(zfsvfs->z_os, + zap.za_first_integer, &doi); + if (error != 0) + continue; + + ASSERT((doi.doi_type == DMU_OT_PLAIN_FILE_CONTENTS) || + (doi.doi_type == DMU_OT_DIRECTORY_CONTENTS)); + /* + * We need to re-mark these list entries for deletion, + * so we pull them back into core and set zp->z_unlinked. + */ + error = zfs_zget(zfsvfs, zap.za_first_integer, &zp); + + /* + * We may pick up znodes that are already marked for deletion. + * This could happen during the purge of an extended attribute + * directory. All we need to do is skip over them, since they + * are already in the system marked z_unlinked. + */ + if (error != 0) + continue; + + zp->z_unlinked = B_TRUE; + VN_RELE(ZTOV(zp)); + } + zap_cursor_fini(&zc); +} + +/* + * Delete the entire contents of a directory. Return a count + * of the number of entries that could not be deleted. If we encounter + * an error, return a count of at least one so that the directory stays + * in the unlinked set. + * + * NOTE: this function assumes that the directory is inactive, + * so there is no need to lock its entries before deletion. + * Also, it assumes the directory contents is *only* regular + * files. + */ +static int +zfs_purgedir(znode_t *dzp) +{ + zap_cursor_t zc; + zap_attribute_t zap; + znode_t *xzp; + dmu_tx_t *tx; + zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zfs_dirlock_t dl; + int skipped = 0; + int error; + + for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id); + (error = zap_cursor_retrieve(&zc, &zap)) == 0; + zap_cursor_advance(&zc)) { + error = zfs_zget(zfsvfs, + ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp); + if (error) { + skipped += 1; + continue; + } + + ASSERT((ZTOV(xzp)->v_type == VREG) || + (ZTOV(xzp)->v_type == VLNK)); + + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_bonus(tx, dzp->z_id); + dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name); + dmu_tx_hold_bonus(tx, xzp->z_id); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + dmu_tx_abort(tx); + VN_RELE(ZTOV(xzp)); + skipped += 1; + continue; + } + bzero(&dl, sizeof (dl)); + dl.dl_dzp = dzp; + dl.dl_name = zap.za_name; + + error = zfs_link_destroy(&dl, xzp, tx, 0, NULL); + if (error) + skipped += 1; + dmu_tx_commit(tx); + + VN_RELE(ZTOV(xzp)); + } + zap_cursor_fini(&zc); + if (error != ENOENT) + skipped += 1; + return (skipped); +} + +void +zfs_rmnode(znode_t *zp) +{ + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + objset_t *os = zfsvfs->z_os; + znode_t *xzp = NULL; + dmu_tx_t *tx; + uint64_t acl_obj; + int error; + + ASSERT(ZTOV(zp)->v_count == 0); + ASSERT(zp->z_phys->zp_links == 0); + + /* + * If this is a ZIL replay then leave the object in the unlinked set. + * Otherwise we can get a deadlock, because the delete can be + * quite large and span multiple tx's and txgs, but each replay + * creates a tx to atomically run the replay function and mark the + * replay record as complete. We deadlock trying to start a tx in + * a new txg to further the deletion but can't because the replay + * tx hasn't finished. 
+ * + * We actually delete the object if we get a failure to create an + * object in zil_replay_log_record(), or after calling zil_replay(). + */ + if (zfsvfs->z_assign >= TXG_INITIAL) { + zfs_znode_dmu_fini(zp); + zfs_znode_free(zp); + return; + } + + /* + * If this is an attribute directory, purge its contents. + */ + if (ZTOV(zp)->v_type == VDIR && (zp->z_phys->zp_flags & ZFS_XATTR)) { + if (zfs_purgedir(zp) != 0) { + /* + * Not enough space to delete some xattrs. + * Leave it in the unlinked set. + */ + zfs_znode_dmu_fini(zp); + zfs_znode_free(zp); + return; + } + } + + /* + * Free up all the data in the file. + */ + error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END); + if (error) { + /* + * Not enough space. Leave the file in the unlinked set. + */ + zfs_znode_dmu_fini(zp); + zfs_znode_free(zp); + return; + } + + /* + * If the file has extended attributes, we're going to unlink + * the xattr dir. + */ + if (zp->z_phys->zp_xattr) { + error = zfs_zget(zfsvfs, zp->z_phys->zp_xattr, &xzp); + ASSERT(error == 0); + } + + acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj; + + /* + * Set up the final transaction. + */ + tx = dmu_tx_create(os); + dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); + if (xzp) { + dmu_tx_hold_bonus(tx, xzp->z_id); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL); + } + if (acl_obj) + dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + /* + * Not enough space to delete the file. Leave it in the + * unlinked set, leaking it until the fs is remounted (at + * which point we'll call zfs_unlinked_drain() to process it). + */ + dmu_tx_abort(tx); + zfs_znode_dmu_fini(zp); + zfs_znode_free(zp); + goto out; + } + + if (xzp) { + dmu_buf_will_dirty(xzp->z_dbuf, tx); + mutex_enter(&xzp->z_lock); + xzp->z_unlinked = B_TRUE; /* mark xzp for deletion */ + xzp->z_phys->zp_links = 0; /* no more links to it */ + mutex_exit(&xzp->z_lock); + zfs_unlinked_add(xzp, tx); + } + + /* Remove this znode from the unlinked set */ + VERIFY3U(0, ==, + zap_remove_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx)); + + zfs_znode_delete(zp, tx); + + dmu_tx_commit(tx); +out: + if (xzp) + VN_RELE(ZTOV(xzp)); +} + +static uint64_t +zfs_dirent(znode_t *zp) +{ + uint64_t de = zp->z_id; + if (zp->z_zfsvfs->z_version >= ZPL_VERSION_DIRENT_TYPE) + de |= IFTODT((zp)->z_phys->zp_mode) << 60; + return (de); +} + +/* + * Link zp into dl. Can only fail if zp has been unlinked. + */ +int +zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag) +{ + znode_t *dzp = dl->dl_dzp; + vnode_t *vp = ZTOV(zp); + uint64_t value; + int zp_is_dir = (vp->v_type == VDIR); + int error; + + dmu_buf_will_dirty(zp->z_dbuf, tx); + mutex_enter(&zp->z_lock); + + if (!(flag & ZRENAMING)) { + if (zp->z_unlinked) { /* no new links to unlinked zp */ + ASSERT(!(flag & (ZNEW | ZEXISTS))); + mutex_exit(&zp->z_lock); + return (ENOENT); + } + zp->z_phys->zp_links++; + } + zp->z_phys->zp_parent = dzp->z_id; /* dzp is now zp's parent */ + + if (!(flag & ZNEW)) + zfs_time_stamper_locked(zp, STATE_CHANGED, tx); + mutex_exit(&zp->z_lock); + + dmu_buf_will_dirty(dzp->z_dbuf, tx); + mutex_enter(&dzp->z_lock); + dzp->z_phys->zp_size++; /* one dirent added */ + dzp->z_phys->zp_links += zp_is_dir; /* ".." 
link from zp */ + zfs_time_stamper_locked(dzp, CONTENT_MODIFIED, tx); + mutex_exit(&dzp->z_lock); + + value = zfs_dirent(zp); + error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name, + 8, 1, &value, tx); + ASSERT(error == 0); + + dnlc_update(ZTOV(dzp), dl->dl_name, vp); + + return (0); +} + +/* + * Unlink zp from dl, and mark zp for deletion if this was the last link. + * Can fail if zp is a mount point (EBUSY) or a non-empty directory (EEXIST). + * If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list. + * If it's non-NULL, we use it to indicate whether the znode needs deletion, + * and it's the caller's job to do it. + */ +int +zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag, + boolean_t *unlinkedp) +{ + znode_t *dzp = dl->dl_dzp; + vnode_t *vp = ZTOV(zp); + int zp_is_dir = (vp->v_type == VDIR); + boolean_t unlinked = B_FALSE; + int error; + + dnlc_remove(ZTOV(dzp), dl->dl_name); + + if (!(flag & ZRENAMING)) { + dmu_buf_will_dirty(zp->z_dbuf, tx); + + if (vn_vfswlock(vp)) /* prevent new mounts on zp */ + return (EBUSY); + + if (vn_ismntpt(vp)) { /* don't remove mount point */ + vn_vfsunlock(vp); + return (EBUSY); + } + + mutex_enter(&zp->z_lock); + if (zp_is_dir && !zfs_dirempty(zp)) { /* dir not empty */ + mutex_exit(&zp->z_lock); + vn_vfsunlock(vp); + return (EEXIST); + } + if (zp->z_phys->zp_links <= zp_is_dir) { + zfs_panic_recover("zfs: link count on %s is %u, " + "should be at least %u", + zp->z_vnode->v_path ? zp->z_vnode->v_path : + "", (int)zp->z_phys->zp_links, + zp_is_dir + 1); + zp->z_phys->zp_links = zp_is_dir + 1; + } + if (--zp->z_phys->zp_links == zp_is_dir) { + zp->z_unlinked = B_TRUE; + zp->z_phys->zp_links = 0; + unlinked = B_TRUE; + } else { + zfs_time_stamper_locked(zp, STATE_CHANGED, tx); + } + mutex_exit(&zp->z_lock); + vn_vfsunlock(vp); + } + + dmu_buf_will_dirty(dzp->z_dbuf, tx); + mutex_enter(&dzp->z_lock); + dzp->z_phys->zp_size--; /* one dirent removed */ + dzp->z_phys->zp_links -= zp_is_dir; /* ".." link from zp */ + zfs_time_stamper_locked(dzp, CONTENT_MODIFIED, tx); + mutex_exit(&dzp->z_lock); + + if (zp->z_zfsvfs->z_norm) { + if (((zp->z_zfsvfs->z_case == ZFS_CASE_INSENSITIVE) && + (flag & ZCIEXACT)) || + ((zp->z_zfsvfs->z_case == ZFS_CASE_MIXED) && + !(flag & ZCILOOK))) + error = zap_remove_norm(zp->z_zfsvfs->z_os, + dzp->z_id, dl->dl_name, MT_EXACT, tx); + else + error = zap_remove_norm(zp->z_zfsvfs->z_os, + dzp->z_id, dl->dl_name, MT_FIRST, tx); + } else { + error = zap_remove(zp->z_zfsvfs->z_os, + dzp->z_id, dl->dl_name, tx); + } + ASSERT(error == 0); + + if (unlinkedp != NULL) + *unlinkedp = unlinked; + else if (unlinked) + zfs_unlinked_add(zp, tx); + + return (0); +} + +/* + * Indicate whether the directory is empty. Works with or without z_lock + * held, but can only be consider a hint in the latter case. Returns true + * if only "." and ".." remain and there's no work in progress. 
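+ *
+ * (zp_size acts as a dirent count -- see the zp_size++/zp_size--
+ * updates in zfs_link_create()/zfs_link_destroy() above -- so a value
+ * of 2 means only "." and ".." remain, and a non-NULL z_dirlocks list
+ * means a directory operation is still in flight.)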
+ */ +boolean_t +zfs_dirempty(znode_t *dzp) +{ + return (dzp->z_phys->zp_size == 2 && dzp->z_dirlocks == 0); +} + +int +zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr) +{ + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_t *xzp; + dmu_tx_t *tx; + int error; + zfs_fuid_info_t *fuidp = NULL; + + *xvpp = NULL; + + if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr)) + return (error); + + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_bonus(tx, zp->z_id); + dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); + if (IS_EPHEMERAL(crgetuid(cr)) || IS_EPHEMERAL(crgetgid(cr))) { + if (zfsvfs->z_fuid_obj == 0) { + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); + } else { + dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); + dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + } + } + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) + dmu_tx_wait(tx); + dmu_tx_abort(tx); + return (error); + } + zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, 0, NULL, &fuidp); + ASSERT(xzp->z_phys->zp_parent == zp->z_id); + dmu_buf_will_dirty(zp->z_dbuf, tx); + zp->z_phys->zp_xattr = xzp->z_id; + + (void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp, + xzp, "", NULL, fuidp, vap); + if (fuidp) + zfs_fuid_info_free(fuidp); + dmu_tx_commit(tx); + + *xvpp = ZTOV(xzp); + + return (0); +} + +/* + * Return a znode for the extended attribute directory for zp. + * ** If the directory does not already exist, it is created ** + * + * IN: zp - znode to obtain attribute directory from + * cr - credentials of caller + * flags - flags from the VOP_LOOKUP call + * + * OUT: xzpp - pointer to extended attribute znode + * + * RETURN: 0 on success + * error number on failure + */ +int +zfs_get_xattrdir(znode_t *zp, vnode_t **xvpp, cred_t *cr, int flags) +{ + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_t *xzp; + zfs_dirlock_t *dl; + vattr_t va; + int error; +top: + error = zfs_dirent_lock(&dl, zp, "", &xzp, ZXATTR, NULL, NULL); + if (error) + return (error); + + if (xzp != NULL) { + *xvpp = ZTOV(xzp); + zfs_dirent_unlock(dl); + return (0); + } + + ASSERT(zp->z_phys->zp_xattr == 0); + + if (!(flags & CREATE_XATTR_DIR)) { + zfs_dirent_unlock(dl); + return (ENOENT); + } + + if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) { + zfs_dirent_unlock(dl); + return (EROFS); + } + + /* + * The ability to 'create' files in an attribute + * directory comes from the write_xattr permission on the base file. + * + * The ability to 'search' an attribute directory requires + * read_xattr permission on the base file. + * + * Once in a directory the ability to read/write attributes + * is controlled by the permissions on the attribute file. + */ + va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID; + va.va_type = VDIR; + va.va_mode = S_IFDIR | S_ISVTX | 0777; + zfs_fuid_map_ids(zp, cr, &va.va_uid, &va.va_gid); + + error = zfs_make_xattrdir(zp, &va, xvpp, cr); + zfs_dirent_unlock(dl); + + if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + /* NB: we already did dmu_tx_wait() if necessary */ + goto top; + } + + return (error); +} + +/* + * Decide whether it is okay to remove within a sticky directory. 
+ * + * In sticky directories, write access is not sufficient; + * you can remove entries from a directory only if: + * + * you own the directory, + * you own the entry, + * the entry is a plain file and you have write access, + * or you are privileged (checked in secpolicy...). + * + * The function returns 0 if remove access is granted. + */ +int +zfs_sticky_remove_access(znode_t *zdp, znode_t *zp, cred_t *cr) +{ + uid_t uid; + uid_t downer; + uid_t fowner; + zfsvfs_t *zfsvfs = zdp->z_zfsvfs; + + if (zdp->z_zfsvfs->z_assign >= TXG_INITIAL) /* ZIL replay */ + return (0); + + if ((zdp->z_phys->zp_mode & S_ISVTX) == 0) + return (0); + + downer = zfs_fuid_map_id(zfsvfs, zdp->z_phys->zp_uid, cr, ZFS_OWNER); + fowner = zfs_fuid_map_id(zfsvfs, zp->z_phys->zp_uid, cr, ZFS_OWNER); + + if ((uid = crgetuid(cr)) == downer || uid == fowner || + (ZTOV(zp)->v_type == VREG && + zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr) == 0)) + return (0); + else + return (secpolicy_vnode_remove(cr)); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_fm.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_fm.c new file mode 100644 index 000000000000..236d69e7e6f0 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_fm.c @@ -0,0 +1,362 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * This general routine is responsible for generating all the different ZFS + * ereports. The payload is dependent on the class, and which arguments are + * supplied to the function: + * + * EREPORT POOL VDEV IO + * block X X X + * data X X + * device X X + * pool X + * + * If we are in a loading state, all errors are chained together by the same + * SPA-wide ENA (Error Numeric Association). + * + * For isolated I/O requests, we get the ENA from the zio_t. The propagation + * gets very complicated due to RAID-Z, gang blocks, and vdev caching. We want + * to chain together all ereports associated with a logical piece of data. For + * read I/Os, there are basically three 'types' of I/O, which form a roughly + * layered diagram: + * + * +---------------+ + * | Aggregate I/O | No associated logical data or device + * +---------------+ + * | + * V + * +---------------+ Reads associated with a piece of logical data. + * | Read I/O | This includes reads on behalf of RAID-Z, + * +---------------+ mirrors, gang blocks, retries, etc. + * | + * V + * +---------------+ Reads associated with a particular device, but + * | Physical I/O | no logical data. Issued as part of vdev caching + * +---------------+ and I/O aggregation. 
+ * + * Note that 'physical I/O' here is not the same terminology as used in the rest + * of ZIO. Typically, 'physical I/O' simply means that there is no attached + * blockpointer. But I/O with no associated block pointer can still be related + * to a logical piece of data (i.e. RAID-Z requests). + * + * Purely physical I/O always have unique ENAs. They are not related to a + * particular piece of logical data, and therefore cannot be chained together. + * We still generate an ereport, but the DE doesn't correlate it with any + * logical piece of data. When such an I/O fails, the delegated I/O requests + * will issue a retry, which will trigger the 'real' ereport with the correct + * ENA. + * + * We keep track of the ENA for a ZIO chain through the 'io_logical' member. + * When a new logical I/O is issued, we set this to point to itself. Child I/Os + * then inherit this pointer, so that when it is first set subsequent failures + * will use the same ENA. For vdev cache fill and queue aggregation I/O, + * this pointer is set to NULL, and no ereport will be generated (since it + * doesn't actually correspond to any particular device or piece of data, + * and the caller will always retry without caching or queueing anyway). + */ +void +zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio, + uint64_t stateoroffset, uint64_t size) +{ +#ifdef _KERNEL + nvlist_t *ereport, *detector; + uint64_t ena; + char class[64]; + int state; + + /* + * If we are doing a spa_tryimport(), ignore errors. + */ + if (spa->spa_load_state == SPA_LOAD_TRYIMPORT) + return; + + /* + * If we are in the middle of opening a pool, and the previous attempt + * failed, don't bother logging any new ereports - we're just going to + * get the same diagnosis anyway. + */ + if (spa->spa_load_state != SPA_LOAD_NONE && + spa->spa_last_open_failed) + return; + + if (zio != NULL) { + /* + * If this is not a read or write zio, ignore the error. This + * can occur if the DKIOCFLUSHWRITECACHE ioctl fails. + */ + if (zio->io_type != ZIO_TYPE_READ && + zio->io_type != ZIO_TYPE_WRITE) + return; + + /* + * Ignore any errors from speculative I/Os, as failure is an + * expected result. + */ + if (zio->io_flags & ZIO_FLAG_SPECULATIVE) + return; + + /* + * If the vdev has already been marked as failing due to a + * failed probe, then ignore any subsequent I/O errors, as the + * DE will automatically fault the vdev on the first such + * failure. + */ + if (vd != NULL && + (!vdev_readable(vd) || !vdev_writeable(vd)) && + strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) != 0) + return; + } + + if ((ereport = fm_nvlist_create(NULL)) == NULL) + return; + + if ((detector = fm_nvlist_create(NULL)) == NULL) { + fm_nvlist_destroy(ereport, FM_NVA_FREE); + return; + } + + /* + * Serialize ereport generation + */ + mutex_enter(&spa->spa_errlist_lock); + + /* + * Determine the ENA to use for this event. If we are in a loading + * state, use a SPA-wide ENA. Otherwise, if we are in an I/O state, use + * a root zio-wide ENA. Otherwise, simply use a unique ENA. + */ + if (spa->spa_load_state != SPA_LOAD_NONE) { + if (spa->spa_ena == 0) + spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1); + ena = spa->spa_ena; + } else if (zio != NULL && zio->io_logical != NULL) { + if (zio->io_logical->io_ena == 0) + zio->io_logical->io_ena = + fm_ena_generate(0, FM_ENA_FMT1); + ena = zio->io_logical->io_ena; + } else { + ena = fm_ena_generate(0, FM_ENA_FMT1); + } + + /* + * Construct the full class, detector, and other standard FMA fields. 
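+ *
+ * The subclass is simply appended to ZFS_ERROR_CLASS to form the
+ * posted class, and the detector FMRI carries the pool GUID plus,
+ * when a vdev is involved, that vdev's GUID.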
+ */ + (void) snprintf(class, sizeof (class), "%s.%s", + ZFS_ERROR_CLASS, subclass); + + fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa), + vd != NULL ? vd->vdev_guid : 0); + + fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL); + + /* + * Construct the per-ereport payload, depending on which parameters are + * passed in. + */ + + /* + * If we are importing a faulted pool, then we treat it like an open, + * not an import. Otherwise, the DE will ignore all faults during + * import, since the default behavior is to mark the devices as + * persistently unavailable, not leave them in the faulted state. + */ + state = spa->spa_import_faulted ? SPA_LOAD_OPEN : spa->spa_load_state; + + /* + * Generic payload members common to all ereports. + */ + fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL, + DATA_TYPE_STRING, spa_name(spa), FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, + DATA_TYPE_UINT64, spa_guid(spa), + FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32, + state, NULL); + + if (spa != NULL) { + fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE, + DATA_TYPE_STRING, + spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ? + FM_EREPORT_FAILMODE_WAIT : + spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ? + FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC, + NULL); + } + + if (vd != NULL) { + vdev_t *pvd = vd->vdev_parent; + + fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, + DATA_TYPE_UINT64, vd->vdev_guid, + FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE, + DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL); + if (vd->vdev_path) + fm_payload_set(ereport, + FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH, + DATA_TYPE_STRING, vd->vdev_path, NULL); + if (vd->vdev_devid) + fm_payload_set(ereport, + FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID, + DATA_TYPE_STRING, vd->vdev_devid, NULL); + + if (pvd != NULL) { + fm_payload_set(ereport, + FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID, + DATA_TYPE_UINT64, pvd->vdev_guid, + FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE, + DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type, + NULL); + if (pvd->vdev_path) + fm_payload_set(ereport, + FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH, + DATA_TYPE_STRING, pvd->vdev_path, NULL); + if (pvd->vdev_devid) + fm_payload_set(ereport, + FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID, + DATA_TYPE_STRING, pvd->vdev_devid, NULL); + } + } + + if (zio != NULL) { + /* + * Payload common to all I/Os. + */ + fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR, + DATA_TYPE_INT32, zio->io_error, NULL); + + /* + * If the 'size' parameter is non-zero, it indicates this is a + * RAID-Z or other I/O where the physical offset and length are + * provided for us, instead of within the zio_t. + */ + if (vd != NULL) { + if (size) + fm_payload_set(ereport, + FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET, + DATA_TYPE_UINT64, stateoroffset, + FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE, + DATA_TYPE_UINT64, size, NULL); + else + fm_payload_set(ereport, + FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET, + DATA_TYPE_UINT64, zio->io_offset, + FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE, + DATA_TYPE_UINT64, zio->io_size, NULL); + } + + /* + * Payload for I/Os with corresponding logical information. 
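+ * (i.e. the objset/object/level/blkid bookmark of the logical zio)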
+ */ + if (zio->io_logical != NULL) + fm_payload_set(ereport, + FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET, + DATA_TYPE_UINT64, + zio->io_logical->io_bookmark.zb_objset, + FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT, + DATA_TYPE_UINT64, + zio->io_logical->io_bookmark.zb_object, + FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL, + DATA_TYPE_INT64, + zio->io_logical->io_bookmark.zb_level, + FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID, + DATA_TYPE_UINT64, + zio->io_logical->io_bookmark.zb_blkid, NULL); + } else if (vd != NULL) { + /* + * If we have a vdev but no zio, this is a device fault, and the + * 'stateoroffset' parameter indicates the previous state of the + * vdev. + */ + fm_payload_set(ereport, + FM_EREPORT_PAYLOAD_ZFS_PREV_STATE, + DATA_TYPE_UINT64, stateoroffset, NULL); + } + mutex_exit(&spa->spa_errlist_lock); + + fm_ereport_post(ereport, EVCH_SLEEP); + + fm_nvlist_destroy(ereport, FM_NVA_FREE); + fm_nvlist_destroy(detector, FM_NVA_FREE); +#endif +} + +static void +zfs_post_common(spa_t *spa, vdev_t *vd, const char *name) +{ +#ifdef _KERNEL + nvlist_t *resource; + char class[64]; + + if ((resource = fm_nvlist_create(NULL)) == NULL) + return; + + (void) snprintf(class, sizeof (class), "%s.%s.%s", FM_RSRC_RESOURCE, + ZFS_ERROR_CLASS, name); + VERIFY(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION) == 0); + VERIFY(nvlist_add_string(resource, FM_CLASS, class) == 0); + VERIFY(nvlist_add_uint64(resource, + FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)) == 0); + if (vd) + VERIFY(nvlist_add_uint64(resource, + FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid) == 0); + + fm_ereport_post(resource, EVCH_SLEEP); + + fm_nvlist_destroy(resource, FM_NVA_FREE); +#endif +} + +/* + * The 'resource.fs.zfs.removed' event is an internal signal that the given vdev + * has been removed from the system. This will cause the DE to ignore any + * recent I/O errors, inferring that they are due to the asynchronous device + * removal. + */ +void +zfs_post_remove(spa_t *spa, vdev_t *vd) +{ + zfs_post_common(spa, vd, FM_RESOURCE_REMOVED); +} + +/* + * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool + * has the 'autoreplace' property set, and therefore any broken vdevs will be + * handled by higher level logic, and no vdev fault should be generated. + */ +void +zfs_post_autoreplace(spa_t *spa, vdev_t *vd) +{ + zfs_post_common(spa, vd, FM_RESOURCE_AUTOREPLACE); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_fuid.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_fuid.c new file mode 100644 index 000000000000..7cb505258d8f --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_fuid.c @@ -0,0 +1,704 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 
+ * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef _KERNEL +#include +#include +#include +#include +#endif +#include + +/* + * FUID Domain table(s). + * + * The FUID table is stored as a packed nvlist of an array + * of nvlists which contain an index, domain string and offset + * + * During file system initialization the nvlist(s) are read and + * two AVL trees are created. One tree is keyed by the index number + * and the other by the domain string. Nodes are never removed from + * trees, but new entries may be added. If a new entry is added then the + * on-disk packed nvlist will also be updated. + */ + +#define FUID_IDX "fuid_idx" +#define FUID_DOMAIN "fuid_domain" +#define FUID_OFFSET "fuid_offset" +#define FUID_NVP_ARRAY "fuid_nvlist" + +typedef struct fuid_domain { + avl_node_t f_domnode; + avl_node_t f_idxnode; + ksiddomain_t *f_ksid; + uint64_t f_idx; +} fuid_domain_t; + +static char *nulldomain = ""; + +/* + * Compare two indexes. + */ +static int +idx_compare(const void *arg1, const void *arg2) +{ + const fuid_domain_t *node1 = arg1; + const fuid_domain_t *node2 = arg2; + + if (node1->f_idx < node2->f_idx) + return (-1); + else if (node1->f_idx > node2->f_idx) + return (1); + return (0); +} + +/* + * Compare two domain strings. + */ +static int +domain_compare(const void *arg1, const void *arg2) +{ + const fuid_domain_t *node1 = arg1; + const fuid_domain_t *node2 = arg2; + int val; + + val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name); + if (val == 0) + return (0); + return (val > 0 ? 1 : -1); +} + +/* + * load initial fuid domain and idx trees. This function is used by + * both the kernel and zdb. + */ +uint64_t +zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree, + avl_tree_t *domain_tree) +{ + dmu_buf_t *db; + uint64_t fuid_size; + + avl_create(idx_tree, idx_compare, + sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode)); + avl_create(domain_tree, domain_compare, + sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode)); + + VERIFY(0 == dmu_bonus_hold(os, fuid_obj, FTAG, &db)); + fuid_size = *(uint64_t *)db->db_data; + dmu_buf_rele(db, FTAG); + + if (fuid_size) { + nvlist_t **fuidnvp; + nvlist_t *nvp = NULL; + uint_t count; + char *packed; + int i; + + packed = kmem_alloc(fuid_size, KM_SLEEP); + VERIFY(dmu_read(os, fuid_obj, 0, fuid_size, packed) == 0); + VERIFY(nvlist_unpack(packed, fuid_size, + &nvp, 0) == 0); + VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY, + &fuidnvp, &count) == 0); + + for (i = 0; i != count; i++) { + fuid_domain_t *domnode; + char *domain; + uint64_t idx; + + VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN, + &domain) == 0); + VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX, + &idx) == 0); + + domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); + + domnode->f_idx = idx; + domnode->f_ksid = ksid_lookupdomain(domain); + avl_add(idx_tree, domnode); + avl_add(domain_tree, domnode); + } + nvlist_free(nvp); + kmem_free(packed, fuid_size); + } + return (fuid_size); +} + +void +zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree) +{ + fuid_domain_t *domnode; + void *cookie; + + cookie = NULL; + while (domnode = avl_destroy_nodes(domain_tree, &cookie)) + ksiddomain_rele(domnode->f_ksid); + + avl_destroy(domain_tree); + cookie = NULL; + while (domnode = avl_destroy_nodes(idx_tree, &cookie)) + kmem_free(domnode, sizeof (fuid_domain_t)); + avl_destroy(idx_tree); +} + +char * +zfs_fuid_idx_domain(avl_tree_t *idx_tree, 
uint32_t idx) +{ + fuid_domain_t searchnode, *findnode; + avl_index_t loc; + + searchnode.f_idx = idx; + + findnode = avl_find(idx_tree, &searchnode, &loc); + + return (findnode ? findnode->f_ksid->kd_name : nulldomain); +} + +#ifdef _KERNEL +/* + * Load the fuid table(s) into memory. + */ +static void +zfs_fuid_init(zfsvfs_t *zfsvfs, dmu_tx_t *tx) +{ + int error = 0; + + rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); + + if (zfsvfs->z_fuid_loaded) { + rw_exit(&zfsvfs->z_fuid_lock); + return; + } + + if (zfsvfs->z_fuid_obj == 0) { + + /* first make sure we need to allocate object */ + + error = zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ, + ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj); + if (error == ENOENT && tx != NULL) { + zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os, + DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE, + sizeof (uint64_t), tx); + VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ, + ZFS_FUID_TABLES, sizeof (uint64_t), 1, + &zfsvfs->z_fuid_obj, tx) == 0); + } + } + + if (zfsvfs->z_fuid_obj != 0) { + zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os, + zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx, + &zfsvfs->z_fuid_domain); + zfsvfs->z_fuid_loaded = B_TRUE; + } + + rw_exit(&zfsvfs->z_fuid_lock); +} + +/* + * Query domain table for a given domain. + * + * If domain isn't found it is added to AVL trees and + * the results are pushed out to disk. + */ +int +zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, char **retdomain, + dmu_tx_t *tx) +{ + fuid_domain_t searchnode, *findnode; + avl_index_t loc; + krw_t rw = RW_READER; + + /* + * If the dummy "nobody" domain then return an index of 0 + * to cause the created FUID to be a standard POSIX id + * for the user nobody. + */ + if (domain[0] == '\0') { + *retdomain = nulldomain; + return (0); + } + + searchnode.f_ksid = ksid_lookupdomain(domain); + if (retdomain) { + *retdomain = searchnode.f_ksid->kd_name; + } + if (!zfsvfs->z_fuid_loaded) + zfs_fuid_init(zfsvfs, tx); + +retry: + rw_enter(&zfsvfs->z_fuid_lock, rw); + findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc); + + if (findnode) { + rw_exit(&zfsvfs->z_fuid_lock); + ksiddomain_rele(searchnode.f_ksid); + return (findnode->f_idx); + } else { + fuid_domain_t *domnode; + nvlist_t *nvp; + nvlist_t **fuids; + uint64_t retidx; + size_t nvsize = 0; + char *packed; + dmu_buf_t *db; + int i = 0; + + if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) { + rw_exit(&zfsvfs->z_fuid_lock); + rw = RW_WRITER; + goto retry; + } + + domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP); + domnode->f_ksid = searchnode.f_ksid; + + retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1; + + avl_add(&zfsvfs->z_fuid_domain, domnode); + avl_add(&zfsvfs->z_fuid_idx, domnode); + /* + * Now resync the on-disk nvlist. 
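+ *
+ * The whole domain table is rebuilt as an nvlist array, packed with
+ * NV_ENCODE_XDR, rewritten to the FUID object with dmu_write(), and
+ * the new packed size is stored in the object's bonus buffer -- the
+ * same layout zfs_fuid_table_load() above reads back when the table
+ * is (re)loaded.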
+ */ + VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); + + domnode = avl_first(&zfsvfs->z_fuid_domain); + fuids = kmem_alloc(retidx * sizeof (void *), KM_SLEEP); + while (domnode) { + VERIFY(nvlist_alloc(&fuids[i], + NV_UNIQUE_NAME, KM_SLEEP) == 0); + VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX, + domnode->f_idx) == 0); + VERIFY(nvlist_add_uint64(fuids[i], + FUID_OFFSET, 0) == 0); + VERIFY(nvlist_add_string(fuids[i++], FUID_DOMAIN, + domnode->f_ksid->kd_name) == 0); + domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode); + } + VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY, + fuids, retidx) == 0); + for (i = 0; i != retidx; i++) + nvlist_free(fuids[i]); + kmem_free(fuids, retidx * sizeof (void *)); + VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0); + packed = kmem_alloc(nvsize, KM_SLEEP); + VERIFY(nvlist_pack(nvp, &packed, &nvsize, + NV_ENCODE_XDR, KM_SLEEP) == 0); + nvlist_free(nvp); + zfsvfs->z_fuid_size = nvsize; + dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0, + zfsvfs->z_fuid_size, packed, tx); + kmem_free(packed, zfsvfs->z_fuid_size); + VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, + FTAG, &db)); + dmu_buf_will_dirty(db, tx); + *(uint64_t *)db->db_data = zfsvfs->z_fuid_size; + dmu_buf_rele(db, FTAG); + + rw_exit(&zfsvfs->z_fuid_lock); + return (retidx); + } +} + +/* + * Query domain table by index, returning domain string + * + * Returns a pointer from an avl node of the domain string. + * + */ +static char * +zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx) +{ + char *domain; + + if (idx == 0 || !zfsvfs->z_use_fuids) + return (NULL); + + if (!zfsvfs->z_fuid_loaded) + zfs_fuid_init(zfsvfs, NULL); + + rw_enter(&zfsvfs->z_fuid_lock, RW_READER); + + if (zfsvfs->z_fuid_obj) + domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx); + else + domain = nulldomain; + rw_exit(&zfsvfs->z_fuid_lock); + + ASSERT(domain); + return (domain); +} + +void +zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp) +{ + *uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_phys->zp_uid, + cr, ZFS_OWNER); + *gidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_phys->zp_gid, + cr, ZFS_GROUP); +} + +uid_t +zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid, + cred_t *cr, zfs_fuid_type_t type) +{ + uint32_t index = FUID_INDEX(fuid); + char *domain; + uid_t id; + + if (index == 0) + return (fuid); + + domain = zfs_fuid_find_by_idx(zfsvfs, index); + ASSERT(domain != NULL); + + if (type == ZFS_OWNER || type == ZFS_ACE_USER) { + (void) kidmap_getuidbysid(crgetzone(cr), domain, + FUID_RID(fuid), &id); + } else { + (void) kidmap_getgidbysid(crgetzone(cr), domain, + FUID_RID(fuid), &id); + } + return (id); +} + +/* + * Add a FUID node to the list of fuid's being created for this + * ACL + * + * If ACL has multiple domains, then keep only one copy of each unique + * domain. + */ +static void +zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid, + uint64_t idx, uint64_t id, zfs_fuid_type_t type) +{ + zfs_fuid_t *fuid; + zfs_fuid_domain_t *fuid_domain; + zfs_fuid_info_t *fuidp; + uint64_t fuididx; + boolean_t found = B_FALSE; + + if (*fuidpp == NULL) + *fuidpp = zfs_fuid_info_alloc(); + + fuidp = *fuidpp; + /* + * First find fuid domain index in linked list + * + * If one isn't found then create an entry. 
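+ *
+ * The 1-based position of the domain in this list (fuididx) is what
+ * gets encoded into the log FUID below via FUID_ENCODE(fuididx, rid),
+ * presumably with the domain index in the upper bits and the RID in
+ * the lower bits, to match the FUID_INDEX()/FUID_RID() accessors used
+ * elsewhere in this file.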
+ */ + + for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains); + fuid_domain; fuid_domain = list_next(&fuidp->z_domains, + fuid_domain), fuididx++) { + if (idx == fuid_domain->z_domidx) { + found = B_TRUE; + break; + } + } + + if (!found) { + fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP); + fuid_domain->z_domain = domain; + fuid_domain->z_domidx = idx; + list_insert_tail(&fuidp->z_domains, fuid_domain); + fuidp->z_domain_str_sz += strlen(domain) + 1; + fuidp->z_domain_cnt++; + } + + if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) { + /* + * Now allocate fuid entry and add it on the end of the list + */ + + fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP); + fuid->z_id = id; + fuid->z_domidx = idx; + fuid->z_logfuid = FUID_ENCODE(fuididx, rid); + + list_insert_tail(&fuidp->z_fuids, fuid); + fuidp->z_fuid_cnt++; + } else { + if (type == ZFS_OWNER) + fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid); + else + fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid); + } +} + +/* + * Create a file system FUID, based on information in the users cred + */ +uint64_t +zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type, + dmu_tx_t *tx, cred_t *cr, zfs_fuid_info_t **fuidp) +{ + uint64_t idx; + ksid_t *ksid; + uint32_t rid; + char *kdomain; + const char *domain; + uid_t id; + + VERIFY(type == ZFS_OWNER || type == ZFS_GROUP); + + ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP); + if (ksid) { + id = ksid_getid(ksid); + } else { + if (type == ZFS_OWNER) + id = crgetuid(cr); + else + id = crgetgid(cr); + } + + if (!zfsvfs->z_use_fuids || (!IS_EPHEMERAL(id))) + return ((uint64_t)id); + + rid = ksid_getrid(ksid); + domain = ksid_getdomain(ksid); + + idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, tx); + + zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type); + + return (FUID_ENCODE(idx, rid)); +} + +/* + * Create a file system FUID for an ACL ace + * or a chown/chgrp of the file. + * This is similar to zfs_fuid_create_cred, except that + * we can't find the domain + rid information in the + * cred. Instead we have to query Winchester for the + * domain and rid. + * + * During replay operations the domain+rid information is + * found in the zfs_fuid_info_t that the replay code has + * attached to the zfsvfs of the file system. + */ +uint64_t +zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr, + zfs_fuid_type_t type, dmu_tx_t *tx, zfs_fuid_info_t **fuidpp) +{ + const char *domain; + char *kdomain; + uint32_t fuid_idx = FUID_INDEX(id); + uint32_t rid; + idmap_stat status; + uint64_t idx; + boolean_t is_replay = (zfsvfs->z_assign >= TXG_INITIAL); + zfs_fuid_t *zfuid = NULL; + zfs_fuid_info_t *fuidp; + + /* + * If POSIX ID, or entry is already a FUID then + * just return the id + * + * We may also be handed an already FUID'ized id via + * chmod. + */ + + if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0) + return (id); + + if (is_replay) { + fuidp = zfsvfs->z_fuid_replay; + + /* + * If we are passed an ephemeral id, but no + * fuid_info was logged then return NOBODY. + * This is most likely a result of idmap service + * not being available. 
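+ * (Falling back to UID_NOBODY here apparently lets replay proceed
+ * rather than failing the whole log record.)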
+ */ + if (fuidp == NULL) + return (UID_NOBODY); + + switch (type) { + case ZFS_ACE_USER: + case ZFS_ACE_GROUP: + zfuid = list_head(&fuidp->z_fuids); + rid = FUID_RID(zfuid->z_logfuid); + idx = FUID_INDEX(zfuid->z_logfuid); + break; + case ZFS_OWNER: + rid = FUID_RID(fuidp->z_fuid_owner); + idx = FUID_INDEX(fuidp->z_fuid_owner); + break; + case ZFS_GROUP: + rid = FUID_RID(fuidp->z_fuid_group); + idx = FUID_INDEX(fuidp->z_fuid_group); + break; + }; + domain = fuidp->z_domain_table[idx -1]; + } else { + if (type == ZFS_OWNER || type == ZFS_ACE_USER) + status = kidmap_getsidbyuid(crgetzone(cr), id, + &domain, &rid); + else + status = kidmap_getsidbygid(crgetzone(cr), id, + &domain, &rid); + + if (status != 0) { + /* + * When returning nobody we will need to + * make a dummy fuid table entry for logging + * purposes. + */ + rid = UID_NOBODY; + domain = nulldomain; + } + } + + idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, tx); + + if (!is_replay) + zfs_fuid_node_add(fuidpp, kdomain, rid, idx, id, type); + else if (zfuid != NULL) { + list_remove(&fuidp->z_fuids, zfuid); + kmem_free(zfuid, sizeof (zfs_fuid_t)); + } + return (FUID_ENCODE(idx, rid)); +} + +void +zfs_fuid_destroy(zfsvfs_t *zfsvfs) +{ + rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER); + if (!zfsvfs->z_fuid_loaded) { + rw_exit(&zfsvfs->z_fuid_lock); + return; + } + zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain); + rw_exit(&zfsvfs->z_fuid_lock); +} + +/* + * Allocate zfs_fuid_info for tracking FUIDs created during + * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR() + */ +zfs_fuid_info_t * +zfs_fuid_info_alloc(void) +{ + zfs_fuid_info_t *fuidp; + + fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP); + list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t), + offsetof(zfs_fuid_domain_t, z_next)); + list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t), + offsetof(zfs_fuid_t, z_next)); + return (fuidp); +} + +/* + * Release all memory associated with zfs_fuid_info_t + */ +void +zfs_fuid_info_free(zfs_fuid_info_t *fuidp) +{ + zfs_fuid_t *zfuid; + zfs_fuid_domain_t *zdomain; + + while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) { + list_remove(&fuidp->z_fuids, zfuid); + kmem_free(zfuid, sizeof (zfs_fuid_t)); + } + + if (fuidp->z_domain_table != NULL) + kmem_free(fuidp->z_domain_table, + (sizeof (char **)) * fuidp->z_domain_cnt); + + while ((zdomain = list_head(&fuidp->z_domains)) != NULL) { + list_remove(&fuidp->z_domains, zdomain); + kmem_free(zdomain, sizeof (zfs_fuid_domain_t)); + } + + kmem_free(fuidp, sizeof (zfs_fuid_info_t)); +} + +/* + * Check to see if id is a groupmember. If cred + * has ksid info then sidlist is checked first + * and if still not found then POSIX groups are checked + * + * Will use a straight FUID compare when possible. 
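+ *
+ * For ids with a domain index of 0 the comparison is a plain id
+ * match; otherwise the domain string and RID are compared against
+ * each SID in the cred's ksidlist before falling back to
+ * groupmember().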
+ */ +boolean_t +zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr) +{ + ksid_t *ksid = crgetsid(cr, KSID_GROUP); + uid_t gid; + + if (ksid) { + int i; + ksid_t *ksid_groups; + ksidlist_t *ksidlist = crgetsidlist(cr); + uint32_t idx = FUID_INDEX(id); + uint32_t rid = FUID_RID(id); + + ASSERT(ksidlist); + ksid_groups = ksidlist->ksl_sids; + + for (i = 0; i != ksidlist->ksl_nsid; i++) { + if (idx == 0) { + if (id != IDMAP_WK_CREATOR_GROUP_GID && + id == ksid_groups[i].ks_id) { + return (B_TRUE); + } + } else { + char *domain; + + domain = zfs_fuid_find_by_idx(zfsvfs, idx); + ASSERT(domain != NULL); + + if (strcmp(domain, + IDMAP_WK_CREATOR_SID_AUTHORITY) == 0) + return (B_FALSE); + + if ((strcmp(domain, + ksid_groups[i].ks_domain->kd_name) == 0) && + rid == ksid_groups[i].ks_rid) + return (B_TRUE); + } + } + } + + /* + * Not found in ksidlist, check posix groups + */ + gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP); + return (groupmember(gid, cr)); +} +#endif diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ioctl.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ioctl.c new file mode 100644 index 000000000000..c77892f90ab5 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_ioctl.c @@ -0,0 +1,3201 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "zfs_namecheck.h" +#include "zfs_prop.h" +#include "zfs_deleg.h" + +extern struct modlfs zfs_modlfs; + +extern void zfs_init(void); +extern void zfs_fini(void); + +ldi_ident_t zfs_li = NULL; +dev_info_t *zfs_dip; + +typedef int zfs_ioc_func_t(zfs_cmd_t *); +typedef int zfs_secpolicy_func_t(zfs_cmd_t *, cred_t *); + +typedef struct zfs_ioc_vec { + zfs_ioc_func_t *zvec_func; + zfs_secpolicy_func_t *zvec_secpolicy; + enum { + NO_NAME, + POOL_NAME, + DATASET_NAME + } zvec_namecheck; + boolean_t zvec_his_log; +} zfs_ioc_vec_t; + +static void clear_props(char *dataset, nvlist_t *props); +static int zfs_fill_zplprops_root(uint64_t, nvlist_t *, nvlist_t *, + boolean_t *); +int zfs_set_prop_nvlist(const char *, nvlist_t *); + +/* _NOTE(PRINTFLIKE(4)) - this is printf-like, but lint is too whiney */ +void +__dprintf(const char *file, const char *func, int line, const char *fmt, ...) 
+{ + const char *newfile; + char buf[256]; + va_list adx; + + /* + * Get rid of annoying "../common/" prefix to filename. + */ + newfile = strrchr(file, '/'); + if (newfile != NULL) { + newfile = newfile + 1; /* Get rid of leading / */ + } else { + newfile = file; + } + + va_start(adx, fmt); + (void) vsnprintf(buf, sizeof (buf), fmt, adx); + va_end(adx); + + /* + * To get this data, use the zfs-dprintf probe as so: + * dtrace -q -n 'zfs-dprintf \ + * /stringof(arg0) == "dbuf.c"/ \ + * {printf("%s: %s", stringof(arg1), stringof(arg3))}' + * arg0 = file name + * arg1 = function name + * arg2 = line number + * arg3 = message + */ + DTRACE_PROBE4(zfs__dprintf, + char *, newfile, char *, func, int, line, char *, buf); +} + +static void +history_str_free(char *buf) +{ + kmem_free(buf, HIS_MAX_RECORD_LEN); +} + +static char * +history_str_get(zfs_cmd_t *zc) +{ + char *buf; + + if (zc->zc_history == NULL) + return (NULL); + + buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP); + if (copyinstr((void *)(uintptr_t)zc->zc_history, + buf, HIS_MAX_RECORD_LEN, NULL) != 0) { + history_str_free(buf); + return (NULL); + } + + buf[HIS_MAX_RECORD_LEN -1] = '\0'; + + return (buf); +} + +/* + * Check to see if the named dataset is currently defined as bootable + */ +static boolean_t +zfs_is_bootfs(const char *name) +{ + spa_t *spa; + boolean_t ret = B_FALSE; + + if (spa_open(name, &spa, FTAG) == 0) { + if (spa->spa_bootfs) { + objset_t *os; + + if (dmu_objset_open(name, DMU_OST_ZFS, + DS_MODE_USER | DS_MODE_READONLY, &os) == 0) { + ret = (dmu_objset_id(os) == spa->spa_bootfs); + dmu_objset_close(os); + } + } + spa_close(spa, FTAG); + } + return (ret); +} + +/* + * zfs_earlier_version + * + * Return non-zero if the spa version is less than requested version. + */ +static int +zfs_earlier_version(const char *name, int version) +{ + spa_t *spa; + + if (spa_open(name, &spa, FTAG) == 0) { + if (spa_version(spa) < version) { + spa_close(spa, FTAG); + return (1); + } + spa_close(spa, FTAG); + } + return (0); +} + +/* + * zpl_earlier_version + * + * Return TRUE if the ZPL version is less than requested version. + */ +static boolean_t +zpl_earlier_version(const char *name, int version) +{ + objset_t *os; + boolean_t rc = B_TRUE; + + if (dmu_objset_open(name, DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &os) == 0) { + uint64_t zplversion; + + if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &zplversion) == 0) + rc = zplversion < version; + dmu_objset_close(os); + } + return (rc); +} + +static void +zfs_log_history(zfs_cmd_t *zc) +{ + spa_t *spa; + char *buf; + + if ((buf = history_str_get(zc)) == NULL) + return; + + if (spa_open(zc->zc_name, &spa, FTAG) == 0) { + if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY) + (void) spa_history_log(spa, buf, LOG_CMD_NORMAL); + spa_close(spa, FTAG); + } + history_str_free(buf); +} + +/* + * Policy for top-level read operations (list pools). Requires no privileges, + * and can be used in the local zone, as there is no associated dataset. + */ +/* ARGSUSED */ +static int +zfs_secpolicy_none(zfs_cmd_t *zc, cred_t *cr) +{ + return (0); +} + +/* + * Policy for dataset read operations (list children, get statistics). Requires + * no privileges, but must be visible in the local zone. 
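+ * Visibility is what zone_dataset_visible() checks below; callers in
+ * the global zone always pass.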
+ */ +/* ARGSUSED */ +static int +zfs_secpolicy_read(zfs_cmd_t *zc, cred_t *cr) +{ + if (INGLOBALZONE(curproc) || + zone_dataset_visible(zc->zc_name, NULL)) + return (0); + + return (ENOENT); +} + +static int +zfs_dozonecheck(const char *dataset, cred_t *cr) +{ + uint64_t zoned; + int writable = 1; + + /* + * The dataset must be visible by this zone -- check this first + * so they don't see EPERM on something they shouldn't know about. + */ + if (!INGLOBALZONE(curproc) && + !zone_dataset_visible(dataset, &writable)) + return (ENOENT); + + if (dsl_prop_get_integer(dataset, "zoned", &zoned, NULL)) + return (ENOENT); + + if (INGLOBALZONE(curproc)) { + /* + * If the fs is zoned, only root can access it from the + * global zone. + */ + if (secpolicy_zfs(cr) && zoned) + return (EPERM); + } else { + /* + * If we are in a local zone, the 'zoned' property must be set. + */ + if (!zoned) + return (EPERM); + + /* must be writable by this zone */ + if (!writable) + return (EPERM); + } + return (0); +} + +int +zfs_secpolicy_write_perms(const char *name, const char *perm, cred_t *cr) +{ + int error; + + error = zfs_dozonecheck(name, cr); + if (error == 0) { + error = secpolicy_zfs(cr); + if (error) + error = dsl_deleg_access(name, perm, cr); + } + return (error); +} + +static int +zfs_secpolicy_setprop(const char *name, zfs_prop_t prop, cred_t *cr) +{ + /* + * Check permissions for special properties. + */ + switch (prop) { + case ZFS_PROP_ZONED: + /* + * Disallow setting of 'zoned' from within a local zone. + */ + if (!INGLOBALZONE(curproc)) + return (EPERM); + break; + + case ZFS_PROP_QUOTA: + if (!INGLOBALZONE(curproc)) { + uint64_t zoned; + char setpoint[MAXNAMELEN]; + /* + * Unprivileged users are allowed to modify the + * quota on things *under* (ie. contained by) + * the thing they own. + */ + if (dsl_prop_get_integer(name, "zoned", &zoned, + setpoint)) + return (EPERM); + if (!zoned || strlen(name) <= strlen(setpoint)) + return (EPERM); + } + break; + } + + return (zfs_secpolicy_write_perms(name, zfs_prop_to_name(prop), cr)); +} + +int +zfs_secpolicy_fsacl(zfs_cmd_t *zc, cred_t *cr) +{ + int error; + + error = zfs_dozonecheck(zc->zc_name, cr); + if (error) + return (error); + + /* + * permission to set permissions will be evaluated later in + * dsl_deleg_can_allow() + */ + return (0); +} + +int +zfs_secpolicy_rollback(zfs_cmd_t *zc, cred_t *cr) +{ + int error; + error = zfs_secpolicy_write_perms(zc->zc_name, + ZFS_DELEG_PERM_ROLLBACK, cr); + if (error == 0) + error = zfs_secpolicy_write_perms(zc->zc_name, + ZFS_DELEG_PERM_MOUNT, cr); + return (error); +} + +int +zfs_secpolicy_send(zfs_cmd_t *zc, cred_t *cr) +{ + return (zfs_secpolicy_write_perms(zc->zc_name, + ZFS_DELEG_PERM_SEND, cr)); +} + +int +zfs_secpolicy_share(zfs_cmd_t *zc, cred_t *cr) +{ + if (!INGLOBALZONE(curproc)) + return (EPERM); + + if (secpolicy_nfs(cr) == 0) { + return (0); + } else { + vnode_t *vp; + int error; + + if ((error = lookupname(zc->zc_value, UIO_SYSSPACE, + NO_FOLLOW, NULL, &vp)) != 0) + return (error); + + /* Now make sure mntpnt and dataset are ZFS */ + + if (vp->v_vfsp->vfs_fstype != zfsfstype || + (strcmp((char *)refstr_value(vp->v_vfsp->vfs_resource), + zc->zc_name) != 0)) { + VN_RELE(vp); + return (EPERM); + } + + VN_RELE(vp); + return (dsl_deleg_access(zc->zc_name, + ZFS_DELEG_PERM_SHARE, cr)); + } +} + +static int +zfs_get_parent(const char *datasetname, char *parent, int parentsize) +{ + char *cp; + + /* + * Remove the @bla or /bla from the end of the name to get the parent. 
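+ * E.g. "tank/fs@snap" yields "tank/fs" and "tank/fs/child" yields
+ * "tank/fs"; a name containing neither separator returns ENOENT.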
+ */ + (void) strncpy(parent, datasetname, parentsize); + cp = strrchr(parent, '@'); + if (cp != NULL) { + cp[0] = '\0'; + } else { + cp = strrchr(parent, '/'); + if (cp == NULL) + return (ENOENT); + cp[0] = '\0'; + } + + return (0); +} + +int +zfs_secpolicy_destroy_perms(const char *name, cred_t *cr) +{ + int error; + + if ((error = zfs_secpolicy_write_perms(name, + ZFS_DELEG_PERM_MOUNT, cr)) != 0) + return (error); + + return (zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_DESTROY, cr)); +} + +static int +zfs_secpolicy_destroy(zfs_cmd_t *zc, cred_t *cr) +{ + return (zfs_secpolicy_destroy_perms(zc->zc_name, cr)); +} + +/* + * Must have sys_config privilege to check the iscsi permission + */ +/* ARGSUSED */ +static int +zfs_secpolicy_iscsi(zfs_cmd_t *zc, cred_t *cr) +{ + return (secpolicy_zfs(cr)); +} + +int +zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr) +{ + char parentname[MAXNAMELEN]; + int error; + + if ((error = zfs_secpolicy_write_perms(from, + ZFS_DELEG_PERM_RENAME, cr)) != 0) + return (error); + + if ((error = zfs_secpolicy_write_perms(from, + ZFS_DELEG_PERM_MOUNT, cr)) != 0) + return (error); + + if ((error = zfs_get_parent(to, parentname, + sizeof (parentname))) != 0) + return (error); + + if ((error = zfs_secpolicy_write_perms(parentname, + ZFS_DELEG_PERM_CREATE, cr)) != 0) + return (error); + + if ((error = zfs_secpolicy_write_perms(parentname, + ZFS_DELEG_PERM_MOUNT, cr)) != 0) + return (error); + + return (error); +} + +static int +zfs_secpolicy_rename(zfs_cmd_t *zc, cred_t *cr) +{ + return (zfs_secpolicy_rename_perms(zc->zc_name, zc->zc_value, cr)); +} + +static int +zfs_secpolicy_promote(zfs_cmd_t *zc, cred_t *cr) +{ + char parentname[MAXNAMELEN]; + objset_t *clone; + int error; + + error = zfs_secpolicy_write_perms(zc->zc_name, + ZFS_DELEG_PERM_PROMOTE, cr); + if (error) + return (error); + + error = dmu_objset_open(zc->zc_name, DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &clone); + + if (error == 0) { + dsl_dataset_t *pclone = NULL; + dsl_dir_t *dd; + dd = clone->os->os_dsl_dataset->ds_dir; + + rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); + error = dsl_dataset_hold_obj(dd->dd_pool, + dd->dd_phys->dd_origin_obj, FTAG, &pclone); + rw_exit(&dd->dd_pool->dp_config_rwlock); + if (error) { + dmu_objset_close(clone); + return (error); + } + + error = zfs_secpolicy_write_perms(zc->zc_name, + ZFS_DELEG_PERM_MOUNT, cr); + + dsl_dataset_name(pclone, parentname); + dmu_objset_close(clone); + dsl_dataset_rele(pclone, FTAG); + if (error == 0) + error = zfs_secpolicy_write_perms(parentname, + ZFS_DELEG_PERM_PROMOTE, cr); + } + return (error); +} + +static int +zfs_secpolicy_receive(zfs_cmd_t *zc, cred_t *cr) +{ + int error; + + if ((error = zfs_secpolicy_write_perms(zc->zc_name, + ZFS_DELEG_PERM_RECEIVE, cr)) != 0) + return (error); + + if ((error = zfs_secpolicy_write_perms(zc->zc_name, + ZFS_DELEG_PERM_MOUNT, cr)) != 0) + return (error); + + return (zfs_secpolicy_write_perms(zc->zc_name, + ZFS_DELEG_PERM_CREATE, cr)); +} + +int +zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr) +{ + int error; + + if ((error = zfs_secpolicy_write_perms(name, + ZFS_DELEG_PERM_SNAPSHOT, cr)) != 0) + return (error); + + error = zfs_secpolicy_write_perms(name, + ZFS_DELEG_PERM_MOUNT, cr); + + return (error); +} + +static int +zfs_secpolicy_snapshot(zfs_cmd_t *zc, cred_t *cr) +{ + + return (zfs_secpolicy_snapshot_perms(zc->zc_name, cr)); +} + +static int +zfs_secpolicy_create(zfs_cmd_t *zc, cred_t *cr) +{ + char parentname[MAXNAMELEN]; + int error; + + if 
((error = zfs_get_parent(zc->zc_name, parentname, + sizeof (parentname))) != 0) + return (error); + + if (zc->zc_value[0] != '\0') { + if ((error = zfs_secpolicy_write_perms(zc->zc_value, + ZFS_DELEG_PERM_CLONE, cr)) != 0) + return (error); + } + + if ((error = zfs_secpolicy_write_perms(parentname, + ZFS_DELEG_PERM_CREATE, cr)) != 0) + return (error); + + error = zfs_secpolicy_write_perms(parentname, + ZFS_DELEG_PERM_MOUNT, cr); + + return (error); +} + +static int +zfs_secpolicy_umount(zfs_cmd_t *zc, cred_t *cr) +{ + int error; + + error = secpolicy_fs_unmount(cr, NULL); + if (error) { + error = dsl_deleg_access(zc->zc_name, ZFS_DELEG_PERM_MOUNT, cr); + } + return (error); +} + +/* + * Policy for pool operations - create/destroy pools, add vdevs, etc. Requires + * SYS_CONFIG privilege, which is not available in a local zone. + */ +/* ARGSUSED */ +static int +zfs_secpolicy_config(zfs_cmd_t *zc, cred_t *cr) +{ + if (secpolicy_sys_config(cr, B_FALSE) != 0) + return (EPERM); + + return (0); +} + +/* + * Just like zfs_secpolicy_config, except that we will check for + * mount permission on the dataset for permission to create/remove + * the minor nodes. + */ +static int +zfs_secpolicy_minor(zfs_cmd_t *zc, cred_t *cr) +{ + if (secpolicy_sys_config(cr, B_FALSE) != 0) { + return (dsl_deleg_access(zc->zc_name, + ZFS_DELEG_PERM_MOUNT, cr)); + } + + return (0); +} + +/* + * Policy for fault injection. Requires all privileges. + */ +/* ARGSUSED */ +static int +zfs_secpolicy_inject(zfs_cmd_t *zc, cred_t *cr) +{ + return (secpolicy_zinject(cr)); +} + +static int +zfs_secpolicy_inherit(zfs_cmd_t *zc, cred_t *cr) +{ + zfs_prop_t prop = zfs_name_to_prop(zc->zc_value); + + if (prop == ZPROP_INVAL) { + if (!zfs_prop_user(zc->zc_value)) + return (EINVAL); + return (zfs_secpolicy_write_perms(zc->zc_name, + ZFS_DELEG_PERM_USERPROP, cr)); + } else { + if (!zfs_prop_inheritable(prop)) + return (EINVAL); + return (zfs_secpolicy_setprop(zc->zc_name, prop, cr)); + } +} + +/* + * Returns the nvlist as specified by the user in the zfs_cmd_t. + */ +static int +get_nvlist(uint64_t nvl, uint64_t size, nvlist_t **nvp) +{ + char *packed; + int error; + nvlist_t *list = NULL; + + /* + * Read in and unpack the user-supplied nvlist. 
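+ * 'nvl' is a user-space address (zc_nvlist_conf or zc_nvlist_src from
+ * the ioctl) and 'size' its packed length; the buffer is copied in
+ * with xcopyin() and then unpacked with nvlist_unpack().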
+ */ + if (size == 0) + return (EINVAL); + + packed = kmem_alloc(size, KM_SLEEP); + + if ((error = xcopyin((void *)(uintptr_t)nvl, packed, size)) != 0) { + kmem_free(packed, size); + return (error); + } + + if ((error = nvlist_unpack(packed, size, &list, 0)) != 0) { + kmem_free(packed, size); + return (error); + } + + kmem_free(packed, size); + + *nvp = list; + return (0); +} + +static int +put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl) +{ + char *packed = NULL; + size_t size; + int error; + + VERIFY(nvlist_size(nvl, &size, NV_ENCODE_NATIVE) == 0); + + if (size > zc->zc_nvlist_dst_size) { + error = ENOMEM; + } else { + packed = kmem_alloc(size, KM_SLEEP); + VERIFY(nvlist_pack(nvl, &packed, &size, NV_ENCODE_NATIVE, + KM_SLEEP) == 0); + error = xcopyout(packed, (void *)(uintptr_t)zc->zc_nvlist_dst, + size); + kmem_free(packed, size); + } + + zc->zc_nvlist_dst_size = size; + return (error); +} + +static int +zfs_ioc_pool_create(zfs_cmd_t *zc) +{ + int error; + nvlist_t *config, *props = NULL; + nvlist_t *rootprops = NULL; + nvlist_t *zplprops = NULL; + char *buf; + + if (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, + &config)) + return (error); + + if (zc->zc_nvlist_src_size != 0 && (error = + get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, &props))) { + nvlist_free(config); + return (error); + } + + if (props) { + nvlist_t *nvl = NULL; + uint64_t version = SPA_VERSION; + + (void) nvlist_lookup_uint64(props, + zpool_prop_to_name(ZPOOL_PROP_VERSION), &version); + if (version < SPA_VERSION_INITIAL || version > SPA_VERSION) { + error = EINVAL; + goto pool_props_bad; + } + (void) nvlist_lookup_nvlist(props, ZPOOL_ROOTFS_PROPS, &nvl); + if (nvl) { + error = nvlist_dup(nvl, &rootprops, KM_SLEEP); + if (error != 0) { + nvlist_free(config); + nvlist_free(props); + return (error); + } + (void) nvlist_remove_all(props, ZPOOL_ROOTFS_PROPS); + } + VERIFY(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0); + error = zfs_fill_zplprops_root(version, rootprops, + zplprops, NULL); + if (error) + goto pool_props_bad; + } + + buf = history_str_get(zc); + + error = spa_create(zc->zc_name, config, props, buf, zplprops); + + /* + * Set the remaining root properties + */ + if (!error && + (error = zfs_set_prop_nvlist(zc->zc_name, rootprops)) != 0) + (void) spa_destroy(zc->zc_name); + + if (buf != NULL) + history_str_free(buf); + +pool_props_bad: + nvlist_free(rootprops); + nvlist_free(zplprops); + nvlist_free(config); + nvlist_free(props); + + return (error); +} + +static int +zfs_ioc_pool_destroy(zfs_cmd_t *zc) +{ + int error; + zfs_log_history(zc); + error = spa_destroy(zc->zc_name); + return (error); +} + +static int +zfs_ioc_pool_import(zfs_cmd_t *zc) +{ + int error; + nvlist_t *config, *props = NULL; + uint64_t guid; + + if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, + &config)) != 0) + return (error); + + if (zc->zc_nvlist_src_size != 0 && (error = + get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, &props))) { + nvlist_free(config); + return (error); + } + + if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) != 0 || + guid != zc->zc_guid) + error = EINVAL; + else if (zc->zc_cookie) + error = spa_import_faulted(zc->zc_name, config, + props); + else + error = spa_import(zc->zc_name, config, props); + + nvlist_free(config); + + if (props) + nvlist_free(props); + + return (error); +} + +static int +zfs_ioc_pool_export(zfs_cmd_t *zc) +{ + int error; + boolean_t force = (boolean_t)zc->zc_cookie; + + zfs_log_history(zc); + error = 
spa_export(zc->zc_name, NULL, force); + return (error); +} + +static int +zfs_ioc_pool_configs(zfs_cmd_t *zc) +{ + nvlist_t *configs; + int error; + + if ((configs = spa_all_configs(&zc->zc_cookie)) == NULL) + return (EEXIST); + + error = put_nvlist(zc, configs); + + nvlist_free(configs); + + return (error); +} + +static int +zfs_ioc_pool_stats(zfs_cmd_t *zc) +{ + nvlist_t *config; + int error; + int ret = 0; + + error = spa_get_stats(zc->zc_name, &config, zc->zc_value, + sizeof (zc->zc_value)); + + if (config != NULL) { + ret = put_nvlist(zc, config); + nvlist_free(config); + + /* + * The config may be present even if 'error' is non-zero. + * In this case we return success, and preserve the real errno + * in 'zc_cookie'. + */ + zc->zc_cookie = error; + } else { + ret = error; + } + + return (ret); +} + +/* + * Try to import the given pool, returning pool stats as appropriate so that + * user land knows which devices are available and overall pool health. + */ +static int +zfs_ioc_pool_tryimport(zfs_cmd_t *zc) +{ + nvlist_t *tryconfig, *config; + int error; + + if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, + &tryconfig)) != 0) + return (error); + + config = spa_tryimport(tryconfig); + + nvlist_free(tryconfig); + + if (config == NULL) + return (EINVAL); + + error = put_nvlist(zc, config); + nvlist_free(config); + + return (error); +} + +static int +zfs_ioc_pool_scrub(zfs_cmd_t *zc) +{ + spa_t *spa; + int error; + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) + return (error); + + error = spa_scrub(spa, zc->zc_cookie); + + spa_close(spa, FTAG); + + return (error); +} + +static int +zfs_ioc_pool_freeze(zfs_cmd_t *zc) +{ + spa_t *spa; + int error; + + error = spa_open(zc->zc_name, &spa, FTAG); + if (error == 0) { + spa_freeze(spa); + spa_close(spa, FTAG); + } + return (error); +} + +static int +zfs_ioc_pool_upgrade(zfs_cmd_t *zc) +{ + spa_t *spa; + int error; + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) + return (error); + + if (zc->zc_cookie < spa_version(spa) || zc->zc_cookie > SPA_VERSION) { + spa_close(spa, FTAG); + return (EINVAL); + } + + spa_upgrade(spa, zc->zc_cookie); + spa_close(spa, FTAG); + + return (error); +} + +static int +zfs_ioc_pool_get_history(zfs_cmd_t *zc) +{ + spa_t *spa; + char *hist_buf; + uint64_t size; + int error; + + if ((size = zc->zc_history_len) == 0) + return (EINVAL); + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) + return (error); + + if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) { + spa_close(spa, FTAG); + return (ENOTSUP); + } + + hist_buf = kmem_alloc(size, KM_SLEEP); + if ((error = spa_history_get(spa, &zc->zc_history_offset, + &zc->zc_history_len, hist_buf)) == 0) { + error = xcopyout(hist_buf, + (char *)(uintptr_t)zc->zc_history, + zc->zc_history_len); + } + + spa_close(spa, FTAG); + kmem_free(hist_buf, size); + return (error); +} + +static int +zfs_ioc_dsobj_to_dsname(zfs_cmd_t *zc) +{ + int error; + + if (error = dsl_dsobj_to_dsname(zc->zc_name, zc->zc_obj, zc->zc_value)) + return (error); + + return (0); +} + +static int +zfs_ioc_obj_to_path(zfs_cmd_t *zc) +{ + objset_t *osp; + int error; + + if ((error = dmu_objset_open(zc->zc_name, DMU_OST_ZFS, + DS_MODE_USER | DS_MODE_READONLY, &osp)) != 0) + return (error); + error = zfs_obj_to_path(osp, zc->zc_obj, zc->zc_value, + sizeof (zc->zc_value)); + dmu_objset_close(osp); + + return (error); +} + +static int +zfs_ioc_vdev_add(zfs_cmd_t *zc) +{ + spa_t *spa; + int error; + nvlist_t *config, **l2cache, **spares; + uint_t nl2cache = 0, nspares = 0; 
+ + error = spa_open(zc->zc_name, &spa, FTAG); + if (error != 0) + return (error); + + error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, + &config); + (void) nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_L2CACHE, + &l2cache, &nl2cache); + + (void) nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_SPARES, + &spares, &nspares); + + /* + * A root pool with concatenated devices is not supported, so a + * device cannot be added to a root pool. + * + * An intent log device cannot be added to a root pool either: the + * ZIL is replayed during mountroot, and a separate log device cannot + * be accessed at that time. + * + * l2cache and spare devices are fine to add to a root pool. + */ + if (spa->spa_bootfs != 0 && nl2cache == 0 && nspares == 0) { + spa_close(spa, FTAG); + return (EDOM); + } + + if (error == 0) { + error = spa_vdev_add(spa, config); + nvlist_free(config); + } + spa_close(spa, FTAG); + return (error); +} + +static int +zfs_ioc_vdev_remove(zfs_cmd_t *zc) +{ + spa_t *spa; + int error; + + error = spa_open(zc->zc_name, &spa, FTAG); + if (error != 0) + return (error); + error = spa_vdev_remove(spa, zc->zc_guid, B_FALSE); + spa_close(spa, FTAG); + return (error); +} + +static int +zfs_ioc_vdev_set_state(zfs_cmd_t *zc) +{ + spa_t *spa; + int error; + vdev_state_t newstate = VDEV_STATE_UNKNOWN; + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) + return (error); + switch (zc->zc_cookie) { + case VDEV_STATE_ONLINE: + error = vdev_online(spa, zc->zc_guid, zc->zc_obj, &newstate); + break; + + case VDEV_STATE_OFFLINE: + error = vdev_offline(spa, zc->zc_guid, zc->zc_obj); + break; + + case VDEV_STATE_FAULTED: + error = vdev_fault(spa, zc->zc_guid); + break; + + case VDEV_STATE_DEGRADED: + error = vdev_degrade(spa, zc->zc_guid); + break; + + default: + error = EINVAL; + } + zc->zc_cookie = newstate; + spa_close(spa, FTAG); + return (error); +} + +static int +zfs_ioc_vdev_attach(zfs_cmd_t *zc) +{ + spa_t *spa; + int replacing = zc->zc_cookie; + nvlist_t *config; + int error; + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) + return (error); + + if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, + &config)) == 0) { + error = spa_vdev_attach(spa, zc->zc_guid, config, replacing); + nvlist_free(config); + } + + spa_close(spa, FTAG); + return (error); +} + +static int +zfs_ioc_vdev_detach(zfs_cmd_t *zc) +{ + spa_t *spa; + int error; + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) + return (error); + + error = spa_vdev_detach(spa, zc->zc_guid, B_FALSE); + + spa_close(spa, FTAG); + return (error); +} + +static int +zfs_ioc_vdev_setpath(zfs_cmd_t *zc) +{ + spa_t *spa; + char *path = zc->zc_value; + uint64_t guid = zc->zc_guid; + int error; + + error = spa_open(zc->zc_name, &spa, FTAG); + if (error != 0) + return (error); + + error = spa_vdev_setpath(spa, guid, path); + spa_close(spa, FTAG); + return (error); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_nvlist_dst_size size of buffer for property nvlist + * + * outputs: + * zc_objset_stats stats + * zc_nvlist_dst property nvlist + * zc_nvlist_dst_size size of property nvlist + */ +static int +zfs_ioc_objset_stats(zfs_cmd_t *zc) +{ + objset_t *os = NULL; + int error; + nvlist_t *nv; + + if (error = dmu_objset_open(zc->zc_name, + DMU_OST_ANY, DS_MODE_USER | DS_MODE_READONLY, &os)) + return (error); + + dmu_objset_fast_stat(os, &zc->zc_objset_stats); + + if (zc->zc_nvlist_dst != 0 && + (error = dsl_prop_get_all(os, &nv, FALSE)) == 0) { + dmu_objset_stats(os, nv); + /* + * NB: 
zvol_get_stats() will read the objset contents, + * which we aren't supposed to do with a + * DS_MODE_USER hold, because it could be + * inconsistent. So this is a bit of a workaround... + */ + if (!zc->zc_objset_stats.dds_inconsistent) { + if (dmu_objset_type(os) == DMU_OST_ZVOL) + VERIFY(zvol_get_stats(os, nv) == 0); + } + error = put_nvlist(zc, nv); + nvlist_free(nv); + } + + dmu_objset_close(os); + return (error); +} + +static int +nvl_add_zplprop(objset_t *os, nvlist_t *props, zfs_prop_t prop) +{ + uint64_t value; + int error; + + /* + * zfs_get_zplprop() will either find a value or give us + * the default value (if there is one). + */ + if ((error = zfs_get_zplprop(os, prop, &value)) != 0) + return (error); + VERIFY(nvlist_add_uint64(props, zfs_prop_to_name(prop), value) == 0); + return (0); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_nvlist_dst_size size of buffer for zpl property nvlist + * + * outputs: + * zc_nvlist_dst zpl property nvlist + * zc_nvlist_dst_size size of zpl property nvlist + */ +static int +zfs_ioc_objset_zplprops(zfs_cmd_t *zc) +{ + objset_t *os; + int err; + + if (err = dmu_objset_open(zc->zc_name, + DMU_OST_ANY, DS_MODE_USER | DS_MODE_READONLY, &os)) + return (err); + + dmu_objset_fast_stat(os, &zc->zc_objset_stats); + + /* + * NB: nvl_add_zplprop() will read the objset contents, + * which we aren't supposed to do with a DS_MODE_USER + * hold, because it could be inconsistent. + */ + if (zc->zc_nvlist_dst != NULL && + !zc->zc_objset_stats.dds_inconsistent && + dmu_objset_type(os) == DMU_OST_ZFS) { + nvlist_t *nv; + + VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); + if ((err = nvl_add_zplprop(os, nv, ZFS_PROP_VERSION)) == 0 && + (err = nvl_add_zplprop(os, nv, ZFS_PROP_NORMALIZE)) == 0 && + (err = nvl_add_zplprop(os, nv, ZFS_PROP_UTF8ONLY)) == 0 && + (err = nvl_add_zplprop(os, nv, ZFS_PROP_CASE)) == 0) + err = put_nvlist(zc, nv); + nvlist_free(nv); + } else { + err = ENOENT; + } + dmu_objset_close(os); + return (err); +} + +static void +zfs_prefetch_datasets(zfs_cmd_t *zc, objset_t *os, char *p) +{ + uint64_t cookie = 0; + int error; + + do { + error = dmu_dir_list_next(os, + sizeof (zc->zc_name) - (p - zc->zc_name), p, + NULL, &cookie); + } while (error == 0 && !INGLOBALZONE(curproc) && + !zone_dataset_visible(zc->zc_name, NULL) && + !dmu_objset_prefetch(zc->zc_name, NULL)); +} + +static void +zfs_prefetch_snapshots(zfs_cmd_t *zc) +{ + dmu_objset_find(zc->zc_name, dmu_objset_prefetch, + NULL, DS_FIND_SNAPSHOTS); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_cookie zap cursor + * zc_nvlist_dst_size size of buffer for property nvlist + * + * outputs: + * zc_name name of next filesystem + * zc_objset_stats stats + * zc_nvlist_dst property nvlist + * zc_nvlist_dst_size size of property nvlist + */ +static int +zfs_ioc_dataset_list_next(zfs_cmd_t *zc) +{ + objset_t *os; + int error; + char *p; + + if (error = dmu_objset_open(zc->zc_name, + DMU_OST_ANY, DS_MODE_USER | DS_MODE_READONLY, &os)) { + if (error == ENOENT) + error = ESRCH; + return (error); + } + + p = strrchr(zc->zc_name, '/'); + if (p == NULL || p[1] != '\0') + (void) strlcat(zc->zc_name, "/", sizeof (zc->zc_name)); + p = zc->zc_name + strlen(zc->zc_name); + + if (zc->zc_cookie == 0) + zfs_prefetch_datasets(zc, os, p); + do { + error = dmu_dir_list_next(os, + sizeof (zc->zc_name) - (p - zc->zc_name), p, + NULL, &zc->zc_cookie); + if (error == ENOENT) + error = ESRCH; + } while (error == 0 && !INGLOBALZONE(curproc) && + !zone_dataset_visible(zc->zc_name, NULL)); + 
dmu_objset_close(os); + + /* + * If it's a hidden dataset (ie. with a '$' in its name), don't + * try to get stats for it. Userland will skip over it. + */ + if (error == 0 && strchr(zc->zc_name, '$') == NULL) + error = zfs_ioc_objset_stats(zc); /* fill in the stats */ + + return (error); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_cookie zap cursor + * zc_nvlist_dst_size size of buffer for property nvlist + * + * outputs: + * zc_name name of next snapshot + * zc_objset_stats stats + * zc_nvlist_dst property nvlist + * zc_nvlist_dst_size size of property nvlist + */ +static int +zfs_ioc_snapshot_list_next(zfs_cmd_t *zc) +{ + objset_t *os; + int error; + + error = dmu_objset_open(zc->zc_name, + DMU_OST_ANY, DS_MODE_USER | DS_MODE_READONLY, &os); + if (error) + return (error == ENOENT ? ESRCH : error); + + if (zc->zc_cookie == 0) + zfs_prefetch_snapshots(zc); + /* + * A dataset name of maximum length cannot have any snapshots, + * so exit immediately. + */ + if (strlcat(zc->zc_name, "@", sizeof (zc->zc_name)) >= MAXNAMELEN) { + dmu_objset_close(os); + return (ESRCH); + } + + error = dmu_snapshot_list_next(os, + sizeof (zc->zc_name) - strlen(zc->zc_name), + zc->zc_name + strlen(zc->zc_name), NULL, &zc->zc_cookie, NULL); + dmu_objset_close(os); + if (error == 0) + error = zfs_ioc_objset_stats(zc); /* fill in the stats */ + else if (error == ENOENT) + error = ESRCH; + + /* if we failed, undo the @ that we tacked on to zc_name */ + if (error) + *strchr(zc->zc_name, '@') = '\0'; + return (error); +} + +int +zfs_set_prop_nvlist(const char *name, nvlist_t *nvl) +{ + nvpair_t *elem; + int error; + uint64_t intval; + char *strval; + + /* + * First validate permission to set all of the properties + */ + elem = NULL; + while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) { + const char *propname = nvpair_name(elem); + zfs_prop_t prop = zfs_name_to_prop(propname); + + if (prop == ZPROP_INVAL) { + /* + * If this is a user-defined property, it must be a + * string, and there is no further validation to do. + */ + if (!zfs_prop_user(propname) || + nvpair_type(elem) != DATA_TYPE_STRING) + return (EINVAL); + + if (error = zfs_secpolicy_write_perms(name, + ZFS_DELEG_PERM_USERPROP, CRED())) + return (error); + continue; + } + + if ((error = zfs_secpolicy_setprop(name, prop, CRED())) != 0) + return (error); + + /* + * Check that this value is valid for this pool version + */ + switch (prop) { + case ZFS_PROP_COMPRESSION: + /* + * If the user specified gzip compression, make sure + * the SPA supports it. We ignore any errors here since + * we'll catch them later. + */ + if (nvpair_type(elem) == DATA_TYPE_UINT64 && + nvpair_value_uint64(elem, &intval) == 0) { + if (intval >= ZIO_COMPRESS_GZIP_1 && + intval <= ZIO_COMPRESS_GZIP_9 && + zfs_earlier_version(name, + SPA_VERSION_GZIP_COMPRESSION)) + return (ENOTSUP); + + /* + * If this is a bootable dataset then + * verify that the compression algorithm + * is supported for booting. We must return + * something other than ENOTSUP since it + * implies a downrev pool version. 
+ */ + if (zfs_is_bootfs(name) && + !BOOTFS_COMPRESS_VALID(intval)) + return (ERANGE); + } + break; + + case ZFS_PROP_COPIES: + if (zfs_earlier_version(name, + SPA_VERSION_DITTO_BLOCKS)) + return (ENOTSUP); + break; + + case ZFS_PROP_SHARESMB: + if (zpl_earlier_version(name, ZPL_VERSION_FUID)) + return (ENOTSUP); + break; + + case ZFS_PROP_ACLINHERIT: + if (nvpair_type(elem) == DATA_TYPE_UINT64 && + nvpair_value_uint64(elem, &intval) == 0) + if (intval == ZFS_ACL_PASSTHROUGH_X && + zfs_earlier_version(name, + SPA_VERSION_PASSTHROUGH_X)) + return (ENOTSUP); + } + } + + elem = NULL; + while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) { + const char *propname = nvpair_name(elem); + zfs_prop_t prop = zfs_name_to_prop(propname); + + if (prop == ZPROP_INVAL) { + VERIFY(nvpair_value_string(elem, &strval) == 0); + error = dsl_prop_set(name, propname, 1, + strlen(strval) + 1, strval); + if (error == 0) + continue; + else + return (error); + } + + switch (prop) { + case ZFS_PROP_QUOTA: + if ((error = nvpair_value_uint64(elem, &intval)) != 0 || + (error = dsl_dir_set_quota(name, intval)) != 0) + return (error); + break; + + case ZFS_PROP_REFQUOTA: + if ((error = nvpair_value_uint64(elem, &intval)) != 0 || + (error = dsl_dataset_set_quota(name, intval)) != 0) + return (error); + break; + + case ZFS_PROP_RESERVATION: + if ((error = nvpair_value_uint64(elem, &intval)) != 0 || + (error = dsl_dir_set_reservation(name, + intval)) != 0) + return (error); + break; + + case ZFS_PROP_REFRESERVATION: + if ((error = nvpair_value_uint64(elem, &intval)) != 0 || + (error = dsl_dataset_set_reservation(name, + intval)) != 0) + return (error); + break; + + case ZFS_PROP_VOLSIZE: + if ((error = nvpair_value_uint64(elem, &intval)) != 0 || + (error = zvol_set_volsize(name, + ddi_driver_major(zfs_dip), intval)) != 0) + return (error); + break; + + case ZFS_PROP_VOLBLOCKSIZE: + if ((error = nvpair_value_uint64(elem, &intval)) != 0 || + (error = zvol_set_volblocksize(name, intval)) != 0) + return (error); + break; + + case ZFS_PROP_VERSION: + if ((error = nvpair_value_uint64(elem, &intval)) != 0 || + (error = zfs_set_version(name, intval)) != 0) + return (error); + break; + + default: + if (nvpair_type(elem) == DATA_TYPE_STRING) { + if (zfs_prop_get_type(prop) != + PROP_TYPE_STRING) + return (EINVAL); + VERIFY(nvpair_value_string(elem, &strval) == 0); + if ((error = dsl_prop_set(name, + nvpair_name(elem), 1, strlen(strval) + 1, + strval)) != 0) + return (error); + } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { + const char *unused; + + VERIFY(nvpair_value_uint64(elem, &intval) == 0); + + switch (zfs_prop_get_type(prop)) { + case PROP_TYPE_NUMBER: + break; + case PROP_TYPE_STRING: + return (EINVAL); + case PROP_TYPE_INDEX: + if (zfs_prop_index_to_string(prop, + intval, &unused) != 0) + return (EINVAL); + break; + default: + cmn_err(CE_PANIC, + "unknown property type"); + break; + } + + if ((error = dsl_prop_set(name, propname, + 8, 1, &intval)) != 0) + return (error); + } else { + return (EINVAL); + } + break; + } + } + + return (0); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_value name of property to inherit + * zc_nvlist_src{_size} nvlist of properties to apply + * zc_cookie clear existing local props? 
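+ *
+ * When zc_cookie is set, the existing local properties are first
+ * cleared (reset to their inherited values via zfs_ioc_inherit_prop())
+ * before the supplied nvlist is applied with zfs_set_prop_nvlist().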
+ * + * outputs: none + */ +static int +zfs_ioc_set_prop(zfs_cmd_t *zc) +{ + nvlist_t *nvl; + int error; + + if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, + &nvl)) != 0) + return (error); + + if (zc->zc_cookie) { + nvlist_t *origprops; + objset_t *os; + + if (dmu_objset_open(zc->zc_name, DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &os) == 0) { + if (dsl_prop_get_all(os, &origprops, TRUE) == 0) { + clear_props(zc->zc_name, origprops); + nvlist_free(origprops); + } + dmu_objset_close(os); + } + + } + + error = zfs_set_prop_nvlist(zc->zc_name, nvl); + + nvlist_free(nvl); + return (error); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_value name of property to inherit + * + * outputs: none + */ +static int +zfs_ioc_inherit_prop(zfs_cmd_t *zc) +{ + /* the property name has been validated by zfs_secpolicy_inherit() */ + return (dsl_prop_set(zc->zc_name, zc->zc_value, 0, 0, NULL)); +} + +static int +zfs_ioc_pool_set_props(zfs_cmd_t *zc) +{ + nvlist_t *props; + spa_t *spa; + int error; + + if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, + &props))) + return (error); + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) { + nvlist_free(props); + return (error); + } + + error = spa_prop_set(spa, props); + + nvlist_free(props); + spa_close(spa, FTAG); + + return (error); +} + +static int +zfs_ioc_pool_get_props(zfs_cmd_t *zc) +{ + spa_t *spa; + int error; + nvlist_t *nvp = NULL; + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) + return (error); + + error = spa_prop_get(spa, &nvp); + + if (error == 0 && zc->zc_nvlist_dst != NULL) + error = put_nvlist(zc, nvp); + else + error = EFAULT; + + spa_close(spa, FTAG); + + if (nvp) + nvlist_free(nvp); + return (error); +} + +static int +zfs_ioc_iscsi_perm_check(zfs_cmd_t *zc) +{ + nvlist_t *nvp; + int error; + uint32_t uid; + uint32_t gid; + uint32_t *groups; + uint_t group_cnt; + cred_t *usercred; + + if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, + &nvp)) != 0) { + return (error); + } + + if ((error = nvlist_lookup_uint32(nvp, + ZFS_DELEG_PERM_UID, &uid)) != 0) { + nvlist_free(nvp); + return (EPERM); + } + + if ((error = nvlist_lookup_uint32(nvp, + ZFS_DELEG_PERM_GID, &gid)) != 0) { + nvlist_free(nvp); + return (EPERM); + } + + if ((error = nvlist_lookup_uint32_array(nvp, ZFS_DELEG_PERM_GROUPS, + &groups, &group_cnt)) != 0) { + nvlist_free(nvp); + return (EPERM); + } + usercred = cralloc(); + if ((crsetugid(usercred, uid, gid) != 0) || + (crsetgroups(usercred, group_cnt, (gid_t *)groups) != 0)) { + nvlist_free(nvp); + crfree(usercred); + return (EPERM); + } + nvlist_free(nvp); + error = dsl_deleg_access(zc->zc_name, + zfs_prop_to_name(ZFS_PROP_SHAREISCSI), usercred); + crfree(usercred); + return (error); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_nvlist_src{_size} nvlist of delegated permissions + * zc_perm_action allow/unallow flag + * + * outputs: none + */ +static int +zfs_ioc_set_fsacl(zfs_cmd_t *zc) +{ + int error; + nvlist_t *fsaclnv = NULL; + + if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, + &fsaclnv)) != 0) + return (error); + + /* + * Verify nvlist is constructed correctly + */ + if ((error = zfs_deleg_verify_nvlist(fsaclnv)) != 0) { + nvlist_free(fsaclnv); + return (EINVAL); + } + + /* + * If we don't have PRIV_SYS_MOUNT, then validate + * that user is allowed to hand out each permission in + * the nvlist(s) + */ + + error = secpolicy_zfs(CRED()); + if (error) { + if (zc->zc_perm_action == B_FALSE) { + error = 
dsl_deleg_can_allow(zc->zc_name, + fsaclnv, CRED()); + } else { + error = dsl_deleg_can_unallow(zc->zc_name, + fsaclnv, CRED()); + } + } + + if (error == 0) + error = dsl_deleg_set(zc->zc_name, fsaclnv, zc->zc_perm_action); + + nvlist_free(fsaclnv); + return (error); +} + +/* + * inputs: + * zc_name name of filesystem + * + * outputs: + * zc_nvlist_src{_size} nvlist of delegated permissions + */ +static int +zfs_ioc_get_fsacl(zfs_cmd_t *zc) +{ + nvlist_t *nvp; + int error; + + if ((error = dsl_deleg_get(zc->zc_name, &nvp)) == 0) { + error = put_nvlist(zc, nvp); + nvlist_free(nvp); + } + + return (error); +} + +/* + * inputs: + * zc_name name of volume + * + * outputs: none + */ +static int +zfs_ioc_create_minor(zfs_cmd_t *zc) +{ + return (zvol_create_minor(zc->zc_name, ddi_driver_major(zfs_dip))); +} + +/* + * inputs: + * zc_name name of volume + * + * outputs: none + */ +static int +zfs_ioc_remove_minor(zfs_cmd_t *zc) +{ + return (zvol_remove_minor(zc->zc_name)); +} + +/* + * Search the vfs list for a specified resource. Returns a pointer to it + * or NULL if no suitable entry is found. The caller of this routine + * is responsible for releasing the returned vfs pointer. + */ +static vfs_t * +zfs_get_vfs(const char *resource) +{ + struct vfs *vfsp; + struct vfs *vfs_found = NULL; + + vfs_list_read_lock(); + vfsp = rootvfs; + do { + if (strcmp(refstr_value(vfsp->vfs_resource), resource) == 0) { + VFS_HOLD(vfsp); + vfs_found = vfsp; + break; + } + vfsp = vfsp->vfs_next; + } while (vfsp != rootvfs); + vfs_list_unlock(); + return (vfs_found); +} + +/* ARGSUSED */ +static void +zfs_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) +{ + zfs_creat_t *zct = arg; + + zfs_create_fs(os, cr, zct->zct_zplprops, tx); +} + +#define ZFS_PROP_UNDEFINED ((uint64_t)-1) + +/* + * inputs: + * createprops list of properties requested by creator + * default_zplver zpl version to use if unspecified in createprops + * fuids_ok fuids allowed in this version of the spa? + * os parent objset pointer (NULL if root fs) + * + * outputs: + * zplprops values for the zplprops we attach to the master node object + * is_ci true if requested file system will be purely case-insensitive + * + * Determine the settings for utf8only, normalization and + * casesensitivity. Specific values may have been requested by the + * creator and/or we can inherit values from the parent dataset. If + * the file system is of too early a vintage, a creator can not + * request settings for these properties, even if the requested + * setting is the default value. We don't actually want to create dsl + * properties for these, so remove them from the source nvlist after + * processing. + */ +static int +zfs_fill_zplprops_impl(objset_t *os, uint64_t default_zplver, + boolean_t fuids_ok, nvlist_t *createprops, nvlist_t *zplprops, + boolean_t *is_ci) +{ + uint64_t zplver = default_zplver; + uint64_t sense = ZFS_PROP_UNDEFINED; + uint64_t norm = ZFS_PROP_UNDEFINED; + uint64_t u8 = ZFS_PROP_UNDEFINED; + + ASSERT(zplprops != NULL); + + /* + * Pull out creator prop choices, if any. 
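+ * Anything the creator did not specify is left as ZFS_PROP_UNDEFINED
+ * here and is filled in further below from the parent objset (or the
+ * property's default value) via zfs_get_zplprop().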
+ */ + if (createprops) { + (void) nvlist_lookup_uint64(createprops, + zfs_prop_to_name(ZFS_PROP_VERSION), &zplver); + (void) nvlist_lookup_uint64(createprops, + zfs_prop_to_name(ZFS_PROP_NORMALIZE), &norm); + (void) nvlist_remove_all(createprops, + zfs_prop_to_name(ZFS_PROP_NORMALIZE)); + (void) nvlist_lookup_uint64(createprops, + zfs_prop_to_name(ZFS_PROP_UTF8ONLY), &u8); + (void) nvlist_remove_all(createprops, + zfs_prop_to_name(ZFS_PROP_UTF8ONLY)); + (void) nvlist_lookup_uint64(createprops, + zfs_prop_to_name(ZFS_PROP_CASE), &sense); + (void) nvlist_remove_all(createprops, + zfs_prop_to_name(ZFS_PROP_CASE)); + } + + /* + * If the zpl version requested is whacky or the file system + * or pool version is too "young" to support normalization + * and the creator tried to set a value for one of the props, + * error out. + */ + if ((zplver < ZPL_VERSION_INITIAL || zplver > ZPL_VERSION) || + (zplver >= ZPL_VERSION_FUID && !fuids_ok) || + (zplver < ZPL_VERSION_NORMALIZATION && + (norm != ZFS_PROP_UNDEFINED || u8 != ZFS_PROP_UNDEFINED || + sense != ZFS_PROP_UNDEFINED))) + return (ENOTSUP); + + /* + * Put the version in the zplprops + */ + VERIFY(nvlist_add_uint64(zplprops, + zfs_prop_to_name(ZFS_PROP_VERSION), zplver) == 0); + + if (norm == ZFS_PROP_UNDEFINED) + VERIFY(zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &norm) == 0); + VERIFY(nvlist_add_uint64(zplprops, + zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm) == 0); + + /* + * If we're normalizing, names must always be valid UTF-8 strings. + */ + if (norm) + u8 = 1; + if (u8 == ZFS_PROP_UNDEFINED) + VERIFY(zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &u8) == 0); + VERIFY(nvlist_add_uint64(zplprops, + zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8) == 0); + + if (sense == ZFS_PROP_UNDEFINED) + VERIFY(zfs_get_zplprop(os, ZFS_PROP_CASE, &sense) == 0); + VERIFY(nvlist_add_uint64(zplprops, + zfs_prop_to_name(ZFS_PROP_CASE), sense) == 0); + + if (is_ci) + *is_ci = (sense == ZFS_CASE_INSENSITIVE); + + return (0); +} + +static int +zfs_fill_zplprops(const char *dataset, nvlist_t *createprops, + nvlist_t *zplprops, boolean_t *is_ci) +{ + boolean_t fuids_ok = B_TRUE; + uint64_t zplver = ZPL_VERSION; + objset_t *os = NULL; + char parentname[MAXNAMELEN]; + char *cp; + int error; + + (void) strlcpy(parentname, dataset, sizeof (parentname)); + cp = strrchr(parentname, '/'); + ASSERT(cp != NULL); + cp[0] = '\0'; + + if (zfs_earlier_version(dataset, SPA_VERSION_FUID)) { + zplver = ZPL_VERSION_FUID - 1; + fuids_ok = B_FALSE; + } + + /* + * Open parent object set so we can inherit zplprop values. 
+ */ + if ((error = dmu_objset_open(parentname, DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &os)) != 0) + return (error); + + error = zfs_fill_zplprops_impl(os, zplver, fuids_ok, createprops, + zplprops, is_ci); + dmu_objset_close(os); + return (error); +} + +static int +zfs_fill_zplprops_root(uint64_t spa_vers, nvlist_t *createprops, + nvlist_t *zplprops, boolean_t *is_ci) +{ + boolean_t fuids_ok = B_TRUE; + uint64_t zplver = ZPL_VERSION; + int error; + + if (spa_vers < SPA_VERSION_FUID) { + zplver = ZPL_VERSION_FUID - 1; + fuids_ok = B_FALSE; + } + + error = zfs_fill_zplprops_impl(NULL, zplver, fuids_ok, createprops, + zplprops, is_ci); + return (error); +} + +/* + * inputs: + * zc_objset_type type of objset to create (fs vs zvol) + * zc_name name of new objset + * zc_value name of snapshot to clone from (may be empty) + * zc_nvlist_src{_size} nvlist of properties to apply + * + * outputs: none + */ +static int +zfs_ioc_create(zfs_cmd_t *zc) +{ + objset_t *clone; + int error = 0; + zfs_creat_t zct; + nvlist_t *nvprops = NULL; + void (*cbfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx); + dmu_objset_type_t type = zc->zc_objset_type; + + switch (type) { + + case DMU_OST_ZFS: + cbfunc = zfs_create_cb; + break; + + case DMU_OST_ZVOL: + cbfunc = zvol_create_cb; + break; + + default: + cbfunc = NULL; + break; + } + if (strchr(zc->zc_name, '@') || + strchr(zc->zc_name, '%')) + return (EINVAL); + + if (zc->zc_nvlist_src != NULL && + (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, + &nvprops)) != 0) + return (error); + + zct.zct_zplprops = NULL; + zct.zct_props = nvprops; + + if (zc->zc_value[0] != '\0') { + /* + * We're creating a clone of an existing snapshot. + */ + zc->zc_value[sizeof (zc->zc_value) - 1] = '\0'; + if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0) { + nvlist_free(nvprops); + return (EINVAL); + } + + error = dmu_objset_open(zc->zc_value, type, + DS_MODE_USER | DS_MODE_READONLY, &clone); + if (error) { + nvlist_free(nvprops); + return (error); + } + + error = dmu_objset_create(zc->zc_name, type, clone, 0, + NULL, NULL); + if (error) { + dmu_objset_close(clone); + nvlist_free(nvprops); + return (error); + } + dmu_objset_close(clone); + } else { + boolean_t is_insensitive = B_FALSE; + + if (cbfunc == NULL) { + nvlist_free(nvprops); + return (EINVAL); + } + + if (type == DMU_OST_ZVOL) { + uint64_t volsize, volblocksize; + + if (nvprops == NULL || + nvlist_lookup_uint64(nvprops, + zfs_prop_to_name(ZFS_PROP_VOLSIZE), + &volsize) != 0) { + nvlist_free(nvprops); + return (EINVAL); + } + + if ((error = nvlist_lookup_uint64(nvprops, + zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), + &volblocksize)) != 0 && error != ENOENT) { + nvlist_free(nvprops); + return (EINVAL); + } + + if (error != 0) + volblocksize = zfs_prop_default_numeric( + ZFS_PROP_VOLBLOCKSIZE); + + if ((error = zvol_check_volblocksize( + volblocksize)) != 0 || + (error = zvol_check_volsize(volsize, + volblocksize)) != 0) { + nvlist_free(nvprops); + return (error); + } + } else if (type == DMU_OST_ZFS) { + int error; + + /* + * We have to have normalization and + * case-folding flags correct when we do the + * file system creation, so go figure them out + * now. + */ + VERIFY(nvlist_alloc(&zct.zct_zplprops, + NV_UNIQUE_NAME, KM_SLEEP) == 0); + error = zfs_fill_zplprops(zc->zc_name, nvprops, + zct.zct_zplprops, &is_insensitive); + if (error != 0) { + nvlist_free(nvprops); + nvlist_free(zct.zct_zplprops); + return (error); + } + } + error = dmu_objset_create(zc->zc_name, type, NULL, + is_insensitive ? 
DS_FLAG_CI_DATASET : 0, cbfunc, &zct); + nvlist_free(zct.zct_zplprops); + } + + /* + * It would be nice to do this atomically. + */ + if (error == 0) { + if ((error = zfs_set_prop_nvlist(zc->zc_name, nvprops)) != 0) + (void) dmu_objset_destroy(zc->zc_name); + } + nvlist_free(nvprops); + return (error); +} + +struct snap_prop_arg { + nvlist_t *nvprops; + const char *snapname; +}; + +static int +set_snap_props(char *name, void *arg) +{ + struct snap_prop_arg *snpa = arg; + int len = strlen(name) + strlen(snpa->snapname) + 2; + char *buf = kmem_alloc(len, KM_SLEEP); + int err; + + (void) snprintf(buf, len, "%s@%s", name, snpa->snapname); + err = zfs_set_prop_nvlist(buf, snpa->nvprops); + if (err) + (void) dmu_objset_destroy(buf); + kmem_free(buf, len); + return (err); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_value short name of snapshot + * zc_cookie recursive flag + * + * outputs: none + */ +static int +zfs_ioc_snapshot(zfs_cmd_t *zc) +{ + nvlist_t *nvprops = NULL; + int error; + boolean_t recursive = zc->zc_cookie; + + if (snapshot_namecheck(zc->zc_value, NULL, NULL) != 0) + return (EINVAL); + + if (zc->zc_nvlist_src != NULL && + (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, + &nvprops)) != 0) + return (error); + + error = dmu_objset_snapshot(zc->zc_name, zc->zc_value, recursive); + + /* + * It would be nice to do this atomically. + */ + if (error == 0) { + struct snap_prop_arg snpa; + snpa.nvprops = nvprops; + snpa.snapname = zc->zc_value; + if (recursive) { + error = dmu_objset_find(zc->zc_name, + set_snap_props, &snpa, DS_FIND_CHILDREN); + if (error) { + (void) dmu_snapshots_destroy(zc->zc_name, + zc->zc_value); + } + } else { + error = set_snap_props(zc->zc_name, &snpa); + } + } + nvlist_free(nvprops); + return (error); +} + +int +zfs_unmount_snap(char *name, void *arg) +{ + vfs_t *vfsp = NULL; + + if (arg) { + char *snapname = arg; + int len = strlen(name) + strlen(snapname) + 2; + char *buf = kmem_alloc(len, KM_SLEEP); + + (void) strcpy(buf, name); + (void) strcat(buf, "@"); + (void) strcat(buf, snapname); + vfsp = zfs_get_vfs(buf); + kmem_free(buf, len); + } else if (strchr(name, '@')) { + vfsp = zfs_get_vfs(name); + } + + if (vfsp) { + /* + * Always force the unmount for snapshots. 
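+ * A snapshot is mounted read-only, so forcing the unmount here cannot
+ * lose any unwritten data.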
+ */ + int flag = MS_FORCE; + int err; + + if ((err = vn_vfswlock(vfsp->vfs_vnodecovered)) != 0) { + VFS_RELE(vfsp); + return (err); + } + VFS_RELE(vfsp); + if ((err = dounmount(vfsp, flag, kcred)) != 0) + return (err); + } + return (0); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_value short name of snapshot + * + * outputs: none + */ +static int +zfs_ioc_destroy_snaps(zfs_cmd_t *zc) +{ + int err; + + if (snapshot_namecheck(zc->zc_value, NULL, NULL) != 0) + return (EINVAL); + err = dmu_objset_find(zc->zc_name, + zfs_unmount_snap, zc->zc_value, DS_FIND_CHILDREN); + if (err) + return (err); + return (dmu_snapshots_destroy(zc->zc_name, zc->zc_value)); +} + +/* + * inputs: + * zc_name name of dataset to destroy + * zc_objset_type type of objset + * + * outputs: none + */ +static int +zfs_ioc_destroy(zfs_cmd_t *zc) +{ + if (strchr(zc->zc_name, '@') && zc->zc_objset_type == DMU_OST_ZFS) { + int err = zfs_unmount_snap(zc->zc_name, NULL); + if (err) + return (err); + } + + return (dmu_objset_destroy(zc->zc_name)); +} + +/* + * inputs: + * zc_name name of dataset to rollback (to most recent snapshot) + * + * outputs: none + */ +static int +zfs_ioc_rollback(zfs_cmd_t *zc) +{ + objset_t *os; + int error; + zfsvfs_t *zfsvfs = NULL; + + /* + * Get the zfsvfs for the receiving objset. There + * won't be one if we're operating on a zvol, if the + * objset doesn't exist yet, or is not mounted. + */ + error = dmu_objset_open(zc->zc_name, DMU_OST_ANY, DS_MODE_USER, &os); + if (error) + return (error); + + if (dmu_objset_type(os) == DMU_OST_ZFS) { + mutex_enter(&os->os->os_user_ptr_lock); + zfsvfs = dmu_objset_get_user(os); + if (zfsvfs != NULL) + VFS_HOLD(zfsvfs->z_vfs); + mutex_exit(&os->os->os_user_ptr_lock); + } + + if (zfsvfs != NULL) { + char *osname; + int mode; + + osname = kmem_alloc(MAXNAMELEN, KM_SLEEP); + error = zfs_suspend_fs(zfsvfs, osname, &mode); + if (error == 0) { + int resume_err; + + ASSERT(strcmp(osname, zc->zc_name) == 0); + error = dmu_objset_rollback(os); + resume_err = zfs_resume_fs(zfsvfs, osname, mode); + error = error ? error : resume_err; + } else { + dmu_objset_close(os); + } + kmem_free(osname, MAXNAMELEN); + VFS_RELE(zfsvfs->z_vfs); + } else { + error = dmu_objset_rollback(os); + } + /* Note, the dmu_objset_rollback() releases the objset for us. */ + + return (error); +} + +/* + * inputs: + * zc_name old name of dataset + * zc_value new name of dataset + * zc_cookie recursive flag (only valid for snapshots) + * + * outputs: none + */ +static int +zfs_ioc_rename(zfs_cmd_t *zc) +{ + boolean_t recursive = zc->zc_cookie & 1; + + zc->zc_value[sizeof (zc->zc_value) - 1] = '\0'; + if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0 || + strchr(zc->zc_value, '%')) + return (EINVAL); + + /* + * Unmount snapshot unless we're doing a recursive rename, + * in which case the dataset code figures out which snapshots + * to unmount. 
+ */ + if (!recursive && strchr(zc->zc_name, '@') != NULL && + zc->zc_objset_type == DMU_OST_ZFS) { + int err = zfs_unmount_snap(zc->zc_name, NULL); + if (err) + return (err); + } + return (dmu_objset_rename(zc->zc_name, zc->zc_value, recursive)); +} + +static void +clear_props(char *dataset, nvlist_t *props) +{ + zfs_cmd_t *zc; + nvpair_t *prop; + + if (props == NULL) + return; + zc = kmem_alloc(sizeof (zfs_cmd_t), KM_SLEEP); + (void) strcpy(zc->zc_name, dataset); + for (prop = nvlist_next_nvpair(props, NULL); prop; + prop = nvlist_next_nvpair(props, prop)) { + (void) strcpy(zc->zc_value, nvpair_name(prop)); + if (zfs_secpolicy_inherit(zc, CRED()) == 0) + (void) zfs_ioc_inherit_prop(zc); + } + kmem_free(zc, sizeof (zfs_cmd_t)); +} + +/* + * inputs: + * zc_name name of containing filesystem + * zc_nvlist_src{_size} nvlist of properties to apply + * zc_value name of snapshot to create + * zc_string name of clone origin (if DRR_FLAG_CLONE) + * zc_cookie file descriptor to recv from + * zc_begin_record the BEGIN record of the stream (not byteswapped) + * zc_guid force flag + * + * outputs: + * zc_cookie number of bytes read + */ +static int +zfs_ioc_recv(zfs_cmd_t *zc) +{ + file_t *fp; + objset_t *os; + dmu_recv_cookie_t drc; + zfsvfs_t *zfsvfs = NULL; + boolean_t force = (boolean_t)zc->zc_guid; + int error, fd; + offset_t off; + nvlist_t *props = NULL; + nvlist_t *origprops = NULL; + objset_t *origin = NULL; + char *tosnap; + char tofs[ZFS_MAXNAMELEN]; + + if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0 || + strchr(zc->zc_value, '@') == NULL || + strchr(zc->zc_value, '%')) + return (EINVAL); + + (void) strcpy(tofs, zc->zc_value); + tosnap = strchr(tofs, '@'); + *tosnap = '\0'; + tosnap++; + + if (zc->zc_nvlist_src != NULL && + (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, + &props)) != 0) + return (error); + + fd = zc->zc_cookie; + fp = getf(fd); + if (fp == NULL) { + nvlist_free(props); + return (EBADF); + } + + if (dmu_objset_open(tofs, DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &os) == 0) { + /* + * Try to get the zfsvfs for the receiving objset. + * There won't be one if we're operating on a zvol, + * if the objset doesn't exist yet, or is not mounted. + */ + mutex_enter(&os->os->os_user_ptr_lock); + if (zfsvfs = dmu_objset_get_user(os)) { + if (!mutex_tryenter(&zfsvfs->z_online_recv_lock)) { + mutex_exit(&os->os->os_user_ptr_lock); + dmu_objset_close(os); + zfsvfs = NULL; + error = EBUSY; + goto out; + } + VFS_HOLD(zfsvfs->z_vfs); + } + mutex_exit(&os->os->os_user_ptr_lock); + + /* + * If new properties are supplied, they are to completely + * replace the existing ones, so stash away the existing ones. + */ + if (props) + (void) dsl_prop_get_all(os, &origprops, TRUE); + + dmu_objset_close(os); + } + + if (zc->zc_string[0]) { + error = dmu_objset_open(zc->zc_string, DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &origin); + if (error) + goto out; + } + + error = dmu_recv_begin(tofs, tosnap, &zc->zc_begin_record, + force, origin, zfsvfs != NULL, &drc); + if (origin) + dmu_objset_close(origin); + if (error) + goto out; + + /* + * Reset properties. We do this before we receive the stream + * so that the properties are applied to the new data. + */ + if (props) { + clear_props(tofs, origprops); + /* + * XXX - Note, this is all-or-nothing; should be best-effort. 
+ */ + (void) zfs_set_prop_nvlist(tofs, props); + } + + off = fp->f_offset; + error = dmu_recv_stream(&drc, fp->f_vnode, &off); + + if (error == 0 && zfsvfs) { + char *osname; + int mode; + + /* online recv */ + osname = kmem_alloc(MAXNAMELEN, KM_SLEEP); + error = zfs_suspend_fs(zfsvfs, osname, &mode); + if (error == 0) { + int resume_err; + + error = dmu_recv_end(&drc); + resume_err = zfs_resume_fs(zfsvfs, osname, mode); + error = error ? error : resume_err; + } else { + dmu_recv_abort_cleanup(&drc); + } + kmem_free(osname, MAXNAMELEN); + } else if (error == 0) { + error = dmu_recv_end(&drc); + } + + zc->zc_cookie = off - fp->f_offset; + if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0) + fp->f_offset = off; + + /* + * On error, restore the original props. + */ + if (error && props) { + clear_props(tofs, props); + (void) zfs_set_prop_nvlist(tofs, origprops); + } +out: + if (zfsvfs) { + mutex_exit(&zfsvfs->z_online_recv_lock); + VFS_RELE(zfsvfs->z_vfs); + } + nvlist_free(props); + nvlist_free(origprops); + releasef(fd); + return (error); +} + +/* + * inputs: + * zc_name name of snapshot to send + * zc_value short name of incremental fromsnap (may be empty) + * zc_cookie file descriptor to send stream to + * zc_obj fromorigin flag (mutually exclusive with zc_value) + * + * outputs: none + */ +static int +zfs_ioc_send(zfs_cmd_t *zc) +{ + objset_t *fromsnap = NULL; + objset_t *tosnap; + file_t *fp; + int error; + offset_t off; + + error = dmu_objset_open(zc->zc_name, DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &tosnap); + if (error) + return (error); + + if (zc->zc_value[0] != '\0') { + char *buf; + char *cp; + + buf = kmem_alloc(MAXPATHLEN, KM_SLEEP); + (void) strncpy(buf, zc->zc_name, MAXPATHLEN); + cp = strchr(buf, '@'); + if (cp) + *(cp+1) = 0; + (void) strncat(buf, zc->zc_value, MAXPATHLEN); + error = dmu_objset_open(buf, DMU_OST_ANY, + DS_MODE_USER | DS_MODE_READONLY, &fromsnap); + kmem_free(buf, MAXPATHLEN); + if (error) { + dmu_objset_close(tosnap); + return (error); + } + } + + fp = getf(zc->zc_cookie); + if (fp == NULL) { + dmu_objset_close(tosnap); + if (fromsnap) + dmu_objset_close(fromsnap); + return (EBADF); + } + + off = fp->f_offset; + error = dmu_sendbackup(tosnap, fromsnap, zc->zc_obj, fp->f_vnode, &off); + + if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0) + fp->f_offset = off; + releasef(zc->zc_cookie); + if (fromsnap) + dmu_objset_close(fromsnap); + dmu_objset_close(tosnap); + return (error); +} + +static int +zfs_ioc_inject_fault(zfs_cmd_t *zc) +{ + int id, error; + + error = zio_inject_fault(zc->zc_name, (int)zc->zc_guid, &id, + &zc->zc_inject_record); + + if (error == 0) + zc->zc_guid = (uint64_t)id; + + return (error); +} + +static int +zfs_ioc_clear_fault(zfs_cmd_t *zc) +{ + return (zio_clear_fault((int)zc->zc_guid)); +} + +static int +zfs_ioc_inject_list_next(zfs_cmd_t *zc) +{ + int id = (int)zc->zc_guid; + int error; + + error = zio_inject_list_next(&id, zc->zc_name, sizeof (zc->zc_name), + &zc->zc_inject_record); + + zc->zc_guid = id; + + return (error); +} + +static int +zfs_ioc_error_log(zfs_cmd_t *zc) +{ + spa_t *spa; + int error; + size_t count = (size_t)zc->zc_nvlist_dst_size; + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) + return (error); + + error = spa_get_errlog(spa, (void *)(uintptr_t)zc->zc_nvlist_dst, + &count); + if (error == 0) + zc->zc_nvlist_dst_size = count; + else + zc->zc_nvlist_dst_size = spa_get_errlog_size(spa); + + spa_close(spa, FTAG); + + return (error); +} + +static int +zfs_ioc_clear(zfs_cmd_t *zc) +{ + 
spa_t *spa; + vdev_t *vd; + int error; + + /* + * On zpool clear we also fix up missing slogs + */ + mutex_enter(&spa_namespace_lock); + spa = spa_lookup(zc->zc_name); + if (spa == NULL) { + mutex_exit(&spa_namespace_lock); + return (EIO); + } + if (spa->spa_log_state == SPA_LOG_MISSING) { + /* we need to let spa_open/spa_load clear the chains */ + spa->spa_log_state = SPA_LOG_CLEAR; + } + mutex_exit(&spa_namespace_lock); + + if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) + return (error); + + spa_vdev_state_enter(spa); + + if (zc->zc_guid == 0) { + vd = NULL; + } else { + vd = spa_lookup_by_guid(spa, zc->zc_guid, B_TRUE); + if (vd == NULL) { + (void) spa_vdev_state_exit(spa, NULL, ENODEV); + spa_close(spa, FTAG); + return (ENODEV); + } + } + + vdev_clear(spa, vd); + + (void) spa_vdev_state_exit(spa, NULL, 0); + + /* + * Resume any suspended I/Os. + */ + zio_resume(spa); + + spa_close(spa, FTAG); + + return (0); +} + +/* + * inputs: + * zc_name name of filesystem + * zc_value name of origin snapshot + * + * outputs: none + */ +static int +zfs_ioc_promote(zfs_cmd_t *zc) +{ + char *cp; + + /* + * We don't need to unmount *all* the origin fs's snapshots, but + * it's easier. + */ + cp = strchr(zc->zc_value, '@'); + if (cp) + *cp = '\0'; + (void) dmu_objset_find(zc->zc_value, + zfs_unmount_snap, NULL, DS_FIND_SNAPSHOTS); + return (dsl_dataset_promote(zc->zc_name)); +} + +/* + * We don't want to have a hard dependency + * against some special symbols in sharefs + * nfs, and smbsrv. Determine them if needed when + * the first file system is shared. + * Neither sharefs, nfs or smbsrv are unloadable modules. + */ +int (*znfsexport_fs)(void *arg); +int (*zshare_fs)(enum sharefs_sys_op, share_t *, uint32_t); +int (*zsmbexport_fs)(void *arg, boolean_t add_share); + +int zfs_nfsshare_inited; +int zfs_smbshare_inited; + +ddi_modhandle_t nfs_mod; +ddi_modhandle_t sharefs_mod; +ddi_modhandle_t smbsrv_mod; +kmutex_t zfs_share_lock; + +static int +zfs_init_sharefs() +{ + int error; + + ASSERT(MUTEX_HELD(&zfs_share_lock)); + /* Both NFS and SMB shares also require sharetab support. 
*/ + if (sharefs_mod == NULL && ((sharefs_mod = + ddi_modopen("fs/sharefs", + KRTLD_MODE_FIRST, &error)) == NULL)) { + return (ENOSYS); + } + if (zshare_fs == NULL && ((zshare_fs = + (int (*)(enum sharefs_sys_op, share_t *, uint32_t)) + ddi_modsym(sharefs_mod, "sharefs_impl", &error)) == NULL)) { + return (ENOSYS); + } + return (0); +} + +static int +zfs_ioc_share(zfs_cmd_t *zc) +{ + int error; + int opcode; + + switch (zc->zc_share.z_sharetype) { + case ZFS_SHARE_NFS: + case ZFS_UNSHARE_NFS: + if (zfs_nfsshare_inited == 0) { + mutex_enter(&zfs_share_lock); + if (nfs_mod == NULL && ((nfs_mod = ddi_modopen("fs/nfs", + KRTLD_MODE_FIRST, &error)) == NULL)) { + mutex_exit(&zfs_share_lock); + return (ENOSYS); + } + if (znfsexport_fs == NULL && + ((znfsexport_fs = (int (*)(void *)) + ddi_modsym(nfs_mod, + "nfs_export", &error)) == NULL)) { + mutex_exit(&zfs_share_lock); + return (ENOSYS); + } + error = zfs_init_sharefs(); + if (error) { + mutex_exit(&zfs_share_lock); + return (ENOSYS); + } + zfs_nfsshare_inited = 1; + mutex_exit(&zfs_share_lock); + } + break; + case ZFS_SHARE_SMB: + case ZFS_UNSHARE_SMB: + if (zfs_smbshare_inited == 0) { + mutex_enter(&zfs_share_lock); + if (smbsrv_mod == NULL && ((smbsrv_mod = + ddi_modopen("drv/smbsrv", + KRTLD_MODE_FIRST, &error)) == NULL)) { + mutex_exit(&zfs_share_lock); + return (ENOSYS); + } + if (zsmbexport_fs == NULL && ((zsmbexport_fs = + (int (*)(void *, boolean_t))ddi_modsym(smbsrv_mod, + "smb_server_share", &error)) == NULL)) { + mutex_exit(&zfs_share_lock); + return (ENOSYS); + } + error = zfs_init_sharefs(); + if (error) { + mutex_exit(&zfs_share_lock); + return (ENOSYS); + } + zfs_smbshare_inited = 1; + mutex_exit(&zfs_share_lock); + } + break; + default: + return (EINVAL); + } + + switch (zc->zc_share.z_sharetype) { + case ZFS_SHARE_NFS: + case ZFS_UNSHARE_NFS: + if (error = + znfsexport_fs((void *) + (uintptr_t)zc->zc_share.z_exportdata)) + return (error); + break; + case ZFS_SHARE_SMB: + case ZFS_UNSHARE_SMB: + if (error = zsmbexport_fs((void *) + (uintptr_t)zc->zc_share.z_exportdata, + zc->zc_share.z_sharetype == ZFS_SHARE_SMB ? + B_TRUE : B_FALSE)) { + return (error); + } + break; + } + + opcode = (zc->zc_share.z_sharetype == ZFS_SHARE_NFS || + zc->zc_share.z_sharetype == ZFS_SHARE_SMB) ? + SHAREFS_ADD : SHAREFS_REMOVE; + + /* + * Add or remove share from sharetab + */ + error = zshare_fs(opcode, + (void *)(uintptr_t)zc->zc_share.z_sharedata, + zc->zc_share.z_sharemax); + + return (error); + +} + +/* + * pool create, destroy, and export don't log the history as part of + * zfsdev_ioctl, but rather zfs_ioc_pool_create, and zfs_ioc_pool_export + * do the logging of those commands. 
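+ *
+ * Each zfs_ioc_vec[] entry gives the handler, the security policy
+ * check performed before it runs, how zc_name is validated (pool
+ * name, dataset name, or no name at all) and whether the command is
+ * logged to the pool history.  zfsdev_ioctl() indexes this table by
+ * (cmd - ZFS_IOC), so the order of entries must match the ZFS_IOC_*
+ * command numbers.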
+ */ +static zfs_ioc_vec_t zfs_ioc_vec[] = { + { zfs_ioc_pool_create, zfs_secpolicy_config, POOL_NAME, B_FALSE }, + { zfs_ioc_pool_destroy, zfs_secpolicy_config, POOL_NAME, B_FALSE }, + { zfs_ioc_pool_import, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_pool_export, zfs_secpolicy_config, POOL_NAME, B_FALSE }, + { zfs_ioc_pool_configs, zfs_secpolicy_none, NO_NAME, B_FALSE }, + { zfs_ioc_pool_stats, zfs_secpolicy_read, POOL_NAME, B_FALSE }, + { zfs_ioc_pool_tryimport, zfs_secpolicy_config, NO_NAME, B_FALSE }, + { zfs_ioc_pool_scrub, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_pool_freeze, zfs_secpolicy_config, NO_NAME, B_FALSE }, + { zfs_ioc_pool_upgrade, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_pool_get_history, zfs_secpolicy_config, POOL_NAME, B_FALSE }, + { zfs_ioc_vdev_add, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_vdev_remove, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_vdev_set_state, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_vdev_attach, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_vdev_detach, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_vdev_setpath, zfs_secpolicy_config, POOL_NAME, B_FALSE }, + { zfs_ioc_objset_stats, zfs_secpolicy_read, DATASET_NAME, B_FALSE }, + { zfs_ioc_objset_zplprops, zfs_secpolicy_read, DATASET_NAME, B_FALSE }, + { zfs_ioc_dataset_list_next, zfs_secpolicy_read, + DATASET_NAME, B_FALSE }, + { zfs_ioc_snapshot_list_next, zfs_secpolicy_read, + DATASET_NAME, B_FALSE }, + { zfs_ioc_set_prop, zfs_secpolicy_none, DATASET_NAME, B_TRUE }, + { zfs_ioc_create_minor, zfs_secpolicy_minor, DATASET_NAME, B_FALSE }, + { zfs_ioc_remove_minor, zfs_secpolicy_minor, DATASET_NAME, B_FALSE }, + { zfs_ioc_create, zfs_secpolicy_create, DATASET_NAME, B_TRUE }, + { zfs_ioc_destroy, zfs_secpolicy_destroy, DATASET_NAME, B_TRUE }, + { zfs_ioc_rollback, zfs_secpolicy_rollback, DATASET_NAME, B_TRUE }, + { zfs_ioc_rename, zfs_secpolicy_rename, DATASET_NAME, B_TRUE }, + { zfs_ioc_recv, zfs_secpolicy_receive, DATASET_NAME, B_TRUE }, + { zfs_ioc_send, zfs_secpolicy_send, DATASET_NAME, B_TRUE }, + { zfs_ioc_inject_fault, zfs_secpolicy_inject, NO_NAME, B_FALSE }, + { zfs_ioc_clear_fault, zfs_secpolicy_inject, NO_NAME, B_FALSE }, + { zfs_ioc_inject_list_next, zfs_secpolicy_inject, NO_NAME, B_FALSE }, + { zfs_ioc_error_log, zfs_secpolicy_inject, POOL_NAME, B_FALSE }, + { zfs_ioc_clear, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_promote, zfs_secpolicy_promote, DATASET_NAME, B_TRUE }, + { zfs_ioc_destroy_snaps, zfs_secpolicy_destroy, DATASET_NAME, B_TRUE }, + { zfs_ioc_snapshot, zfs_secpolicy_snapshot, DATASET_NAME, B_TRUE }, + { zfs_ioc_dsobj_to_dsname, zfs_secpolicy_config, POOL_NAME, B_FALSE }, + { zfs_ioc_obj_to_path, zfs_secpolicy_config, NO_NAME, B_FALSE }, + { zfs_ioc_pool_set_props, zfs_secpolicy_config, POOL_NAME, B_TRUE }, + { zfs_ioc_pool_get_props, zfs_secpolicy_read, POOL_NAME, B_FALSE }, + { zfs_ioc_set_fsacl, zfs_secpolicy_fsacl, DATASET_NAME, B_TRUE }, + { zfs_ioc_get_fsacl, zfs_secpolicy_read, DATASET_NAME, B_FALSE }, + { zfs_ioc_iscsi_perm_check, zfs_secpolicy_iscsi, + DATASET_NAME, B_FALSE }, + { zfs_ioc_share, zfs_secpolicy_share, DATASET_NAME, B_FALSE }, + { zfs_ioc_inherit_prop, zfs_secpolicy_inherit, DATASET_NAME, B_TRUE }, +}; + +static int +zfsdev_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp) +{ + zfs_cmd_t *zc; + uint_t vec; + int error, rc; + + if (getminor(dev) != 0) + return (zvol_ioctl(dev, cmd, arg, flag, cr, rvalp)); + + vec = cmd - 
ZFS_IOC; + ASSERT3U(getmajor(dev), ==, ddi_driver_major(zfs_dip)); + + if (vec >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0])) + return (EINVAL); + + zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP); + + error = xcopyin((void *)arg, zc, sizeof (zfs_cmd_t)); + + if (error == 0) + error = zfs_ioc_vec[vec].zvec_secpolicy(zc, cr); + + /* + * Ensure that all pool/dataset names are valid before we pass down to + * the lower layers. + */ + if (error == 0) { + zc->zc_name[sizeof (zc->zc_name) - 1] = '\0'; + switch (zfs_ioc_vec[vec].zvec_namecheck) { + case POOL_NAME: + if (pool_namecheck(zc->zc_name, NULL, NULL) != 0) + error = EINVAL; + break; + + case DATASET_NAME: + if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0) + error = EINVAL; + break; + + case NO_NAME: + break; + } + } + + if (error == 0) + error = zfs_ioc_vec[vec].zvec_func(zc); + + rc = xcopyout(zc, (void *)arg, sizeof (zfs_cmd_t)); + if (error == 0) { + error = rc; + if (zfs_ioc_vec[vec].zvec_his_log == B_TRUE) + zfs_log_history(zc); + } + + kmem_free(zc, sizeof (zfs_cmd_t)); + return (error); +} + +static int +zfs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) +{ + if (cmd != DDI_ATTACH) + return (DDI_FAILURE); + + if (ddi_create_minor_node(dip, "zfs", S_IFCHR, 0, + DDI_PSEUDO, 0) == DDI_FAILURE) + return (DDI_FAILURE); + + zfs_dip = dip; + + ddi_report_dev(dip); + + return (DDI_SUCCESS); +} + +static int +zfs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) +{ + if (spa_busy() || zfs_busy() || zvol_busy()) + return (DDI_FAILURE); + + if (cmd != DDI_DETACH) + return (DDI_FAILURE); + + zfs_dip = NULL; + + ddi_prop_remove_all(dip); + ddi_remove_minor_node(dip, NULL); + + return (DDI_SUCCESS); +} + +/*ARGSUSED*/ +static int +zfs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) +{ + switch (infocmd) { + case DDI_INFO_DEVT2DEVINFO: + *result = zfs_dip; + return (DDI_SUCCESS); + + case DDI_INFO_DEVT2INSTANCE: + *result = (void *)0; + return (DDI_SUCCESS); + } + + return (DDI_FAILURE); +} + +/* + * OK, so this is a little weird. + * + * /dev/zfs is the control node, i.e. minor 0. + * /dev/zvol/[r]dsk/pool/dataset are the zvols, minor > 0. + * + * /dev/zfs has basically nothing to do except serve up ioctls, + * so most of the standard driver entry points are in zvol.c. 
+ */ +static struct cb_ops zfs_cb_ops = { + zvol_open, /* open */ + zvol_close, /* close */ + zvol_strategy, /* strategy */ + nodev, /* print */ + zvol_dump, /* dump */ + zvol_read, /* read */ + zvol_write, /* write */ + zfsdev_ioctl, /* ioctl */ + nodev, /* devmap */ + nodev, /* mmap */ + nodev, /* segmap */ + nochpoll, /* poll */ + ddi_prop_op, /* prop_op */ + NULL, /* streamtab */ + D_NEW | D_MP | D_64BIT, /* Driver compatibility flag */ + CB_REV, /* version */ + nodev, /* async read */ + nodev, /* async write */ +}; + +static struct dev_ops zfs_dev_ops = { + DEVO_REV, /* version */ + 0, /* refcnt */ + zfs_info, /* info */ + nulldev, /* identify */ + nulldev, /* probe */ + zfs_attach, /* attach */ + zfs_detach, /* detach */ + nodev, /* reset */ + &zfs_cb_ops, /* driver operations */ + NULL, /* no bus operations */ + NULL, /* power */ + ddi_quiesce_not_needed, /* quiesce */ +}; + +static struct modldrv zfs_modldrv = { + &mod_driverops, + "ZFS storage pool", + &zfs_dev_ops +}; + +static struct modlinkage modlinkage = { + MODREV_1, + (void *)&zfs_modlfs, + (void *)&zfs_modldrv, + NULL +}; + + +uint_t zfs_fsyncer_key; +extern uint_t rrw_tsd_key; + +int +_init(void) +{ + int error; + + spa_init(FREAD | FWRITE); + zfs_init(); + zvol_init(); + + if ((error = mod_install(&modlinkage)) != 0) { + zvol_fini(); + zfs_fini(); + spa_fini(); + return (error); + } + + tsd_create(&zfs_fsyncer_key, NULL); + tsd_create(&rrw_tsd_key, NULL); + + error = ldi_ident_from_mod(&modlinkage, &zfs_li); + ASSERT(error == 0); + mutex_init(&zfs_share_lock, NULL, MUTEX_DEFAULT, NULL); + + return (0); +} + +int +_fini(void) +{ + int error; + + if (spa_busy() || zfs_busy() || zvol_busy() || zio_injection_enabled) + return (EBUSY); + + if ((error = mod_remove(&modlinkage)) != 0) + return (error); + + zvol_fini(); + zfs_fini(); + spa_fini(); + if (zfs_nfsshare_inited) + (void) ddi_modclose(nfs_mod); + if (zfs_smbshare_inited) + (void) ddi_modclose(smbsrv_mod); + if (zfs_nfsshare_inited || zfs_smbshare_inited) + (void) ddi_modclose(sharefs_mod); + + tsd_destroy(&zfs_fsyncer_key); + ldi_ident_release(zfs_li); + zfs_li = NULL; + mutex_destroy(&zfs_share_lock); + + return (error); +} + +int +_info(struct modinfo *modinfop) +{ + return (mod_info(&modlinkage, modinfop)); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_log.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_log.c new file mode 100644 index 000000000000..11cd4c264b57 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_log.c @@ -0,0 +1,694 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * All the functions in this file are used to construct the log entries + * to record transactions. They allocate * an intent log transaction + * structure (itx_t) and save within it all the information necessary to + * possibly replay the transaction. The itx is then assigned a sequence + * number and inserted in the in-memory list anchored in the zilog. + */ + +int +zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap) +{ + int isxvattr = (vap->va_mask & AT_XVATTR); + switch (type) { + case Z_FILE: + if (vsecp == NULL && !isxvattr) + return (TX_CREATE); + if (vsecp && isxvattr) + return (TX_CREATE_ACL_ATTR); + if (vsecp) + return (TX_CREATE_ACL); + else + return (TX_CREATE_ATTR); + /*NOTREACHED*/ + case Z_DIR: + if (vsecp == NULL && !isxvattr) + return (TX_MKDIR); + if (vsecp && isxvattr) + return (TX_MKDIR_ACL_ATTR); + if (vsecp) + return (TX_MKDIR_ACL); + else + return (TX_MKDIR_ATTR); + case Z_XATTRDIR: + return (TX_MKXATTR); + } + ASSERT(0); + return (TX_MAX_TYPE); +} + +/* + * build up the log data necessary for logging xvattr_t + * First lr_attr_t is initialized. following the lr_attr_t + * is the mapsize and attribute bitmap copied from the xvattr_t. + * Following the bitmap and bitmapsize two 64 bit words are reserved + * for the create time which may be set. Following the create time + * records a single 64 bit integer which has the bits to set on + * replay for the xvattr. + */ +static void +zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap) +{ + uint32_t *bitmap; + uint64_t *attrs; + uint64_t *crtime; + xoptattr_t *xoap; + void *scanstamp; + int i; + + xoap = xva_getxoptattr(xvap); + ASSERT(xoap); + + lrattr->lr_attr_masksize = xvap->xva_mapsize; + bitmap = &lrattr->lr_attr_bitmap; + for (i = 0; i != xvap->xva_mapsize; i++, bitmap++) { + *bitmap = xvap->xva_reqattrmap[i]; + } + + /* Now pack the attributes up in a single uint64_t */ + attrs = (uint64_t *)bitmap; + crtime = attrs + 1; + scanstamp = (caddr_t)(crtime + 2); + *attrs = 0; + if (XVA_ISSET_REQ(xvap, XAT_READONLY)) + *attrs |= (xoap->xoa_readonly == 0) ? 0 : + XAT0_READONLY; + if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) + *attrs |= (xoap->xoa_hidden == 0) ? 0 : + XAT0_HIDDEN; + if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) + *attrs |= (xoap->xoa_system == 0) ? 0 : + XAT0_SYSTEM; + if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) + *attrs |= (xoap->xoa_archive == 0) ? 0 : + XAT0_ARCHIVE; + if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) + *attrs |= (xoap->xoa_immutable == 0) ? 0 : + XAT0_IMMUTABLE; + if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) + *attrs |= (xoap->xoa_nounlink == 0) ? 0 : + XAT0_NOUNLINK; + if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) + *attrs |= (xoap->xoa_appendonly == 0) ? 0 : + XAT0_APPENDONLY; + if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) + *attrs |= (xoap->xoa_opaque == 0) ? 0 : + XAT0_APPENDONLY; + if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) + *attrs |= (xoap->xoa_nodump == 0) ? 0 : + XAT0_NODUMP; + if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) + *attrs |= (xoap->xoa_av_quarantined == 0) ? 0 : + XAT0_AV_QUARANTINED; + if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) + *attrs |= (xoap->xoa_av_modified == 0) ? 
0 : + XAT0_AV_MODIFIED; + if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) + ZFS_TIME_ENCODE(&xoap->xoa_createtime, crtime); + if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) + bcopy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ); +} + +static void * +zfs_log_fuid_ids(zfs_fuid_info_t *fuidp, void *start) +{ + zfs_fuid_t *zfuid; + uint64_t *fuidloc = start; + + /* First copy in the ACE FUIDs */ + for (zfuid = list_head(&fuidp->z_fuids); zfuid; + zfuid = list_next(&fuidp->z_fuids, zfuid)) { + *fuidloc++ = zfuid->z_logfuid; + } + return (fuidloc); +} + + +static void * +zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start) +{ + zfs_fuid_domain_t *zdomain; + + /* now copy in the domain info, if any */ + if (fuidp->z_domain_str_sz != 0) { + for (zdomain = list_head(&fuidp->z_domains); zdomain; + zdomain = list_next(&fuidp->z_domains, zdomain)) { + bcopy((void *)zdomain->z_domain, start, + strlen(zdomain->z_domain) + 1); + start = (caddr_t)start + + strlen(zdomain->z_domain) + 1; + } + } + return (start); +} + +/* + * zfs_log_create() is used to handle TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, + * TX_MKDIR_ATTR and TX_MKXATTR + * transactions. + * + * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID + * domain information appended prior to the name. In this case the + * uid/gid in the log record will be a log centric FUID. + * + * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that + * may contain attributes, ACL and optional fuid information. + * + * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify + * and ACL and normal users/groups in the ACEs. + * + * There may be an optional xvattr attribute information similar + * to zfs_log_setattr. + * + * Also, after the file name "domain" strings may be appended. + */ +void +zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *dzp, znode_t *zp, char *name, vsecattr_t *vsecp, + zfs_fuid_info_t *fuidp, vattr_t *vap) +{ + itx_t *itx; + uint64_t seq; + lr_create_t *lr; + lr_acl_create_t *lracl; + size_t aclsize; + size_t xvatsize = 0; + size_t txsize; + xvattr_t *xvap = (xvattr_t *)vap; + void *end; + size_t lrsize; + size_t namesize = strlen(name) + 1; + size_t fuidsz = 0; + + if (zilog == NULL) + return; + + /* + * If we have FUIDs present then add in space for + * domains and ACE fuid's if any. + */ + if (fuidp) { + fuidsz += fuidp->z_domain_str_sz; + fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t); + } + + if (vap->va_mask & AT_XVATTR) + xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize); + + if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR || + (int)txtype == TX_CREATE || (int)txtype == TX_MKDIR || + (int)txtype == TX_MKXATTR) { + txsize = sizeof (*lr) + namesize + fuidsz + xvatsize; + lrsize = sizeof (*lr); + } else { + aclsize = (vsecp) ? 
vsecp->vsa_aclentsz : 0; + txsize = + sizeof (lr_acl_create_t) + namesize + fuidsz + + ZIL_ACE_LENGTH(aclsize) + xvatsize; + lrsize = sizeof (lr_acl_create_t); + } + + itx = zil_itx_create(txtype, txsize); + + lr = (lr_create_t *)&itx->itx_lr; + lr->lr_doid = dzp->z_id; + lr->lr_foid = zp->z_id; + lr->lr_mode = zp->z_phys->zp_mode; + if (!IS_EPHEMERAL(zp->z_phys->zp_uid)) { + lr->lr_uid = (uint64_t)zp->z_phys->zp_uid; + } else { + lr->lr_uid = fuidp->z_fuid_owner; + } + if (!IS_EPHEMERAL(zp->z_phys->zp_gid)) { + lr->lr_gid = (uint64_t)zp->z_phys->zp_gid; + } else { + lr->lr_gid = fuidp->z_fuid_group; + } + lr->lr_gen = zp->z_phys->zp_gen; + lr->lr_crtime[0] = zp->z_phys->zp_crtime[0]; + lr->lr_crtime[1] = zp->z_phys->zp_crtime[1]; + lr->lr_rdev = zp->z_phys->zp_rdev; + + /* + * Fill in xvattr info if any + */ + if (vap->va_mask & AT_XVATTR) { + zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap); + end = (caddr_t)lr + lrsize + xvatsize; + } else { + end = (caddr_t)lr + lrsize; + } + + /* Now fill in any ACL info */ + + if (vsecp) { + lracl = (lr_acl_create_t *)&itx->itx_lr; + lracl->lr_aclcnt = vsecp->vsa_aclcnt; + lracl->lr_acl_bytes = aclsize; + lracl->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0; + lracl->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0; + if (vsecp->vsa_aclflags & VSA_ACE_ACLFLAGS) + lracl->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags; + else + lracl->lr_acl_flags = 0; + + bcopy(vsecp->vsa_aclentp, end, aclsize); + end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize); + } + + /* drop in FUID info */ + if (fuidp) { + end = zfs_log_fuid_ids(fuidp, end); + end = zfs_log_fuid_domains(fuidp, end); + } + /* + * Now place file name in log record + */ + bcopy(name, end, namesize); + + seq = zil_itx_assign(zilog, itx, tx); + dzp->z_last_itx = seq; + zp->z_last_itx = seq; +} + +/* + * zfs_log_remove() handles both TX_REMOVE and TX_RMDIR transactions. + */ +void +zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *dzp, char *name) +{ + itx_t *itx; + uint64_t seq; + lr_remove_t *lr; + size_t namesize = strlen(name) + 1; + + if (zilog == NULL) + return; + + itx = zil_itx_create(txtype, sizeof (*lr) + namesize); + lr = (lr_remove_t *)&itx->itx_lr; + lr->lr_doid = dzp->z_id; + bcopy(name, (char *)(lr + 1), namesize); + + seq = zil_itx_assign(zilog, itx, tx); + dzp->z_last_itx = seq; +} + +/* + * zfs_log_link() handles TX_LINK transactions. + */ +void +zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *dzp, znode_t *zp, char *name) +{ + itx_t *itx; + uint64_t seq; + lr_link_t *lr; + size_t namesize = strlen(name) + 1; + + if (zilog == NULL) + return; + + itx = zil_itx_create(txtype, sizeof (*lr) + namesize); + lr = (lr_link_t *)&itx->itx_lr; + lr->lr_doid = dzp->z_id; + lr->lr_link_obj = zp->z_id; + bcopy(name, (char *)(lr + 1), namesize); + + seq = zil_itx_assign(zilog, itx, tx); + dzp->z_last_itx = seq; + zp->z_last_itx = seq; +} + +/* + * zfs_log_symlink() handles TX_SYMLINK transactions. 
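zfs_log_remove(), zfs_log_link() and the other simple loggers above all build the same shape of record: a fixed lr_*_t header allocated together with room for the trailing name, which is copied immediately after the structure. A self-contained sketch of that layout, using the illustrative names record_t and make_record rather than the real ZIL types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fixed-size header; the name is stored right after it, as with lr_remove_t. */
typedef struct record {
	uint64_t r_len;		/* total record length */
	uint64_t r_obj;		/* object id the record refers to */
} record_t;

static record_t *
make_record(uint64_t obj, const char *name)
{
	size_t namesize = strlen(name) + 1;
	record_t *rec = malloc(sizeof (*rec) + namesize);

	if (rec == NULL)
		return (NULL);
	rec->r_len = sizeof (*rec) + namesize;
	rec->r_obj = obj;
	memcpy((char *)(rec + 1), name, namesize);	/* name follows header */
	return (rec);
}

int
main(void)
{
	record_t *rec = make_record(42, "dir/file");

	if (rec != NULL) {
		(void) printf("obj %llu name %s\n",
		    (unsigned long long)rec->r_obj, (const char *)(rec + 1));
		free(rec);
	}
	return (0);
}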
+ */ +void +zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *dzp, znode_t *zp, char *name, char *link) +{ + itx_t *itx; + uint64_t seq; + lr_create_t *lr; + size_t namesize = strlen(name) + 1; + size_t linksize = strlen(link) + 1; + + if (zilog == NULL) + return; + + itx = zil_itx_create(txtype, sizeof (*lr) + namesize + linksize); + lr = (lr_create_t *)&itx->itx_lr; + lr->lr_doid = dzp->z_id; + lr->lr_foid = zp->z_id; + lr->lr_mode = zp->z_phys->zp_mode; + lr->lr_uid = zp->z_phys->zp_uid; + lr->lr_gid = zp->z_phys->zp_gid; + lr->lr_gen = zp->z_phys->zp_gen; + lr->lr_crtime[0] = zp->z_phys->zp_crtime[0]; + lr->lr_crtime[1] = zp->z_phys->zp_crtime[1]; + bcopy(name, (char *)(lr + 1), namesize); + bcopy(link, (char *)(lr + 1) + namesize, linksize); + + seq = zil_itx_assign(zilog, itx, tx); + dzp->z_last_itx = seq; + zp->z_last_itx = seq; +} + +/* + * zfs_log_rename() handles TX_RENAME transactions. + */ +void +zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, + znode_t *sdzp, char *sname, znode_t *tdzp, char *dname, znode_t *szp) +{ + itx_t *itx; + uint64_t seq; + lr_rename_t *lr; + size_t snamesize = strlen(sname) + 1; + size_t dnamesize = strlen(dname) + 1; + + if (zilog == NULL) + return; + + itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize); + lr = (lr_rename_t *)&itx->itx_lr; + lr->lr_sdoid = sdzp->z_id; + lr->lr_tdoid = tdzp->z_id; + bcopy(sname, (char *)(lr + 1), snamesize); + bcopy(dname, (char *)(lr + 1) + snamesize, dnamesize); + + seq = zil_itx_assign(zilog, itx, tx); + sdzp->z_last_itx = seq; + tdzp->z_last_itx = seq; + szp->z_last_itx = seq; +} + +/* + * zfs_log_write() handles TX_WRITE transactions. + */ +ssize_t zfs_immediate_write_sz = 32768; + +#define ZIL_MAX_LOG_DATA (SPA_MAXBLOCKSIZE - sizeof (zil_trailer_t) - \ + sizeof (lr_write_t)) + +void +zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype, + znode_t *zp, offset_t off, ssize_t resid, int ioflag) +{ + itx_wr_state_t write_state; + boolean_t slogging; + uintptr_t fsync_cnt; + + if (zilog == NULL || zp->z_unlinked) + return; + + /* + * Writes are handled in three different ways: + * + * WR_INDIRECT: + * In this mode, if we need to commit the write later, then the block + * is immediately written into the file system (using dmu_sync), + * and a pointer to the block is put into the log record. + * When the txg commits the block is linked in. + * This saves additionally writing the data into the log record. + * There are a few requirements for this to occur: + * - write is greater than zfs_immediate_write_sz + * - not using slogs (as slogs are assumed to always be faster + * than writing into the main pool) + * - the write occupies only one block + * WR_COPIED: + * If we know we'll immediately be committing the + * transaction (FSYNC or FDSYNC), the we allocate a larger + * log record here for the data and copy the data in. + * WR_NEED_COPY: + * Otherwise we don't allocate a buffer, and *if* we need to + * flush the write later then a buffer is allocated and + * we retrieve the data using the dmu. 
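The three cases just described condense into a single predicate, sketched below; the parameters stand in for zfs_immediate_write_sz, separate-log detection and the FSYNC/FDSYNC flags rather than using the kernel symbols directly.

#include <stdint.h>

typedef enum { WR_INDIRECT, WR_COPIED, WR_NEED_COPY } wr_state_t;

/*
 * Big single-block writes with no separate log device go indirect,
 * synchronous writes are copied into the record immediately, and
 * everything else defers the copy until the log is actually committed.
 */
static wr_state_t
choose_write_state(int64_t resid, int64_t immediate_sz, int64_t blksz,
    int has_slog, int is_sync_write)
{
	if (resid > immediate_sz && !has_slog && resid <= blksz)
		return (WR_INDIRECT);
	if (is_sync_write)
		return (WR_COPIED);
	return (WR_NEED_COPY);
}

int
main(void)
{
	/* 64K write, no slog, 128K block size, not synchronous -> indirect */
	return (choose_write_state(65536, 32768, 131072, 0, 0) != WR_INDIRECT);
}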
+ */ + slogging = spa_has_slogs(zilog->zl_spa); + if (resid > zfs_immediate_write_sz && !slogging && resid <= zp->z_blksz) + write_state = WR_INDIRECT; + else if (ioflag & (FSYNC | FDSYNC)) + write_state = WR_COPIED; + else + write_state = WR_NEED_COPY; + + if ((fsync_cnt = (uintptr_t)tsd_get(zfs_fsyncer_key)) != 0) { + (void) tsd_set(zfs_fsyncer_key, (void *)(fsync_cnt - 1)); + } + + while (resid) { + itx_t *itx; + lr_write_t *lr; + ssize_t len; + + /* + * If the write would overflow the largest block then split it. + */ + if (write_state != WR_INDIRECT && resid > ZIL_MAX_LOG_DATA) + len = SPA_MAXBLOCKSIZE >> 1; + else + len = resid; + + itx = zil_itx_create(txtype, sizeof (*lr) + + (write_state == WR_COPIED ? len : 0)); + lr = (lr_write_t *)&itx->itx_lr; + if (write_state == WR_COPIED && dmu_read(zp->z_zfsvfs->z_os, + zp->z_id, off, len, lr + 1) != 0) { + kmem_free(itx, offsetof(itx_t, itx_lr) + + itx->itx_lr.lrc_reclen); + itx = zil_itx_create(txtype, sizeof (*lr)); + lr = (lr_write_t *)&itx->itx_lr; + write_state = WR_NEED_COPY; + } + + itx->itx_wr_state = write_state; + if (write_state == WR_NEED_COPY) + itx->itx_sod += len; + lr->lr_foid = zp->z_id; + lr->lr_offset = off; + lr->lr_length = len; + lr->lr_blkoff = 0; + BP_ZERO(&lr->lr_blkptr); + + itx->itx_private = zp->z_zfsvfs; + + if ((zp->z_sync_cnt != 0) || (fsync_cnt != 0) || + (ioflag & (FSYNC | FDSYNC))) + itx->itx_sync = B_TRUE; + else + itx->itx_sync = B_FALSE; + + zp->z_last_itx = zil_itx_assign(zilog, itx, tx); + + off += len; + resid -= len; + } +} + +/* + * zfs_log_truncate() handles TX_TRUNCATE transactions. + */ +void +zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype, + znode_t *zp, uint64_t off, uint64_t len) +{ + itx_t *itx; + uint64_t seq; + lr_truncate_t *lr; + + if (zilog == NULL || zp->z_unlinked) + return; + + itx = zil_itx_create(txtype, sizeof (*lr)); + lr = (lr_truncate_t *)&itx->itx_lr; + lr->lr_foid = zp->z_id; + lr->lr_offset = off; + lr->lr_length = len; + + itx->itx_sync = (zp->z_sync_cnt != 0); + seq = zil_itx_assign(zilog, itx, tx); + zp->z_last_itx = seq; +} + +/* + * zfs_log_setattr() handles TX_SETATTR transactions. 
+ */ +void +zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype, + znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp) +{ + itx_t *itx; + uint64_t seq; + lr_setattr_t *lr; + xvattr_t *xvap = (xvattr_t *)vap; + size_t recsize = sizeof (lr_setattr_t); + void *start; + + + if (zilog == NULL || zp->z_unlinked) + return; + + /* + * If XVATTR set, then log record size needs to allow + * for lr_attr_t + xvattr mask, mapsize and create time + * plus actual attribute values + */ + if (vap->va_mask & AT_XVATTR) + recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize); + + if (fuidp) + recsize += fuidp->z_domain_str_sz; + + itx = zil_itx_create(txtype, recsize); + lr = (lr_setattr_t *)&itx->itx_lr; + lr->lr_foid = zp->z_id; + lr->lr_mask = (uint64_t)mask_applied; + lr->lr_mode = (uint64_t)vap->va_mode; + if ((mask_applied & AT_UID) && IS_EPHEMERAL(vap->va_uid)) + lr->lr_uid = fuidp->z_fuid_owner; + else + lr->lr_uid = (uint64_t)vap->va_uid; + + if ((mask_applied & AT_GID) && IS_EPHEMERAL(vap->va_gid)) + lr->lr_gid = fuidp->z_fuid_group; + else + lr->lr_gid = (uint64_t)vap->va_gid; + + lr->lr_size = (uint64_t)vap->va_size; + ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime); + ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime); + start = (lr_setattr_t *)(lr + 1); + if (vap->va_mask & AT_XVATTR) { + zfs_log_xvattr((lr_attr_t *)start, xvap); + start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize); + } + + /* + * Now stick on domain information if any on end + */ + + if (fuidp) + (void) zfs_log_fuid_domains(fuidp, start); + + itx->itx_sync = (zp->z_sync_cnt != 0); + seq = zil_itx_assign(zilog, itx, tx); + zp->z_last_itx = seq; +} + +/* + * zfs_log_acl() handles TX_ACL transactions. + */ +void +zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp, + vsecattr_t *vsecp, zfs_fuid_info_t *fuidp) +{ + itx_t *itx; + uint64_t seq; + lr_acl_v0_t *lrv0; + lr_acl_t *lr; + int txtype; + int lrsize; + size_t txsize; + size_t aclbytes = vsecp->vsa_aclentsz; + + if (zilog == NULL || zp->z_unlinked) + return; + + txtype = (zp->z_zfsvfs->z_version < ZPL_VERSION_FUID) ? + TX_ACL_V0 : TX_ACL; + + if (txtype == TX_ACL) + lrsize = sizeof (*lr); + else + lrsize = sizeof (*lrv0); + + txsize = lrsize + + ((txtype == TX_ACL) ? ZIL_ACE_LENGTH(aclbytes) : aclbytes) + + (fuidp ? fuidp->z_domain_str_sz : 0) + + sizeof (uint64_t) * (fuidp ? fuidp->z_fuid_cnt : 0); + + itx = zil_itx_create(txtype, txsize); + + lr = (lr_acl_t *)&itx->itx_lr; + lr->lr_foid = zp->z_id; + if (txtype == TX_ACL) { + lr->lr_acl_bytes = aclbytes; + lr->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0; + lr->lr_fuidcnt = fuidp ? 
fuidp->z_fuid_cnt : 0; + if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) + lr->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags; + else + lr->lr_acl_flags = 0; + } + lr->lr_aclcnt = (uint64_t)vsecp->vsa_aclcnt; + + if (txtype == TX_ACL_V0) { + lrv0 = (lr_acl_v0_t *)lr; + bcopy(vsecp->vsa_aclentp, (ace_t *)(lrv0 + 1), aclbytes); + } else { + void *start = (ace_t *)(lr + 1); + + bcopy(vsecp->vsa_aclentp, start, aclbytes); + + start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes); + + if (fuidp) { + start = zfs_log_fuid_ids(fuidp, start); + (void) zfs_log_fuid_domains(fuidp, start); + } + } + + itx->itx_sync = (zp->z_sync_cnt != 0); + seq = zil_itx_assign(zilog, itx, tx); + zp->z_last_itx = seq; +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_replay.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_replay.c new file mode 100644 index 000000000000..85b79703a780 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_replay.c @@ -0,0 +1,878 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Functions to replay ZFS intent log (ZIL) records + * The functions are called through a function vector (zfs_replay_vector) + * which is indexed by the transaction type. + */ + +static void +zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode, + uint64_t uid, uint64_t gid, uint64_t rdev, uint64_t nodeid) +{ + bzero(vap, sizeof (*vap)); + vap->va_mask = (uint_t)mask; + vap->va_type = IFTOVT(mode); + vap->va_mode = mode & MODEMASK; + vap->va_uid = (uid_t)(IS_EPHEMERAL(uid)) ? -1 : uid; + vap->va_gid = (gid_t)(IS_EPHEMERAL(gid)) ? 
-1 : gid; + vap->va_rdev = zfs_cmpldev(rdev); + vap->va_nodeid = nodeid; +} + +/* ARGSUSED */ +static int +zfs_replay_error(zfsvfs_t *zfsvfs, lr_t *lr, boolean_t byteswap) +{ + return (ENOTSUP); +} + +static void +zfs_replay_xvattr(lr_attr_t *lrattr, xvattr_t *xvap) +{ + xoptattr_t *xoap = NULL; + uint64_t *attrs; + uint64_t *crtime; + uint32_t *bitmap; + void *scanstamp; + int i; + + xvap->xva_vattr.va_mask |= AT_XVATTR; + if ((xoap = xva_getxoptattr(xvap)) == NULL) { + xvap->xva_vattr.va_mask &= ~AT_XVATTR; /* shouldn't happen */ + return; + } + + ASSERT(lrattr->lr_attr_masksize == xvap->xva_mapsize); + + bitmap = &lrattr->lr_attr_bitmap; + for (i = 0; i != lrattr->lr_attr_masksize; i++, bitmap++) + xvap->xva_reqattrmap[i] = *bitmap; + + attrs = (uint64_t *)(lrattr + lrattr->lr_attr_masksize - 1); + crtime = attrs + 1; + scanstamp = (caddr_t)(crtime + 2); + + if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) + xoap->xoa_hidden = ((*attrs & XAT0_HIDDEN) != 0); + if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) + xoap->xoa_system = ((*attrs & XAT0_SYSTEM) != 0); + if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) + xoap->xoa_archive = ((*attrs & XAT0_ARCHIVE) != 0); + if (XVA_ISSET_REQ(xvap, XAT_READONLY)) + xoap->xoa_readonly = ((*attrs & XAT0_READONLY) != 0); + if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) + xoap->xoa_immutable = ((*attrs & XAT0_IMMUTABLE) != 0); + if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) + xoap->xoa_nounlink = ((*attrs & XAT0_NOUNLINK) != 0); + if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) + xoap->xoa_appendonly = ((*attrs & XAT0_APPENDONLY) != 0); + if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) + xoap->xoa_nodump = ((*attrs & XAT0_NODUMP) != 0); + if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) + xoap->xoa_opaque = ((*attrs & XAT0_OPAQUE) != 0); + if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) + xoap->xoa_av_modified = ((*attrs & XAT0_AV_MODIFIED) != 0); + if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) + xoap->xoa_av_quarantined = + ((*attrs & XAT0_AV_QUARANTINED) != 0); + if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) + ZFS_TIME_DECODE(&xoap->xoa_createtime, crtime); + if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) + bcopy(scanstamp, xoap->xoa_av_scanstamp, AV_SCANSTAMP_SZ); +} + +static int +zfs_replay_domain_cnt(uint64_t uid, uint64_t gid) +{ + uint64_t uid_idx; + uint64_t gid_idx; + int domcnt = 0; + + uid_idx = FUID_INDEX(uid); + gid_idx = FUID_INDEX(gid); + if (uid_idx) + domcnt++; + if (gid_idx > 0 && gid_idx != uid_idx) + domcnt++; + + return (domcnt); +} + +static void * +zfs_replay_fuid_domain_common(zfs_fuid_info_t *fuid_infop, void *start, + int domcnt) +{ + int i; + + for (i = 0; i != domcnt; i++) { + fuid_infop->z_domain_table[i] = start; + start = (caddr_t)start + strlen(start) + 1; + } + + return (start); +} + +/* + * Set the uid/gid in the fuid_info structure. 
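zfs_replay_domain_cnt() above decides how many domain strings follow in the record: one for each distinct non-zero FUID index between the uid and the gid. A standalone sketch of the same counting rule; the packing of a domain-table index into the upper 32 bits is an assumption for illustration, not something shown in this file.

#include <stdint.h>
#include <stdio.h>

/* Assumed FUID model: domain-table index above, rid below. */
#define FUID_INDEX(x)	((uint64_t)(x) >> 32)

/* Same counting rule as zfs_replay_domain_cnt(). */
static int
domain_cnt(uint64_t uid, uint64_t gid)
{
	int domcnt = 0;

	if (FUID_INDEX(uid))
		domcnt++;
	if (FUID_INDEX(gid) > 0 && FUID_INDEX(gid) != FUID_INDEX(uid))
		domcnt++;
	return (domcnt);
}

int
main(void)
{
	/* same domain for uid and gid: one string; different domains: two */
	(void) printf("%d\n", domain_cnt((2ULL << 32) | 501, (2ULL << 32) | 513));
	(void) printf("%d\n", domain_cnt((2ULL << 32) | 501, (3ULL << 32) | 513));
	return (0);
}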
+ */ +static void +zfs_replay_fuid_ugid(zfs_fuid_info_t *fuid_infop, uint64_t uid, uint64_t gid) +{ + /* + * If owner or group are log specific FUIDs then slurp up + * domain information and build zfs_fuid_info_t + */ + if (IS_EPHEMERAL(uid)) + fuid_infop->z_fuid_owner = uid; + + if (IS_EPHEMERAL(gid)) + fuid_infop->z_fuid_group = gid; +} + +/* + * Load fuid domains into fuid_info_t + */ +static zfs_fuid_info_t * +zfs_replay_fuid_domain(void *buf, void **end, uint64_t uid, uint64_t gid) +{ + int domcnt; + + zfs_fuid_info_t *fuid_infop; + + fuid_infop = zfs_fuid_info_alloc(); + + domcnt = zfs_replay_domain_cnt(uid, gid); + + if (domcnt == 0) + return (fuid_infop); + + fuid_infop->z_domain_table = + kmem_zalloc(domcnt * sizeof (char **), KM_SLEEP); + + zfs_replay_fuid_ugid(fuid_infop, uid, gid); + + fuid_infop->z_domain_cnt = domcnt; + *end = zfs_replay_fuid_domain_common(fuid_infop, buf, domcnt); + return (fuid_infop); +} + +/* + * load zfs_fuid_t's and fuid_domains into fuid_info_t + */ +static zfs_fuid_info_t * +zfs_replay_fuids(void *start, void **end, int idcnt, int domcnt, uint64_t uid, + uint64_t gid) +{ + uint64_t *log_fuid = (uint64_t *)start; + zfs_fuid_info_t *fuid_infop; + int i; + + fuid_infop = zfs_fuid_info_alloc(); + fuid_infop->z_domain_cnt = domcnt; + + fuid_infop->z_domain_table = + kmem_zalloc(domcnt * sizeof (char **), KM_SLEEP); + + for (i = 0; i != idcnt; i++) { + zfs_fuid_t *zfuid; + + zfuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP); + zfuid->z_logfuid = *log_fuid; + zfuid->z_id = -1; + zfuid->z_domidx = 0; + list_insert_tail(&fuid_infop->z_fuids, zfuid); + log_fuid++; + } + + zfs_replay_fuid_ugid(fuid_infop, uid, gid); + + *end = zfs_replay_fuid_domain_common(fuid_infop, log_fuid, domcnt); + return (fuid_infop); +} + +static void +zfs_replay_swap_attrs(lr_attr_t *lrattr) +{ + /* swap the lr_attr structure */ + byteswap_uint32_array(lrattr, sizeof (*lrattr)); + /* swap the bitmap */ + byteswap_uint32_array(lrattr + 1, (lrattr->lr_attr_masksize - 1) * + sizeof (uint32_t)); + /* swap the attributes, create time + 64 bit word for attributes */ + byteswap_uint64_array((caddr_t)(lrattr + 1) + (sizeof (uint32_t) * + (lrattr->lr_attr_masksize - 1)), 3 * sizeof (uint64_t)); +} + +/* + * Replay file create with optional ACL, xvattr information as well + * as option FUID information. 
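The replay routine below walks the byte layout that zfs_log_create() produced: the fixed lr_acl_create_t, an optional packed xvattr, the ACE array rounded up by ZIL_ACE_LENGTH(), the FUID ids, the domain strings, and finally the file name. A small sketch of computing those offsets; HDR_SIZE and the 8-byte rounding are assumed stand-ins for the real structure size and macro.

#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE	64			/* assumed sizeof (lr_acl_create_t) */
#define ACE_ALIGN(x)	(((x) + 7) & ~7u)	/* models ZIL_ACE_LENGTH() */

static void
layout(uint32_t xvatlen, uint32_t aclbytes, uint32_t fuidcnt, uint32_t domlen)
{
	uint32_t off = HDR_SIZE;

	(void) printf("xvattr  at %u\n", off);	off += xvatlen;
	(void) printf("ACEs    at %u\n", off);	off += ACE_ALIGN(aclbytes);
	(void) printf("fuids   at %u\n", off);	off += fuidcnt * 8;
	(void) printf("domains at %u\n", off);	off += domlen;
	(void) printf("name    at %u\n", off);
}

int
main(void)
{
	layout(32, 44, 2, 20);
	return (0);
}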
+ */ +static int +zfs_replay_create_acl(zfsvfs_t *zfsvfs, + lr_acl_create_t *lracl, boolean_t byteswap) +{ + char *name = NULL; /* location determined later */ + lr_create_t *lr = (lr_create_t *)lracl; + znode_t *dzp; + vnode_t *vp = NULL; + xvattr_t xva; + int vflg = 0; + vsecattr_t vsec = { 0 }; + lr_attr_t *lrattr; + void *aclstart; + void *fuidstart; + size_t xvatlen = 0; + uint64_t txtype; + int error; + + if (byteswap) { + byteswap_uint64_array(lracl, sizeof (*lracl)); + txtype = (int)lr->lr_common.lrc_txtype; + if (txtype == TX_CREATE_ACL_ATTR || + txtype == TX_MKDIR_ACL_ATTR) { + lrattr = (lr_attr_t *)(caddr_t)(lracl + 1); + zfs_replay_swap_attrs(lrattr); + xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize); + } + + aclstart = (caddr_t)(lracl + 1) + xvatlen; + zfs_ace_byteswap(aclstart, lracl->lr_acl_bytes, B_FALSE); + /* swap fuids */ + if (lracl->lr_fuidcnt) { + byteswap_uint64_array((caddr_t)aclstart + + ZIL_ACE_LENGTH(lracl->lr_acl_bytes), + lracl->lr_fuidcnt * sizeof (uint64_t)); + } + } + + if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0) + return (error); + + xva_init(&xva); + zfs_init_vattr(&xva.xva_vattr, AT_TYPE | AT_MODE | AT_UID | AT_GID, + lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, lr->lr_foid); + + /* + * All forms of zfs create (create, mkdir, mkxattrdir, symlink) + * eventually end up in zfs_mknode(), which assigns the object's + * creation time and generation number. The generic VOP_CREATE() + * doesn't have either concept, so we smuggle the values inside + * the vattr's otherwise unused va_ctime and va_nblocks fields. + */ + ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime); + xva.xva_vattr.va_nblocks = lr->lr_gen; + + error = dmu_object_info(zfsvfs->z_os, lr->lr_foid, NULL); + if (error != ENOENT) + goto bail; + + if (lr->lr_common.lrc_txtype & TX_CI) + vflg |= FIGNORECASE; + switch ((int)lr->lr_common.lrc_txtype) { + case TX_CREATE_ACL: + aclstart = (caddr_t)(lracl + 1); + fuidstart = (caddr_t)aclstart + + ZIL_ACE_LENGTH(lracl->lr_acl_bytes); + zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart, + (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt, + lr->lr_uid, lr->lr_gid); + /*FALLTHROUGH*/ + case TX_CREATE_ACL_ATTR: + if (name == NULL) { + lrattr = (lr_attr_t *)(caddr_t)(lracl + 1); + xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize); + xva.xva_vattr.va_mask |= AT_XVATTR; + zfs_replay_xvattr(lrattr, &xva); + } + vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS; + vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen; + vsec.vsa_aclcnt = lracl->lr_aclcnt; + vsec.vsa_aclentsz = lracl->lr_acl_bytes; + vsec.vsa_aclflags = lracl->lr_acl_flags; + if (zfsvfs->z_fuid_replay == NULL) { + fuidstart = (caddr_t)(lracl + 1) + xvatlen + + ZIL_ACE_LENGTH(lracl->lr_acl_bytes); + zfsvfs->z_fuid_replay = + zfs_replay_fuids(fuidstart, + (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt, + lr->lr_uid, lr->lr_gid); + } + + error = VOP_CREATE(ZTOV(dzp), name, &xva.xva_vattr, + 0, 0, &vp, kcred, vflg, NULL, &vsec); + break; + case TX_MKDIR_ACL: + aclstart = (caddr_t)(lracl + 1); + fuidstart = (caddr_t)aclstart + + ZIL_ACE_LENGTH(lracl->lr_acl_bytes); + zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart, + (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt, + lr->lr_uid, lr->lr_gid); + /*FALLTHROUGH*/ + case TX_MKDIR_ACL_ATTR: + if (name == NULL) { + lrattr = (lr_attr_t *)(caddr_t)(lracl + 1); + xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize); + zfs_replay_xvattr(lrattr, &xva); + } + vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS; + vsec.vsa_aclentp = (caddr_t)(lracl + 1) + 
xvatlen; + vsec.vsa_aclcnt = lracl->lr_aclcnt; + vsec.vsa_aclentsz = lracl->lr_acl_bytes; + vsec.vsa_aclflags = lracl->lr_acl_flags; + if (zfsvfs->z_fuid_replay == NULL) { + fuidstart = (caddr_t)(lracl + 1) + xvatlen + + ZIL_ACE_LENGTH(lracl->lr_acl_bytes); + zfsvfs->z_fuid_replay = + zfs_replay_fuids(fuidstart, + (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt, + lr->lr_uid, lr->lr_gid); + } + error = VOP_MKDIR(ZTOV(dzp), name, &xva.xva_vattr, + &vp, kcred, NULL, vflg, &vsec); + break; + default: + error = ENOTSUP; + } + +bail: + if (error == 0 && vp != NULL) + VN_RELE(vp); + + VN_RELE(ZTOV(dzp)); + + zfs_fuid_info_free(zfsvfs->z_fuid_replay); + zfsvfs->z_fuid_replay = NULL; + + return (error); +} + +static int +zfs_replay_create(zfsvfs_t *zfsvfs, lr_create_t *lr, boolean_t byteswap) +{ + char *name = NULL; /* location determined later */ + char *link; /* symlink content follows name */ + znode_t *dzp; + vnode_t *vp = NULL; + xvattr_t xva; + int vflg = 0; + size_t lrsize = sizeof (lr_create_t); + lr_attr_t *lrattr; + void *start; + size_t xvatlen; + uint64_t txtype; + int error; + + if (byteswap) { + byteswap_uint64_array(lr, sizeof (*lr)); + txtype = (int)lr->lr_common.lrc_txtype; + if (txtype == TX_CREATE_ATTR || txtype == TX_MKDIR_ATTR) + zfs_replay_swap_attrs((lr_attr_t *)(lr + 1)); + } + + + if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0) + return (error); + + xva_init(&xva); + zfs_init_vattr(&xva.xva_vattr, AT_TYPE | AT_MODE | AT_UID | AT_GID, + lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, lr->lr_foid); + + /* + * All forms of zfs create (create, mkdir, mkxattrdir, symlink) + * eventually end up in zfs_mknode(), which assigns the object's + * creation time and generation number. The generic VOP_CREATE() + * doesn't have either concept, so we smuggle the values inside + * the vattr's otherwise unused va_ctime and va_nblocks fields. + */ + ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime); + xva.xva_vattr.va_nblocks = lr->lr_gen; + + error = dmu_object_info(zfsvfs->z_os, lr->lr_foid, NULL); + if (error != ENOENT) + goto out; + + if (lr->lr_common.lrc_txtype & TX_CI) + vflg |= FIGNORECASE; + + /* + * Symlinks don't have fuid info, and CIFS never creates + * symlinks. + * + * The _ATTR versions will grab the fuid info in their subcases. 
+ */ + if ((int)lr->lr_common.lrc_txtype != TX_SYMLINK && + (int)lr->lr_common.lrc_txtype != TX_MKDIR_ATTR && + (int)lr->lr_common.lrc_txtype != TX_CREATE_ATTR) { + start = (lr + 1); + zfsvfs->z_fuid_replay = + zfs_replay_fuid_domain(start, &start, + lr->lr_uid, lr->lr_gid); + } + + switch ((int)lr->lr_common.lrc_txtype) { + case TX_CREATE_ATTR: + lrattr = (lr_attr_t *)(caddr_t)(lr + 1); + xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize); + zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva); + start = (caddr_t)(lr + 1) + xvatlen; + zfsvfs->z_fuid_replay = + zfs_replay_fuid_domain(start, &start, + lr->lr_uid, lr->lr_gid); + name = (char *)start; + + /*FALLTHROUGH*/ + case TX_CREATE: + if (name == NULL) + name = (char *)start; + + error = VOP_CREATE(ZTOV(dzp), name, &xva.xva_vattr, + 0, 0, &vp, kcred, vflg, NULL, NULL); + break; + case TX_MKDIR_ATTR: + lrattr = (lr_attr_t *)(caddr_t)(lr + 1); + xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize); + zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva); + start = (caddr_t)(lr + 1) + xvatlen; + zfsvfs->z_fuid_replay = + zfs_replay_fuid_domain(start, &start, + lr->lr_uid, lr->lr_gid); + name = (char *)start; + + /*FALLTHROUGH*/ + case TX_MKDIR: + if (name == NULL) + name = (char *)(lr + 1); + + error = VOP_MKDIR(ZTOV(dzp), name, &xva.xva_vattr, + &vp, kcred, NULL, vflg, NULL); + break; + case TX_MKXATTR: + name = (char *)(lr + 1); + error = zfs_make_xattrdir(dzp, &xva.xva_vattr, &vp, kcred); + break; + case TX_SYMLINK: + name = (char *)(lr + 1); + link = name + strlen(name) + 1; + error = VOP_SYMLINK(ZTOV(dzp), name, &xva.xva_vattr, + link, kcred, NULL, vflg); + break; + default: + error = ENOTSUP; + } + +out: + if (error == 0 && vp != NULL) + VN_RELE(vp); + + VN_RELE(ZTOV(dzp)); + + if (zfsvfs->z_fuid_replay) + zfs_fuid_info_free(zfsvfs->z_fuid_replay); + zfsvfs->z_fuid_replay = NULL; + return (error); +} + +static int +zfs_replay_remove(zfsvfs_t *zfsvfs, lr_remove_t *lr, boolean_t byteswap) +{ + char *name = (char *)(lr + 1); /* name follows lr_remove_t */ + znode_t *dzp; + int error; + int vflg = 0; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0) + return (error); + + if (lr->lr_common.lrc_txtype & TX_CI) + vflg |= FIGNORECASE; + + switch ((int)lr->lr_common.lrc_txtype) { + case TX_REMOVE: + error = VOP_REMOVE(ZTOV(dzp), name, kcred, NULL, vflg); + break; + case TX_RMDIR: + error = VOP_RMDIR(ZTOV(dzp), name, NULL, kcred, NULL, vflg); + break; + default: + error = ENOTSUP; + } + + VN_RELE(ZTOV(dzp)); + + return (error); +} + +static int +zfs_replay_link(zfsvfs_t *zfsvfs, lr_link_t *lr, boolean_t byteswap) +{ + char *name = (char *)(lr + 1); /* name follows lr_link_t */ + znode_t *dzp, *zp; + int error; + int vflg = 0; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0) + return (error); + + if ((error = zfs_zget(zfsvfs, lr->lr_link_obj, &zp)) != 0) { + VN_RELE(ZTOV(dzp)); + return (error); + } + + if (lr->lr_common.lrc_txtype & TX_CI) + vflg |= FIGNORECASE; + + error = VOP_LINK(ZTOV(dzp), ZTOV(zp), name, kcred, NULL, vflg); + + VN_RELE(ZTOV(zp)); + VN_RELE(ZTOV(dzp)); + + return (error); +} + +static int +zfs_replay_rename(zfsvfs_t *zfsvfs, lr_rename_t *lr, boolean_t byteswap) +{ + char *sname = (char *)(lr + 1); /* sname and tname follow lr_rename_t */ + char *tname = sname + strlen(sname) + 1; + znode_t *sdzp, *tdzp; + int error; + int vflg = 0; + + if (byteswap) + 
byteswap_uint64_array(lr, sizeof (*lr)); + + if ((error = zfs_zget(zfsvfs, lr->lr_sdoid, &sdzp)) != 0) + return (error); + + if ((error = zfs_zget(zfsvfs, lr->lr_tdoid, &tdzp)) != 0) { + VN_RELE(ZTOV(sdzp)); + return (error); + } + + if (lr->lr_common.lrc_txtype & TX_CI) + vflg |= FIGNORECASE; + + error = VOP_RENAME(ZTOV(sdzp), sname, ZTOV(tdzp), tname, kcred, + NULL, vflg); + + VN_RELE(ZTOV(tdzp)); + VN_RELE(ZTOV(sdzp)); + + return (error); +} + +static int +zfs_replay_write(zfsvfs_t *zfsvfs, lr_write_t *lr, boolean_t byteswap) +{ + char *data = (char *)(lr + 1); /* data follows lr_write_t */ + znode_t *zp; + int error; + ssize_t resid; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) { + /* + * As we can log writes out of order, it's possible the + * file has been removed. In this case just drop the write + * and return success. + */ + if (error == ENOENT) + error = 0; + return (error); + } + + error = vn_rdwr(UIO_WRITE, ZTOV(zp), data, lr->lr_length, + lr->lr_offset, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, &resid); + + VN_RELE(ZTOV(zp)); + + return (error); +} + +static int +zfs_replay_truncate(zfsvfs_t *zfsvfs, lr_truncate_t *lr, boolean_t byteswap) +{ + znode_t *zp; + flock64_t fl; + int error; + + if (byteswap) + byteswap_uint64_array(lr, sizeof (*lr)); + + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) { + /* + * As we can log truncates out of order, it's possible the + * file has been removed. In this case just drop the truncate + * and return success. + */ + if (error == ENOENT) + error = 0; + return (error); + } + + bzero(&fl, sizeof (fl)); + fl.l_type = F_WRLCK; + fl.l_whence = 0; + fl.l_start = lr->lr_offset; + fl.l_len = lr->lr_length; + + error = VOP_SPACE(ZTOV(zp), F_FREESP, &fl, FWRITE | FOFFMAX, + lr->lr_offset, kcred, NULL); + + VN_RELE(ZTOV(zp)); + + return (error); +} + +static int +zfs_replay_setattr(zfsvfs_t *zfsvfs, lr_setattr_t *lr, boolean_t byteswap) +{ + znode_t *zp; + xvattr_t xva; + vattr_t *vap = &xva.xva_vattr; + int error; + void *start; + + xva_init(&xva); + if (byteswap) { + byteswap_uint64_array(lr, sizeof (*lr)); + + if ((lr->lr_mask & AT_XVATTR) && + zfsvfs->z_version >= ZPL_VERSION_INITIAL) + zfs_replay_swap_attrs((lr_attr_t *)(lr + 1)); + } + + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) { + /* + * As we can log setattrs out of order, it's possible the + * file has been removed. In this case just drop the setattr + * and return success. + */ + if (error == ENOENT) + error = 0; + return (error); + } + + zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode, + lr->lr_uid, lr->lr_gid, 0, lr->lr_foid); + + vap->va_size = lr->lr_size; + ZFS_TIME_DECODE(&vap->va_atime, lr->lr_atime); + ZFS_TIME_DECODE(&vap->va_mtime, lr->lr_mtime); + + /* + * Fill in xvattr_t portions if necessary. 
+ */ + + start = (lr_setattr_t *)(lr + 1); + if (vap->va_mask & AT_XVATTR) { + zfs_replay_xvattr((lr_attr_t *)start, &xva); + start = (caddr_t)start + + ZIL_XVAT_SIZE(((lr_attr_t *)start)->lr_attr_masksize); + } else + xva.xva_vattr.va_mask &= ~AT_XVATTR; + + zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start, + lr->lr_uid, lr->lr_gid); + + error = VOP_SETATTR(ZTOV(zp), vap, 0, kcred, NULL); + + zfs_fuid_info_free(zfsvfs->z_fuid_replay); + zfsvfs->z_fuid_replay = NULL; + VN_RELE(ZTOV(zp)); + + return (error); +} + +static int +zfs_replay_acl_v0(zfsvfs_t *zfsvfs, lr_acl_v0_t *lr, boolean_t byteswap) +{ + ace_t *ace = (ace_t *)(lr + 1); /* ace array follows lr_acl_t */ + vsecattr_t vsa; + znode_t *zp; + int error; + + if (byteswap) { + byteswap_uint64_array(lr, sizeof (*lr)); + zfs_oldace_byteswap(ace, lr->lr_aclcnt); + } + + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) { + /* + * As we can log acls out of order, it's possible the + * file has been removed. In this case just drop the acl + * and return success. + */ + if (error == ENOENT) + error = 0; + return (error); + } + + bzero(&vsa, sizeof (vsa)); + vsa.vsa_mask = VSA_ACE | VSA_ACECNT; + vsa.vsa_aclcnt = lr->lr_aclcnt; + vsa.vsa_aclentsz = sizeof (ace_t) * vsa.vsa_aclcnt; + vsa.vsa_aclflags = 0; + vsa.vsa_aclentp = ace; + + error = VOP_SETSECATTR(ZTOV(zp), &vsa, 0, kcred, NULL); + + VN_RELE(ZTOV(zp)); + + return (error); +} + +/* + * Replaying ACLs is complicated by FUID support. + * The log record may contain some optional data + * to be used for replaying FUID's. These pieces + * are the actual FUIDs that were created initially. + * The FUID table index may no longer be valid and + * during zfs_create() a new index may be assigned. + * Because of this the log will contain the original + * doman+rid in order to create a new FUID. + * + * The individual ACEs may contain an ephemeral uid/gid which is no + * longer valid and will need to be replaced with an actual FUID. + * + */ +static int +zfs_replay_acl(zfsvfs_t *zfsvfs, lr_acl_t *lr, boolean_t byteswap) +{ + ace_t *ace = (ace_t *)(lr + 1); + vsecattr_t vsa; + znode_t *zp; + int error; + + if (byteswap) { + byteswap_uint64_array(lr, sizeof (*lr)); + zfs_ace_byteswap(ace, lr->lr_acl_bytes, B_FALSE); + if (lr->lr_fuidcnt) { + byteswap_uint64_array((caddr_t)ace + + ZIL_ACE_LENGTH(lr->lr_acl_bytes), + lr->lr_fuidcnt * sizeof (uint64_t)); + } + } + + if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) { + /* + * As we can log acls out of order, it's possible the + * file has been removed. In this case just drop the acl + * and return success. 
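The same tolerance appears in every replay routine above: writes, truncates, setattrs and ACLs may refer to an object that a later record already removed, so ENOENT is swallowed rather than treated as corruption. A one-line sketch of the idiom, with drop_if_gone as an illustrative name:

#include <errno.h>

/* Missing object during replay: superseded by a later remove, so drop it. */
static int
drop_if_gone(int error)
{
	return (error == ENOENT ? 0 : error);
}

int
main(void)
{
	return (drop_if_gone(ENOENT));	/* 0: the record is simply skipped */
}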
+ */ + if (error == ENOENT) + error = 0; + return (error); + } + + bzero(&vsa, sizeof (vsa)); + vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS; + vsa.vsa_aclcnt = lr->lr_aclcnt; + vsa.vsa_aclentp = ace; + vsa.vsa_aclentsz = lr->lr_acl_bytes; + vsa.vsa_aclflags = lr->lr_acl_flags; + + if (lr->lr_fuidcnt) { + void *fuidstart = (caddr_t)ace + + ZIL_ACE_LENGTH(lr->lr_acl_bytes); + + zfsvfs->z_fuid_replay = + zfs_replay_fuids(fuidstart, &fuidstart, + lr->lr_fuidcnt, lr->lr_domcnt, 0, 0); + } + + error = VOP_SETSECATTR(ZTOV(zp), &vsa, 0, kcred, NULL); + + if (zfsvfs->z_fuid_replay) + zfs_fuid_info_free(zfsvfs->z_fuid_replay); + + zfsvfs->z_fuid_replay = NULL; + VN_RELE(ZTOV(zp)); + + return (error); +} + +/* + * Callback vectors for replaying records + */ +zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE] = { + zfs_replay_error, /* 0 no such transaction type */ + zfs_replay_create, /* TX_CREATE */ + zfs_replay_create, /* TX_MKDIR */ + zfs_replay_create, /* TX_MKXATTR */ + zfs_replay_create, /* TX_SYMLINK */ + zfs_replay_remove, /* TX_REMOVE */ + zfs_replay_remove, /* TX_RMDIR */ + zfs_replay_link, /* TX_LINK */ + zfs_replay_rename, /* TX_RENAME */ + zfs_replay_write, /* TX_WRITE */ + zfs_replay_truncate, /* TX_TRUNCATE */ + zfs_replay_setattr, /* TX_SETATTR */ + zfs_replay_acl_v0, /* TX_ACL_V0 */ + zfs_replay_acl, /* TX_ACL */ + zfs_replay_create_acl, /* TX_CREATE_ACL */ + zfs_replay_create, /* TX_CREATE_ATTR */ + zfs_replay_create_acl, /* TX_CREATE_ACL_ATTR */ + zfs_replay_create_acl, /* TX_MKDIR_ACL */ + zfs_replay_create, /* TX_MKDIR_ATTR */ + zfs_replay_create_acl, /* TX_MKDIR_ACL_ATTR */ +}; diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_rlock.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_rlock.c new file mode 100644 index 000000000000..f0a75b5fa0d7 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_rlock.c @@ -0,0 +1,602 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2007 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#pragma ident "%Z%%M% %I% %E% SMI" + +/* + * This file contains the code to implement file range locking in + * ZFS, although there isn't much specific to ZFS (all that comes to mind + * support for growing the blocksize). + * + * Interface + * --------- + * Defined in zfs_rlock.h but essentially: + * rl = zfs_range_lock(zp, off, len, lock_type); + * zfs_range_unlock(rl); + * zfs_range_reduce(rl, off, len); + * + * AVL tree + * -------- + * An AVL tree is used to maintain the state of the existing ranges + * that are locked for exclusive (writer) or shared (reader) use. + * The starting range offset is used for searching and sorting the tree. 
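The tree is ordered purely by starting offset (see zfs_range_compare() at the end of this file), and, as described below, writer locks are exclusive while overlapping reader locks are replaced by non-overlapping proxies, so a new lock only has to test ranges near its insertion point. A self-contained sketch of that ordering and overlap test, with a plain sorted array standing in for the AVL tree:

#include <stdint.h>
#include <stdio.h>

typedef struct range {
	uint64_t r_off;
	uint64_t r_len;
} range_t;

/* Same ordering rule as zfs_range_compare(): sort by starting offset. */
static int
range_compare(const void *a, const void *b)
{
	const range_t *r1 = a, *r2 = b;

	if (r1->r_off > r2->r_off)
		return (1);
	if (r1->r_off < r2->r_off)
		return (-1);
	return (0);
}

/*
 * Overlap test against a list already sorted by r_off.  The kernel code
 * only needs the nearest entry on each side of the insertion point,
 * since ranges held in the tree never overlap one another.
 */
static int
overlaps(const range_t *sorted, int n, uint64_t off, uint64_t len)
{
	int i;

	for (i = 0; i < n; i++) {
		if (sorted[i].r_off >= off + len)
			break;			/* starts beyond the new range */
		if (sorted[i].r_off + sorted[i].r_len > off)
			return (1);		/* overlap found */
	}
	return (0);
}

int
main(void)
{
	range_t held[] = { { 0, 10 }, { 20, 5 } };

	(void) range_compare(&held[0], &held[1]);
	(void) printf("%d %d\n",
	    overlaps(held, 2, 8, 4),	/* 1: collides with [0,10) */
	    overlaps(held, 2, 12, 4));	/* 0: fits in the gap */
	return (0);
}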
+ * + * Common case + * ----------- + * The (hopefully) usual case is of no overlaps or contention for + * locks. On entry to zfs_lock_range() a rl_t is allocated; the tree + * searched that finds no overlap, and *this* rl_t is placed in the tree. + * + * Overlaps/Reference counting/Proxy locks + * --------------------------------------- + * The avl code only allows one node at a particular offset. Also it's very + * inefficient to search through all previous entries looking for overlaps + * (because the very 1st in the ordered list might be at offset 0 but + * cover the whole file). + * So this implementation uses reference counts and proxy range locks. + * Firstly, only reader locks use reference counts and proxy locks, + * because writer locks are exclusive. + * When a reader lock overlaps with another then a proxy lock is created + * for that range and replaces the original lock. If the overlap + * is exact then the reference count of the proxy is simply incremented. + * Otherwise, the proxy lock is split into smaller lock ranges and + * new proxy locks created for non overlapping ranges. + * The reference counts are adjusted accordingly. + * Meanwhile, the orginal lock is kept around (this is the callers handle) + * and its offset and length are used when releasing the lock. + * + * Thread coordination + * ------------------- + * In order to make wakeups efficient and to ensure multiple continuous + * readers on a range don't starve a writer for the same range lock, + * two condition variables are allocated in each rl_t. + * If a writer (or reader) can't get a range it initialises the writer + * (or reader) cv; sets a flag saying there's a writer (or reader) waiting; + * and waits on that cv. When a thread unlocks that range it wakes up all + * writers then all readers before destroying the lock. + * + * Append mode writes + * ------------------ + * Append mode writes need to lock a range at the end of a file. + * The offset of the end of the file is determined under the + * range locking mutex, and the lock type converted from RL_APPEND to + * RL_WRITER and the range locked. + * + * Grow block handling + * ------------------- + * ZFS supports multiple block sizes currently upto 128K. The smallest + * block size is used for the file which is grown as needed. During this + * growth all other writers and readers must be excluded. + * So if the block size needs to be grown then the whole file is + * exclusively locked, then later the caller will reduce the lock + * range to just the range to be written using zfs_reduce_range. + */ + +#include + +/* + * Check if a write lock can be grabbed, or wait and recheck until available. + */ +static void +zfs_range_lock_writer(znode_t *zp, rl_t *new) +{ + avl_tree_t *tree = &zp->z_range_avl; + rl_t *rl; + avl_index_t where; + uint64_t end_size; + uint64_t off = new->r_off; + uint64_t len = new->r_len; + + for (;;) { + /* + * Range locking is also used by zvol and uses a + * dummied up znode. However, for zvol, we don't need to + * append or grow blocksize, and besides we don't have + * a z_phys or z_zfsvfs - so skip that processing. + * + * Yes, this is ugly, and would be solved by not handling + * grow or append in range lock code. If that was done then + * we could make the range locking code generically available + * to other non-zfs consumers. + */ + if (zp->z_vnode) { /* caller is ZPL */ + /* + * If in append mode pick up the current end of file. + * This is done under z_range_lock to avoid races. 
+ */ + if (new->r_type == RL_APPEND) + new->r_off = zp->z_phys->zp_size; + + /* + * If we need to grow the block size then grab the whole + * file range. This is also done under z_range_lock to + * avoid races. + */ + end_size = MAX(zp->z_phys->zp_size, new->r_off + len); + if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) || + zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) { + new->r_off = 0; + new->r_len = UINT64_MAX; + } + } + + /* + * First check for the usual case of no locks + */ + if (avl_numnodes(tree) == 0) { + new->r_type = RL_WRITER; /* convert to writer */ + avl_add(tree, new); + return; + } + + /* + * Look for any locks in the range. + */ + rl = avl_find(tree, new, &where); + if (rl) + goto wait; /* already locked at same offset */ + + rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER); + if (rl && (rl->r_off < new->r_off + new->r_len)) + goto wait; + + rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE); + if (rl && rl->r_off + rl->r_len > new->r_off) + goto wait; + + new->r_type = RL_WRITER; /* convert possible RL_APPEND */ + avl_insert(tree, new, where); + return; +wait: + if (!rl->r_write_wanted) { + cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL); + rl->r_write_wanted = B_TRUE; + } + cv_wait(&rl->r_wr_cv, &zp->z_range_lock); + + /* reset to original */ + new->r_off = off; + new->r_len = len; + } +} + +/* + * If this is an original (non-proxy) lock then replace it by + * a proxy and return the proxy. + */ +static rl_t * +zfs_range_proxify(avl_tree_t *tree, rl_t *rl) +{ + rl_t *proxy; + + if (rl->r_proxy) + return (rl); /* already a proxy */ + + ASSERT3U(rl->r_cnt, ==, 1); + ASSERT(rl->r_write_wanted == B_FALSE); + ASSERT(rl->r_read_wanted == B_FALSE); + avl_remove(tree, rl); + rl->r_cnt = 0; + + /* create a proxy range lock */ + proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP); + proxy->r_off = rl->r_off; + proxy->r_len = rl->r_len; + proxy->r_cnt = 1; + proxy->r_type = RL_READER; + proxy->r_proxy = B_TRUE; + proxy->r_write_wanted = B_FALSE; + proxy->r_read_wanted = B_FALSE; + avl_add(tree, proxy); + + return (proxy); +} + +/* + * Split the range lock at the supplied offset + * returning the *front* proxy. + */ +static rl_t * +zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off) +{ + rl_t *front, *rear; + + ASSERT3U(rl->r_len, >, 1); + ASSERT3U(off, >, rl->r_off); + ASSERT3U(off, <, rl->r_off + rl->r_len); + ASSERT(rl->r_write_wanted == B_FALSE); + ASSERT(rl->r_read_wanted == B_FALSE); + + /* create the rear proxy range lock */ + rear = kmem_alloc(sizeof (rl_t), KM_SLEEP); + rear->r_off = off; + rear->r_len = rl->r_off + rl->r_len - off; + rear->r_cnt = rl->r_cnt; + rear->r_type = RL_READER; + rear->r_proxy = B_TRUE; + rear->r_write_wanted = B_FALSE; + rear->r_read_wanted = B_FALSE; + + front = zfs_range_proxify(tree, rl); + front->r_len = off - rl->r_off; + + avl_insert_here(tree, rear, front, AVL_AFTER); + return (front); +} + +/* + * Create and add a new proxy range lock for the supplied range. 
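Taken together, proxify, split and new-proxy turn overlapping reader locks into adjacent, non-overlapping proxies whose r_cnt records how many readers cover each piece; unlock later walks the same proxies and decrements them. A worked example as a tiny program: readers over [0,10) and [5,15) leave three proxies in the tree, with the middle segment counted twice.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct { uint64_t off, len; } readers[] = { { 0, 10 }, { 5, 10 } };
	uint64_t cuts[] = { 0, 5, 10, 15 };	/* resulting segment boundaries */
	int s, r;

	for (s = 0; s < 3; s++) {
		int cnt = 0;

		for (r = 0; r < 2; r++)
			if (readers[r].off < cuts[s + 1] &&
			    readers[r].off + readers[r].len > cuts[s])
				cnt++;
		(void) printf("proxy [%llu,%llu) r_cnt=%d\n",
		    (unsigned long long)cuts[s],
		    (unsigned long long)cuts[s + 1], cnt);
	}
	return (0);
}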
+ */ +static void +zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len) +{ + rl_t *rl; + + ASSERT(len); + rl = kmem_alloc(sizeof (rl_t), KM_SLEEP); + rl->r_off = off; + rl->r_len = len; + rl->r_cnt = 1; + rl->r_type = RL_READER; + rl->r_proxy = B_TRUE; + rl->r_write_wanted = B_FALSE; + rl->r_read_wanted = B_FALSE; + avl_add(tree, rl); +} + +static void +zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where) +{ + rl_t *next; + uint64_t off = new->r_off; + uint64_t len = new->r_len; + + /* + * prev arrives either: + * - pointing to an entry at the same offset + * - pointing to the entry with the closest previous offset whose + * range may overlap with the new range + * - null, if there were no ranges starting before the new one + */ + if (prev) { + if (prev->r_off + prev->r_len <= off) { + prev = NULL; + } else if (prev->r_off != off) { + /* + * convert to proxy if needed then + * split this entry and bump ref count + */ + prev = zfs_range_split(tree, prev, off); + prev = AVL_NEXT(tree, prev); /* move to rear range */ + } + } + ASSERT((prev == NULL) || (prev->r_off == off)); + + if (prev) + next = prev; + else + next = (rl_t *)avl_nearest(tree, where, AVL_AFTER); + + if (next == NULL || off + len <= next->r_off) { + /* no overlaps, use the original new rl_t in the tree */ + avl_insert(tree, new, where); + return; + } + + if (off < next->r_off) { + /* Add a proxy for initial range before the overlap */ + zfs_range_new_proxy(tree, off, next->r_off - off); + } + + new->r_cnt = 0; /* will use proxies in tree */ + /* + * We now search forward through the ranges, until we go past the end + * of the new range. For each entry we make it a proxy if it + * isn't already, then bump its reference count. If there's any + * gaps between the ranges then we create a new proxy range. + */ + for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) { + if (off + len <= next->r_off) + break; + if (prev && prev->r_off + prev->r_len < next->r_off) { + /* there's a gap */ + ASSERT3U(next->r_off, >, prev->r_off + prev->r_len); + zfs_range_new_proxy(tree, prev->r_off + prev->r_len, + next->r_off - (prev->r_off + prev->r_len)); + } + if (off + len == next->r_off + next->r_len) { + /* exact overlap with end */ + next = zfs_range_proxify(tree, next); + next->r_cnt++; + return; + } + if (off + len < next->r_off + next->r_len) { + /* new range ends in the middle of this block */ + next = zfs_range_split(tree, next, off + len); + next->r_cnt++; + return; + } + ASSERT3U(off + len, >, next->r_off + next->r_len); + next = zfs_range_proxify(tree, next); + next->r_cnt++; + } + + /* Add the remaining end range. */ + zfs_range_new_proxy(tree, prev->r_off + prev->r_len, + (off + len) - (prev->r_off + prev->r_len)); +} + +/* + * Check if a reader lock can be grabbed, or wait and recheck until available. + */ +static void +zfs_range_lock_reader(znode_t *zp, rl_t *new) +{ + avl_tree_t *tree = &zp->z_range_avl; + rl_t *prev, *next; + avl_index_t where; + uint64_t off = new->r_off; + uint64_t len = new->r_len; + + /* + * Look for any writer locks in the range. + */ +retry: + prev = avl_find(tree, new, &where); + if (prev == NULL) + prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE); + + /* + * Check the previous range for a writer lock overlap. 
+ */ + if (prev && (off < prev->r_off + prev->r_len)) { + if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) { + if (!prev->r_read_wanted) { + cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL); + prev->r_read_wanted = B_TRUE; + } + cv_wait(&prev->r_rd_cv, &zp->z_range_lock); + goto retry; + } + if (off + len < prev->r_off + prev->r_len) + goto got_lock; + } + + /* + * Search through the following ranges to see if there's + * write lock any overlap. + */ + if (prev) + next = AVL_NEXT(tree, prev); + else + next = (rl_t *)avl_nearest(tree, where, AVL_AFTER); + for (; next; next = AVL_NEXT(tree, next)) { + if (off + len <= next->r_off) + goto got_lock; + if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) { + if (!next->r_read_wanted) { + cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL); + next->r_read_wanted = B_TRUE; + } + cv_wait(&next->r_rd_cv, &zp->z_range_lock); + goto retry; + } + if (off + len <= next->r_off + next->r_len) + goto got_lock; + } + +got_lock: + /* + * Add the read lock, which may involve splitting existing + * locks and bumping ref counts (r_cnt). + */ + zfs_range_add_reader(tree, new, prev, where); +} + +/* + * Lock a range (offset, length) as either shared (RL_READER) + * or exclusive (RL_WRITER). Returns the range lock structure + * for later unlocking or reduce range (if entire file + * previously locked as RL_WRITER). + */ +rl_t * +zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type) +{ + rl_t *new; + + ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND); + + new = kmem_alloc(sizeof (rl_t), KM_SLEEP); + new->r_zp = zp; + new->r_off = off; + new->r_len = len; + new->r_cnt = 1; /* assume it's going to be in the tree */ + new->r_type = type; + new->r_proxy = B_FALSE; + new->r_write_wanted = B_FALSE; + new->r_read_wanted = B_FALSE; + + mutex_enter(&zp->z_range_lock); + if (type == RL_READER) { + /* + * First check for the usual case of no locks + */ + if (avl_numnodes(&zp->z_range_avl) == 0) + avl_add(&zp->z_range_avl, new); + else + zfs_range_lock_reader(zp, new); + } else + zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */ + mutex_exit(&zp->z_range_lock); + return (new); +} + +/* + * Unlock a reader lock + */ +static void +zfs_range_unlock_reader(znode_t *zp, rl_t *remove) +{ + avl_tree_t *tree = &zp->z_range_avl; + rl_t *rl, *next; + uint64_t len; + + /* + * The common case is when the remove entry is in the tree + * (cnt == 1) meaning there's been no other reader locks overlapping + * with this one. Otherwise the remove entry will have been + * removed from the tree and replaced by proxies (one or + * more ranges mapping to the entire range). + */ + if (remove->r_cnt == 1) { + avl_remove(tree, remove); + if (remove->r_write_wanted) { + cv_broadcast(&remove->r_wr_cv); + cv_destroy(&remove->r_wr_cv); + } + if (remove->r_read_wanted) { + cv_broadcast(&remove->r_rd_cv); + cv_destroy(&remove->r_rd_cv); + } + } else { + ASSERT3U(remove->r_cnt, ==, 0); + ASSERT3U(remove->r_write_wanted, ==, 0); + ASSERT3U(remove->r_read_wanted, ==, 0); + /* + * Find start proxy representing this reader lock, + * then decrement ref count on all proxies + * that make up this range, freeing them as needed. 
+ */ + rl = avl_find(tree, remove, NULL); + ASSERT(rl); + ASSERT(rl->r_cnt); + ASSERT(rl->r_type == RL_READER); + for (len = remove->r_len; len != 0; rl = next) { + len -= rl->r_len; + if (len) { + next = AVL_NEXT(tree, rl); + ASSERT(next); + ASSERT(rl->r_off + rl->r_len == next->r_off); + ASSERT(next->r_cnt); + ASSERT(next->r_type == RL_READER); + } + rl->r_cnt--; + if (rl->r_cnt == 0) { + avl_remove(tree, rl); + if (rl->r_write_wanted) { + cv_broadcast(&rl->r_wr_cv); + cv_destroy(&rl->r_wr_cv); + } + if (rl->r_read_wanted) { + cv_broadcast(&rl->r_rd_cv); + cv_destroy(&rl->r_rd_cv); + } + kmem_free(rl, sizeof (rl_t)); + } + } + } + kmem_free(remove, sizeof (rl_t)); +} + +/* + * Unlock range and destroy range lock structure. + */ +void +zfs_range_unlock(rl_t *rl) +{ + znode_t *zp = rl->r_zp; + + ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER); + ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0); + ASSERT(!rl->r_proxy); + + mutex_enter(&zp->z_range_lock); + if (rl->r_type == RL_WRITER) { + /* writer locks can't be shared or split */ + avl_remove(&zp->z_range_avl, rl); + mutex_exit(&zp->z_range_lock); + if (rl->r_write_wanted) { + cv_broadcast(&rl->r_wr_cv); + cv_destroy(&rl->r_wr_cv); + } + if (rl->r_read_wanted) { + cv_broadcast(&rl->r_rd_cv); + cv_destroy(&rl->r_rd_cv); + } + kmem_free(rl, sizeof (rl_t)); + } else { + /* + * lock may be shared, let zfs_range_unlock_reader() + * release the lock and free the rl_t + */ + zfs_range_unlock_reader(zp, rl); + mutex_exit(&zp->z_range_lock); + } +} + +/* + * Reduce range locked as RL_WRITER from whole file to specified range. + * Asserts the whole file is exclusivly locked and so there's only one + * entry in the tree. + */ +void +zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len) +{ + znode_t *zp = rl->r_zp; + + /* Ensure there are no other locks */ + ASSERT(avl_numnodes(&zp->z_range_avl) == 1); + ASSERT(rl->r_off == 0); + ASSERT(rl->r_type == RL_WRITER); + ASSERT(!rl->r_proxy); + ASSERT3U(rl->r_len, ==, UINT64_MAX); + ASSERT3U(rl->r_cnt, ==, 1); + + mutex_enter(&zp->z_range_lock); + rl->r_off = off; + rl->r_len = len; + mutex_exit(&zp->z_range_lock); + if (rl->r_write_wanted) + cv_broadcast(&rl->r_wr_cv); + if (rl->r_read_wanted) + cv_broadcast(&rl->r_rd_cv); +} + +/* + * AVL comparison function used to order range locks + * Locks are ordered on the start offset of the range. + */ +int +zfs_range_compare(const void *arg1, const void *arg2) +{ + const rl_t *rl1 = arg1; + const rl_t *rl2 = arg2; + + if (rl1->r_off > rl2->r_off) + return (1); + if (rl1->r_off < rl2->r_off) + return (-1); + return (0); +} diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vfsops.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vfsops.c new file mode 100644 index 000000000000..06b4dee4620b --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vfsops.c @@ -0,0 +1,1652 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
+ * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fs/fs_subr.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int zfsfstype; +vfsops_t *zfs_vfsops = NULL; +static major_t zfs_major; +static minor_t zfs_minor; +static kmutex_t zfs_dev_mtx; + +static int zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr); +static int zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr); +static int zfs_mountroot(vfs_t *vfsp, enum whymountroot); +static int zfs_root(vfs_t *vfsp, vnode_t **vpp); +static int zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp); +static int zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp); +static void zfs_freevfs(vfs_t *vfsp); + +static const fs_operation_def_t zfs_vfsops_template[] = { + VFSNAME_MOUNT, { .vfs_mount = zfs_mount }, + VFSNAME_MOUNTROOT, { .vfs_mountroot = zfs_mountroot }, + VFSNAME_UNMOUNT, { .vfs_unmount = zfs_umount }, + VFSNAME_ROOT, { .vfs_root = zfs_root }, + VFSNAME_STATVFS, { .vfs_statvfs = zfs_statvfs }, + VFSNAME_SYNC, { .vfs_sync = zfs_sync }, + VFSNAME_VGET, { .vfs_vget = zfs_vget }, + VFSNAME_FREEVFS, { .vfs_freevfs = zfs_freevfs }, + NULL, NULL +}; + +static const fs_operation_def_t zfs_vfsops_eio_template[] = { + VFSNAME_FREEVFS, { .vfs_freevfs = zfs_freevfs }, + NULL, NULL +}; + +/* + * We need to keep a count of active fs's. + * This is necessary to prevent our module + * from being unloaded after a umount -f + */ +static uint32_t zfs_active_fs_count = 0; + +static char *noatime_cancel[] = { MNTOPT_ATIME, NULL }; +static char *atime_cancel[] = { MNTOPT_NOATIME, NULL }; +static char *noxattr_cancel[] = { MNTOPT_XATTR, NULL }; +static char *xattr_cancel[] = { MNTOPT_NOXATTR, NULL }; + +/* + * MO_DEFAULT is not used since the default value is determined + * by the equivalent property. + */ +static mntopt_t mntopts[] = { + { MNTOPT_NOXATTR, noxattr_cancel, NULL, 0, NULL }, + { MNTOPT_XATTR, xattr_cancel, NULL, 0, NULL }, + { MNTOPT_NOATIME, noatime_cancel, NULL, 0, NULL }, + { MNTOPT_ATIME, atime_cancel, NULL, 0, NULL } +}; + +static mntopts_t zfs_mntopts = { + sizeof (mntopts) / sizeof (mntopt_t), + mntopts +}; + +/*ARGSUSED*/ +int +zfs_sync(vfs_t *vfsp, short flag, cred_t *cr) +{ + /* + * Data integrity is job one. We don't want a compromised kernel + * writing to the storage pool, so we never sync during panic. + */ + if (panicstr) + return (0); + + /* + * SYNC_ATTR is used by fsflush() to force old filesystems like UFS + * to sync metadata, which they would otherwise cache indefinitely. + * Semantically, the only requirement is that the sync be initiated. + * The DMU syncs out txgs frequently, so there's nothing to do. + */ + if (flag & SYNC_ATTR) + return (0); + + if (vfsp != NULL) { + /* + * Sync a specific filesystem. 
+ */ + zfsvfs_t *zfsvfs = vfsp->vfs_data; + + ZFS_ENTER(zfsvfs); + if (zfsvfs->z_log != NULL) + zil_commit(zfsvfs->z_log, UINT64_MAX, 0); + else + txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0); + ZFS_EXIT(zfsvfs); + } else { + /* + * Sync all ZFS filesystems. This is what happens when you + * run sync(1M). Unlike other filesystems, ZFS honors the + * request by waiting for all pools to commit all dirty data. + */ + spa_sync_allpools(); + } + + return (0); +} + +static int +zfs_create_unique_device(dev_t *dev) +{ + major_t new_major; + + do { + ASSERT3U(zfs_minor, <=, MAXMIN32); + minor_t start = zfs_minor; + do { + mutex_enter(&zfs_dev_mtx); + if (zfs_minor >= MAXMIN32) { + /* + * If we're still using the real major + * keep out of /dev/zfs and /dev/zvol minor + * number space. If we're using a getudev()'ed + * major number, we can use all of its minors. + */ + if (zfs_major == ddi_name_to_major(ZFS_DRIVER)) + zfs_minor = ZFS_MIN_MINOR; + else + zfs_minor = 0; + } else { + zfs_minor++; + } + *dev = makedevice(zfs_major, zfs_minor); + mutex_exit(&zfs_dev_mtx); + } while (vfs_devismounted(*dev) && zfs_minor != start); + if (zfs_minor == start) { + /* + * We are using all ~262,000 minor numbers for the + * current major number. Create a new major number. + */ + if ((new_major = getudev()) == (major_t)-1) { + cmn_err(CE_WARN, + "zfs_mount: Can't get unique major " + "device number."); + return (-1); + } + mutex_enter(&zfs_dev_mtx); + zfs_major = new_major; + zfs_minor = 0; + + mutex_exit(&zfs_dev_mtx); + } else { + break; + } + /* CONSTANTCONDITION */ + } while (1); + + return (0); +} + +static void +atime_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + if (newval == TRUE) { + zfsvfs->z_atime = TRUE; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0); + } else { + zfsvfs->z_atime = FALSE; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0); + } +} + +static void +xattr_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + if (newval == TRUE) { + /* XXX locking on vfs_flag? */ + zfsvfs->z_vfs->vfs_flag |= VFS_XATTR; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_XATTR, NULL, 0); + } else { + /* XXX locking on vfs_flag? */ + zfsvfs->z_vfs->vfs_flag &= ~VFS_XATTR; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_XATTR); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR, NULL, 0); + } +} + +static void +blksz_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + if (newval < SPA_MINBLOCKSIZE || + newval > SPA_MAXBLOCKSIZE || !ISP2(newval)) + newval = SPA_MAXBLOCKSIZE; + + zfsvfs->z_max_blksz = newval; + zfsvfs->z_vfs->vfs_bsize = newval; +} + +static void +readonly_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + if (newval) { + /* XXX locking on vfs_flag? */ + zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0); + } else { + /* XXX locking on vfs_flag? 
*/ + zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0); + } +} + +static void +devices_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + if (newval == FALSE) { + zfsvfs->z_vfs->vfs_flag |= VFS_NODEVICES; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES, NULL, 0); + } else { + zfsvfs->z_vfs->vfs_flag &= ~VFS_NODEVICES; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES, NULL, 0); + } +} + +static void +setuid_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + if (newval == FALSE) { + zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0); + } else { + zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0); + } +} + +static void +exec_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + if (newval == FALSE) { + zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0); + } else { + zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC; + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0); + } +} + +/* + * The nbmand mount option can be changed at mount time. + * We can't allow it to be toggled on live file systems or incorrect + * behavior may be seen from cifs clients + * + * This property isn't registered via dsl_prop_register(), but this callback + * will be called when a file system is first mounted + */ +static void +nbmand_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + if (newval == FALSE) { + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND, NULL, 0); + } else { + vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND); + vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND, NULL, 0); + } +} + +static void +snapdir_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + zfsvfs->z_show_ctldir = newval; +} + +static void +vscan_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + zfsvfs->z_vscan = newval; +} + +static void +acl_mode_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + zfsvfs->z_acl_mode = newval; +} + +static void +acl_inherit_changed_cb(void *arg, uint64_t newval) +{ + zfsvfs_t *zfsvfs = arg; + + zfsvfs->z_acl_inherit = newval; +} + +static int +zfs_register_callbacks(vfs_t *vfsp) +{ + struct dsl_dataset *ds = NULL; + objset_t *os = NULL; + zfsvfs_t *zfsvfs = NULL; + uint64_t nbmand; + int readonly, do_readonly = B_FALSE; + int setuid, do_setuid = B_FALSE; + int exec, do_exec = B_FALSE; + int devices, do_devices = B_FALSE; + int xattr, do_xattr = B_FALSE; + int atime, do_atime = B_FALSE; + int error = 0; + + ASSERT(vfsp); + zfsvfs = vfsp->vfs_data; + ASSERT(zfsvfs); + os = zfsvfs->z_os; + + /* + * The act of registering our callbacks will destroy any mount + * options we may have. In order to enable temporary overrides + * of mount options, we stash away the current values and + * restore them after we register the callbacks. 
+ */ + if (vfs_optionisset(vfsp, MNTOPT_RO, NULL)) { + readonly = B_TRUE; + do_readonly = B_TRUE; + } else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) { + readonly = B_FALSE; + do_readonly = B_TRUE; + } + if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) { + devices = B_FALSE; + setuid = B_FALSE; + do_devices = B_TRUE; + do_setuid = B_TRUE; + } else { + if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL)) { + devices = B_FALSE; + do_devices = B_TRUE; + } else if (vfs_optionisset(vfsp, MNTOPT_DEVICES, NULL)) { + devices = B_TRUE; + do_devices = B_TRUE; + } + + if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) { + setuid = B_FALSE; + do_setuid = B_TRUE; + } else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) { + setuid = B_TRUE; + do_setuid = B_TRUE; + } + } + if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) { + exec = B_FALSE; + do_exec = B_TRUE; + } else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) { + exec = B_TRUE; + do_exec = B_TRUE; + } + if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) { + xattr = B_FALSE; + do_xattr = B_TRUE; + } else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) { + xattr = B_TRUE; + do_xattr = B_TRUE; + } + if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL)) { + atime = B_FALSE; + do_atime = B_TRUE; + } else if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL)) { + atime = B_TRUE; + do_atime = B_TRUE; + } + + /* + * nbmand is a special property. It can only be changed at + * mount time. + * + * This is weird, but it is documented to only be changeable + * at mount time. + */ + if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) { + nbmand = B_FALSE; + } else if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL)) { + nbmand = B_TRUE; + } else { + char osname[MAXNAMELEN]; + + dmu_objset_name(os, osname); + if (error = dsl_prop_get_integer(osname, "nbmand", &nbmand, + NULL)) { + return (error); + } + } + + /* + * Register property callbacks. + * + * It would probably be fine to just check for i/o error from + * the first prop_register(), but I guess I like to go + * overboard... + */ + ds = dmu_objset_ds(os); + error = dsl_prop_register(ds, "atime", atime_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "xattr", xattr_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "recordsize", blksz_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "readonly", readonly_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "devices", devices_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "setuid", setuid_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "exec", exec_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "snapdir", snapdir_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "aclmode", acl_mode_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "aclinherit", acl_inherit_changed_cb, zfsvfs); + error = error ? error : dsl_prop_register(ds, + "vscan", vscan_changed_cb, zfsvfs); + if (error) + goto unregister; + + /* + * Invoke our callbacks to restore temporary mount options. 
+ */ + if (do_readonly) + readonly_changed_cb(zfsvfs, readonly); + if (do_setuid) + setuid_changed_cb(zfsvfs, setuid); + if (do_exec) + exec_changed_cb(zfsvfs, exec); + if (do_devices) + devices_changed_cb(zfsvfs, devices); + if (do_xattr) + xattr_changed_cb(zfsvfs, xattr); + if (do_atime) + atime_changed_cb(zfsvfs, atime); + + nbmand_changed_cb(zfsvfs, nbmand); + + return (0); + +unregister: + /* + * We may attempt to unregister some callbacks that are not + * registered, but this is OK; it will simply return ENOMSG, + * which we will ignore. + */ + (void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zfsvfs); + (void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zfsvfs); + (void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zfsvfs); + (void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zfsvfs); + (void) dsl_prop_unregister(ds, "devices", devices_changed_cb, zfsvfs); + (void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zfsvfs); + (void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zfsvfs); + (void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zfsvfs); + (void) dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb, zfsvfs); + (void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb, + zfsvfs); + (void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zfsvfs); + return (error); + +} + +static int +zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting) +{ + int error; + + error = zfs_register_callbacks(zfsvfs->z_vfs); + if (error) + return (error); + + /* + * Set the objset user_ptr to track its zfsvfs. + */ + mutex_enter(&zfsvfs->z_os->os->os_user_ptr_lock); + dmu_objset_set_user(zfsvfs->z_os, zfsvfs); + mutex_exit(&zfsvfs->z_os->os->os_user_ptr_lock); + + /* + * If we are not mounting (ie: online recv), then we don't + * have to worry about replaying the log as we blocked all + * operations out since we closed the ZIL. + */ + if (mounting) { + boolean_t readonly; + + /* + * During replay we remove the read only flag to + * allow replays to succeed. + */ + readonly = zfsvfs->z_vfs->vfs_flag & VFS_RDONLY; + zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY; + + /* + * Parse and replay the intent log. + */ + zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign, + zfs_replay_vector, zfs_unlinked_drain); + + zfs_unlinked_drain(zfsvfs); + zfsvfs->z_vfs->vfs_flag |= readonly; /* restore readonly bit */ + } + + if (!zil_disable) + zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data); + + return (0); +} + +static void +zfs_freezfsvfs(zfsvfs_t *zfsvfs) +{ + mutex_destroy(&zfsvfs->z_znodes_lock); + mutex_destroy(&zfsvfs->z_online_recv_lock); + list_destroy(&zfsvfs->z_all_znodes); + rrw_destroy(&zfsvfs->z_teardown_lock); + rw_destroy(&zfsvfs->z_teardown_inactive_lock); + rw_destroy(&zfsvfs->z_fuid_lock); + kmem_free(zfsvfs, sizeof (zfsvfs_t)); +} + +static int +zfs_domount(vfs_t *vfsp, char *osname) +{ + dev_t mount_dev; + uint64_t recordsize, readonly; + int error = 0; + int mode; + zfsvfs_t *zfsvfs; + znode_t *zp = NULL; + + ASSERT(vfsp); + ASSERT(osname); + + /* + * Initialize the zfs-specific filesystem structure. + * Should probably make this a kmem cache, shuffle fields, + * and just bzero up to z_hold_mtx[]. 
+ */ + zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP); + zfsvfs->z_vfs = vfsp; + zfsvfs->z_parent = zfsvfs; + zfsvfs->z_assign = TXG_NOWAIT; + zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE; + zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE; + + mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&zfsvfs->z_online_recv_lock, NULL, MUTEX_DEFAULT, NULL); + list_create(&zfsvfs->z_all_znodes, sizeof (znode_t), + offsetof(znode_t, z_link_node)); + rrw_init(&zfsvfs->z_teardown_lock); + rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL); + rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL); + + /* Initialize the generic filesystem structure. */ + vfsp->vfs_bcount = 0; + vfsp->vfs_data = NULL; + + if (zfs_create_unique_device(&mount_dev) == -1) { + error = ENODEV; + goto out; + } + ASSERT(vfs_devismounted(mount_dev) == 0); + + if (error = dsl_prop_get_integer(osname, "recordsize", &recordsize, + NULL)) + goto out; + + vfsp->vfs_dev = mount_dev; + vfsp->vfs_fstype = zfsfstype; + vfsp->vfs_bsize = recordsize; + vfsp->vfs_flag |= VFS_NOTRUNC; + vfsp->vfs_data = zfsvfs; + + if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL)) + goto out; + + mode = DS_MODE_OWNER; + if (readonly) + mode |= DS_MODE_READONLY; + + error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os); + if (error == EROFS) { + mode = DS_MODE_OWNER | DS_MODE_READONLY; + error = dmu_objset_open(osname, DMU_OST_ZFS, mode, + &zfsvfs->z_os); + } + + if (error) + goto out; + + if (error = zfs_init_fs(zfsvfs, &zp)) + goto out; + + /* The call to zfs_init_fs leaves the vnode held, release it here. */ + VN_RELE(ZTOV(zp)); + + /* + * Set features for file system. + */ + zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os); + if (zfsvfs->z_use_fuids) { + vfs_set_feature(vfsp, VFSFT_XVATTR); + vfs_set_feature(vfsp, VFSFT_SYSATTR_VIEWS); + vfs_set_feature(vfsp, VFSFT_ACEMASKONACCESS); + vfs_set_feature(vfsp, VFSFT_ACLONCREATE); + } + if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) { + vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS); + vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE); + vfs_set_feature(vfsp, VFSFT_NOCASESENSITIVE); + } else if (zfsvfs->z_case == ZFS_CASE_MIXED) { + vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS); + vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE); + } + + if (dmu_objset_is_snapshot(zfsvfs->z_os)) { + uint64_t pval; + + ASSERT(mode & DS_MODE_READONLY); + atime_changed_cb(zfsvfs, B_FALSE); + readonly_changed_cb(zfsvfs, B_TRUE); + if (error = dsl_prop_get_integer(osname, "xattr", &pval, NULL)) + goto out; + xattr_changed_cb(zfsvfs, pval); + zfsvfs->z_issnap = B_TRUE; + } else { + error = zfsvfs_setup(zfsvfs, B_TRUE); + } + + if (!zfsvfs->z_issnap) + zfsctl_create(zfsvfs); +out: + if (error) { + if (zfsvfs->z_os) + dmu_objset_close(zfsvfs->z_os); + zfs_freezfsvfs(zfsvfs); + } else { + atomic_add_32(&zfs_active_fs_count, 1); + } + + return (error); +} + +void +zfs_unregister_callbacks(zfsvfs_t *zfsvfs) +{ + objset_t *os = zfsvfs->z_os; + struct dsl_dataset *ds; + + /* + * Unregister properties. 
+ */ + if (!dmu_objset_is_snapshot(os)) { + ds = dmu_objset_ds(os); + VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb, + zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb, + zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, + zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb, + zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "devices", devices_changed_cb, + zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb, + zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb, + zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, + zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb, + zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "aclinherit", + acl_inherit_changed_cb, zfsvfs) == 0); + + VERIFY(dsl_prop_unregister(ds, "vscan", + vscan_changed_cb, zfsvfs) == 0); + } +} + +/* + * Convert a decimal digit string to a uint64_t integer. + */ +static int +str_to_uint64(char *str, uint64_t *objnum) +{ + uint64_t num = 0; + + while (*str) { + if (*str < '0' || *str > '9') + return (EINVAL); + + num = num*10 + *str++ - '0'; + } + + *objnum = num; + return (0); +} + +/* + * The boot path passed from the boot loader is in the form of + * "rootpool-name/root-filesystem-object-number'. Convert this + * string to a dataset name: "rootpool-name/root-filesystem-name". + */ +static int +zfs_parse_bootfs(char *bpath, char *outpath) +{ + char *slashp; + uint64_t objnum; + int error; + + if (*bpath == 0 || *bpath == '/') + return (EINVAL); + + (void) strcpy(outpath, bpath); + + slashp = strchr(bpath, '/'); + + /* if no '/', just return the pool name */ + if (slashp == NULL) { + return (0); + } + + /* if not a number, just return the root dataset name */ + if (str_to_uint64(slashp+1, &objnum)) { + return (0); + } + + *slashp = '\0'; + error = dsl_dsobj_to_dsname(bpath, objnum, outpath); + *slashp = '/'; + + return (error); +} + +static int +zfs_mountroot(vfs_t *vfsp, enum whymountroot why) +{ + int error = 0; + static int zfsrootdone = 0; + zfsvfs_t *zfsvfs = NULL; + znode_t *zp = NULL; + vnode_t *vp = NULL; + char *zfs_bootfs; + char *zfs_devid; + + ASSERT(vfsp); + + /* + * The filesystem that we mount as root is defined in the + * boot property "zfs-bootfs" with a format of + * "poolname/root-dataset-objnum". + */ + if (why == ROOT_INIT) { + if (zfsrootdone++) + return (EBUSY); + /* + * the process of doing a spa_load will require the + * clock to be set before we could (for example) do + * something better by looking at the timestamp on + * an uberblock, so just set it to -1. 
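/*
 * Editor's worked example, not part of the patch; the pool name, object
 * number, and dataset name below are made up.  Given the boot property
 * zfs-bootfs="rpool/85", zfs_parse_bootfs() proceeds as follows:
 *
 *   str_to_uint64("85", &objnum)             objnum = 85, returns 0
 *   dsl_dsobj_to_dsname("rpool", 85, out)    out = "rpool/ROOT/netbsd" (say)
 *
 * If the value has no '/' ("rpool") or a non-numeric tail, the string is
 * passed through unchanged, so the boot loader may supply either form.
 */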
+ */ + clkset(-1); + + if ((zfs_bootfs = spa_get_bootprop("zfs-bootfs")) == NULL) { + cmn_err(CE_NOTE, "spa_get_bootfs: can not get " + "bootfs name"); + return (EINVAL); + } + zfs_devid = spa_get_bootprop("diskdevid"); + error = spa_import_rootpool(rootfs.bo_name, zfs_devid); + if (zfs_devid) + spa_free_bootprop(zfs_devid); + if (error) { + spa_free_bootprop(zfs_bootfs); + cmn_err(CE_NOTE, "spa_import_rootpool: error %d", + error); + return (error); + } + if (error = zfs_parse_bootfs(zfs_bootfs, rootfs.bo_name)) { + spa_free_bootprop(zfs_bootfs); + cmn_err(CE_NOTE, "zfs_parse_bootfs: error %d", + error); + return (error); + } + + spa_free_bootprop(zfs_bootfs); + + if (error = vfs_lock(vfsp)) + return (error); + + if (error = zfs_domount(vfsp, rootfs.bo_name)) { + cmn_err(CE_NOTE, "zfs_domount: error %d", error); + goto out; + } + + zfsvfs = (zfsvfs_t *)vfsp->vfs_data; + ASSERT(zfsvfs); + if (error = zfs_zget(zfsvfs, zfsvfs->z_root, &zp)) { + cmn_err(CE_NOTE, "zfs_zget: error %d", error); + goto out; + } + + vp = ZTOV(zp); + mutex_enter(&vp->v_lock); + vp->v_flag |= VROOT; + mutex_exit(&vp->v_lock); + rootvp = vp; + + /* + * Leave rootvp held. The root file system is never unmounted. + */ + + vfs_add((struct vnode *)0, vfsp, + (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0); +out: + vfs_unlock(vfsp); + return (error); + } else if (why == ROOT_REMOUNT) { + readonly_changed_cb(vfsp->vfs_data, B_FALSE); + vfsp->vfs_flag |= VFS_REMOUNT; + + /* refresh mount options */ + zfs_unregister_callbacks(vfsp->vfs_data); + return (zfs_register_callbacks(vfsp)); + + } else if (why == ROOT_UNMOUNT) { + zfs_unregister_callbacks((zfsvfs_t *)vfsp->vfs_data); + (void) zfs_sync(vfsp, 0, 0); + return (0); + } + + /* + * if "why" is equal to anything else other than ROOT_INIT, + * ROOT_REMOUNT, or ROOT_UNMOUNT, we do not support it. + */ + return (ENOTSUP); +} + +/*ARGSUSED*/ +static int +zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr) +{ + char *osname; + pathname_t spn; + int error = 0; + uio_seg_t fromspace = (uap->flags & MS_SYSSPACE) ? + UIO_SYSSPACE : UIO_USERSPACE; + int canwrite; + + if (mvp->v_type != VDIR) + return (ENOTDIR); + + mutex_enter(&mvp->v_lock); + if ((uap->flags & MS_REMOUNT) == 0 && + (uap->flags & MS_OVERLAY) == 0 && + (mvp->v_count != 1 || (mvp->v_flag & VROOT))) { + mutex_exit(&mvp->v_lock); + return (EBUSY); + } + mutex_exit(&mvp->v_lock); + + /* + * ZFS does not support passing unparsed data in via MS_DATA. + * Users should use the MS_OPTIONSTR interface; this means + * that all option parsing is already done and the options struct + * can be interrogated. + */ + if ((uap->flags & MS_DATA) && uap->datalen > 0) + return (EINVAL); + + /* + * Get the objset name (the "special" mount argument). + */ + if (error = pn_get(uap->spec, fromspace, &spn)) + return (error); + + osname = spn.pn_path; + + /* + * Check for mount privilege? + * + * If we don't have privilege then see if + * we have local permission to allow it + */ + error = secpolicy_fs_mount(cr, mvp, vfsp); + if (error) { + error = dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr); + if (error == 0) { + vattr_t vattr; + + /* + * Make sure user is the owner of the mount point + * or has sufficient privileges. 
+ */ + + vattr.va_mask = AT_UID; + + if (error = VOP_GETATTR(mvp, &vattr, 0, cr, NULL)) { + goto out; + } + + if (secpolicy_vnode_owner(cr, vattr.va_uid) != 0 && + VOP_ACCESS(mvp, VWRITE, 0, cr, NULL) != 0) { + error = EPERM; + goto out; + } + + secpolicy_fs_mount_clearopts(cr, vfsp); + } else { + goto out; + } + } + + /* + * Refuse to mount a filesystem if we are in a local zone and the + * dataset is not visible. + */ + if (!INGLOBALZONE(curproc) && + (!zone_dataset_visible(osname, &canwrite) || !canwrite)) { + error = EPERM; + goto out; + } + + /* + * When doing a remount, we simply refresh our temporary properties + * according to those options set in the current VFS options. + */ + if (uap->flags & MS_REMOUNT) { + /* refresh mount options */ + zfs_unregister_callbacks(vfsp->vfs_data); + error = zfs_register_callbacks(vfsp); + goto out; + } + + error = zfs_domount(vfsp, osname); + +out: + pn_free(&spn); + return (error); +} + +static int +zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp) +{ + zfsvfs_t *zfsvfs = vfsp->vfs_data; + dev32_t d32; + uint64_t refdbytes, availbytes, usedobjs, availobjs; + + ZFS_ENTER(zfsvfs); + + dmu_objset_space(zfsvfs->z_os, + &refdbytes, &availbytes, &usedobjs, &availobjs); + + /* + * The underlying storage pool actually uses multiple block sizes. + * We report the fragsize as the smallest block size we support, + * and we report our blocksize as the filesystem's maximum blocksize. + */ + statp->f_frsize = 1UL << SPA_MINBLOCKSHIFT; + statp->f_bsize = zfsvfs->z_max_blksz; + + /* + * The following report "total" blocks of various kinds in the + * file system, but reported in terms of f_frsize - the + * "fragment" size. + */ + + statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT; + statp->f_bfree = availbytes >> SPA_MINBLOCKSHIFT; + statp->f_bavail = statp->f_bfree; /* no root reservation */ + + /* + * statvfs() should really be called statufs(), because it assumes + * static metadata. ZFS doesn't preallocate files, so the best + * we can do is report the max that could possibly fit in f_files, + * and that minus the number actually used in f_ffree. + * For f_ffree, report the smaller of the number of object available + * and the number of blocks (each object will take at least a block). + */ + statp->f_ffree = MIN(availobjs, statp->f_bfree); + statp->f_favail = statp->f_ffree; /* no "root reservation" */ + statp->f_files = statp->f_ffree + usedobjs; + + (void) cmpldev(&d32, vfsp->vfs_dev); + statp->f_fsid = d32; + + /* + * We're a zfs filesystem. + */ + (void) strcpy(statp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name); + + statp->f_flag = vf_to_stf(vfsp->vfs_flag); + + statp->f_namemax = ZFS_MAXNAMELEN; + + /* + * We have all of 32 characters to stuff a string here. + * Is there anything useful we could/should provide? + */ + bzero(statp->f_fstr, sizeof (statp->f_fstr)); + + ZFS_EXIT(zfsvfs); + return (0); +} + +static int +zfs_root(vfs_t *vfsp, vnode_t **vpp) +{ + zfsvfs_t *zfsvfs = vfsp->vfs_data; + znode_t *rootzp; + int error; + + ZFS_ENTER(zfsvfs); + + error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp); + if (error == 0) + *vpp = ZTOV(rootzp); + + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Teardown the zfsvfs::z_os. + * + * Note, if 'unmounting' if FALSE, we return with the 'z_teardown_lock' + * and 'z_teardown_inactive_lock' held. 
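/*
 * Editor's worked example for the zfs_statvfs() arithmetic above, not part
 * of the patch; the byte counts are made up.  With SPA_MINBLOCKSHIFT = 9
 * the fragment size is 512 bytes, so for refdbytes = 1 GiB,
 * availbytes = 3 GiB, usedobjs = 1000 and availobjs = 10,000,000:
 *
 *   f_frsize = 512                  f_bsize = z_max_blksz (e.g. 128K)
 *   f_blocks = 4 GiB / 512 = 8388608
 *   f_bfree  = 3 GiB / 512 = 6291456 = f_bavail
 *   f_ffree  = MIN(10000000, 6291456) = 6291456 = f_favail
 *   f_files  = 6291456 + 1000 = 6292456
 */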
+ */ +static int +zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting) +{ + znode_t *zp; + + rrw_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG); + + if (!unmounting) { + /* + * We purge the parent filesystem's vfsp as the parent + * filesystem and all of its snapshots have their vnode's + * v_vfsp set to the parent's filesystem's vfsp. Note, + * 'z_parent' is self referential for non-snapshots. + */ + (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0); + } + + /* + * Close the zil. NB: Can't close the zil while zfs_inactive + * threads are blocked as zil_close can call zfs_inactive. + */ + if (zfsvfs->z_log) { + zil_close(zfsvfs->z_log); + zfsvfs->z_log = NULL; + } + + rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER); + + /* + * If we are not unmounting (ie: online recv) and someone already + * unmounted this file system while we were doing the switcheroo, + * or a reopen of z_os failed then just bail out now. + */ + if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) { + rw_exit(&zfsvfs->z_teardown_inactive_lock); + rrw_exit(&zfsvfs->z_teardown_lock, FTAG); + return (EIO); + } + + /* + * At this point there are no vops active, and any new vops will + * fail with EIO since we have z_teardown_lock for writer (only + * relavent for forced unmount). + * + * Release all holds on dbufs. + */ + mutex_enter(&zfsvfs->z_znodes_lock); + for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL; + zp = list_next(&zfsvfs->z_all_znodes, zp)) + if (zp->z_dbuf) { + ASSERT(ZTOV(zp)->v_count > 0); + zfs_znode_dmu_fini(zp); + } + mutex_exit(&zfsvfs->z_znodes_lock); + + /* + * If we are unmounting, set the unmounted flag and let new vops + * unblock. zfs_inactive will have the unmounted behavior, and all + * other vops will fail with EIO. + */ + if (unmounting) { + zfsvfs->z_unmounted = B_TRUE; + rrw_exit(&zfsvfs->z_teardown_lock, FTAG); + rw_exit(&zfsvfs->z_teardown_inactive_lock); + } + + /* + * z_os will be NULL if there was an error in attempting to reopen + * zfsvfs, so just return as the properties had already been + * unregistered and cached data had been evicted before. + */ + if (zfsvfs->z_os == NULL) + return (0); + + /* + * Unregister properties. + */ + zfs_unregister_callbacks(zfsvfs); + + /* + * Evict cached data + */ + if (dmu_objset_evict_dbufs(zfsvfs->z_os)) { + txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0); + (void) dmu_objset_evict_dbufs(zfsvfs->z_os); + } + + return (0); +} + +/*ARGSUSED*/ +static int +zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr) +{ + zfsvfs_t *zfsvfs = vfsp->vfs_data; + objset_t *os; + int ret; + + ret = secpolicy_fs_unmount(cr, vfsp); + if (ret) { + ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource), + ZFS_DELEG_PERM_MOUNT, cr); + if (ret) + return (ret); + } + + /* + * We purge the parent filesystem's vfsp as the parent filesystem + * and all of its snapshots have their vnode's v_vfsp set to the + * parent's filesystem's vfsp. Note, 'z_parent' is self + * referential for non-snapshots. + */ + (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0); + + /* + * Unmount any snapshots mounted under .zfs before unmounting the + * dataset itself. + */ + if (zfsvfs->z_ctldir != NULL && + (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0) { + return (ret); + } + + if (!(fflag & MS_FORCE)) { + /* + * Check the number of active vnodes in the file system. + * Our count is maintained in the vfs structure, but the + * number is off by 1 to indicate a hold on the vfs + * structure itself. 
+ * + * The '.zfs' directory maintains a reference of its + * own, and any active references underneath are + * reflected in the vnode count. + */ + if (zfsvfs->z_ctldir == NULL) { + if (vfsp->vfs_count > 1) + return (EBUSY); + } else { + if (vfsp->vfs_count > 2 || + zfsvfs->z_ctldir->v_count > 1) + return (EBUSY); + } + } + + vfsp->vfs_flag |= VFS_UNMOUNTED; + + VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0); + os = zfsvfs->z_os; + + /* + * z_os will be NULL if there was an error in + * attempting to reopen zfsvfs. + */ + if (os != NULL) { + /* + * Unset the objset user_ptr. + */ + mutex_enter(&os->os->os_user_ptr_lock); + dmu_objset_set_user(os, NULL); + mutex_exit(&os->os->os_user_ptr_lock); + + /* + * Finally release the objset + */ + dmu_objset_close(os); + } + + /* + * We can now safely destroy the '.zfs' directory node. + */ + if (zfsvfs->z_ctldir != NULL) + zfsctl_destroy(zfsvfs); + + return (0); +} + +static int +zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp) +{ + zfsvfs_t *zfsvfs = vfsp->vfs_data; + znode_t *zp; + uint64_t object = 0; + uint64_t fid_gen = 0; + uint64_t gen_mask; + uint64_t zp_gen; + int i, err; + + *vpp = NULL; + + ZFS_ENTER(zfsvfs); + + if (fidp->fid_len == LONG_FID_LEN) { + zfid_long_t *zlfid = (zfid_long_t *)fidp; + uint64_t objsetid = 0; + uint64_t setgen = 0; + + for (i = 0; i < sizeof (zlfid->zf_setid); i++) + objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i); + + for (i = 0; i < sizeof (zlfid->zf_setgen); i++) + setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i); + + ZFS_EXIT(zfsvfs); + + err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs); + if (err) + return (EINVAL); + ZFS_ENTER(zfsvfs); + } + + if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) { + zfid_short_t *zfid = (zfid_short_t *)fidp; + + for (i = 0; i < sizeof (zfid->zf_object); i++) + object |= ((uint64_t)zfid->zf_object[i]) << (8 * i); + + for (i = 0; i < sizeof (zfid->zf_gen); i++) + fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i); + } else { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + /* A zero fid_gen means we are in the .zfs control directories */ + if (fid_gen == 0 && + (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) { + *vpp = zfsvfs->z_ctldir; + ASSERT(*vpp != NULL); + if (object == ZFSCTL_INO_SNAPDIR) { + VERIFY(zfsctl_root_lookup(*vpp, "snapshot", vpp, NULL, + 0, NULL, NULL, NULL, NULL, NULL) == 0); + } else { + VN_HOLD(*vpp); + } + ZFS_EXIT(zfsvfs); + return (0); + } + + gen_mask = -1ULL >> (64 - 8 * i); + + dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask); + if (err = zfs_zget(zfsvfs, object, &zp)) { + ZFS_EXIT(zfsvfs); + return (err); + } + zp_gen = zp->z_phys->zp_gen & gen_mask; + if (zp_gen == 0) + zp_gen = 1; + if (zp->z_unlinked || zp_gen != fid_gen) { + dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen); + VN_RELE(ZTOV(zp)); + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + *vpp = ZTOV(zp); + ZFS_EXIT(zfsvfs); + return (0); +} + +/* + * Block out VOPs and close zfsvfs_t::z_os + * + * Note, if successful, then we return with the 'z_teardown_lock' and + * 'z_teardown_inactive_lock' write held. + */ +int +zfs_suspend_fs(zfsvfs_t *zfsvfs, char *name, int *mode) +{ + int error; + + if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0) + return (error); + + *mode = zfsvfs->z_os->os_mode; + dmu_objset_name(zfsvfs->z_os, name); + dmu_objset_close(zfsvfs->z_os); + + return (0); +} + +/* + * Reopen zfsvfs_t::z_os and release VOPs. 
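/*
 * Editor's sketch, not part of the patch: the byte-array handling in
 * zfs_vget() above is plain little-endian reassembly of an integer stored
 * one byte per array element.  A hypothetical helper pair showing the
 * round trip (the encoding side is done by zfs_fid(), outside this hunk):
 */
static void
example_fid_store(uint64_t val, uint8_t *bytes, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		bytes[i] = (uint8_t)(val >> (8 * i));	/* low byte first */
}

static uint64_t
example_fid_load(const uint8_t *bytes, size_t n)
{
	uint64_t val = 0;
	size_t i;

	for (i = 0; i < n; i++)
		val |= (uint64_t)bytes[i] << (8 * i);
	return (val);
}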
+ */ +int +zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname, int mode) +{ + int err; + + ASSERT(RRW_WRITE_HELD(&zfsvfs->z_teardown_lock)); + ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)); + + err = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os); + if (err) { + zfsvfs->z_os = NULL; + } else { + znode_t *zp; + + VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0); + + /* + * Attempt to re-establish all the active znodes with + * their dbufs. If a zfs_rezget() fails, then we'll let + * any potential callers discover that via ZFS_ENTER_VERIFY_VP + * when they try to use their znode. + */ + mutex_enter(&zfsvfs->z_znodes_lock); + for (zp = list_head(&zfsvfs->z_all_znodes); zp; + zp = list_next(&zfsvfs->z_all_znodes, zp)) { + (void) zfs_rezget(zp); + } + mutex_exit(&zfsvfs->z_znodes_lock); + + } + + /* release the VOPs */ + rw_exit(&zfsvfs->z_teardown_inactive_lock); + rrw_exit(&zfsvfs->z_teardown_lock, FTAG); + + if (err) { + /* + * Since we couldn't reopen zfsvfs::z_os, force + * unmount this file system. + */ + if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0) + (void) dounmount(zfsvfs->z_vfs, MS_FORCE, CRED()); + } + return (err); +} + +static void +zfs_freevfs(vfs_t *vfsp) +{ + zfsvfs_t *zfsvfs = vfsp->vfs_data; + int i; + + for (i = 0; i != ZFS_OBJ_MTX_SZ; i++) + mutex_destroy(&zfsvfs->z_hold_mtx[i]); + + zfs_fuid_destroy(zfsvfs); + zfs_freezfsvfs(zfsvfs); + + atomic_add_32(&zfs_active_fs_count, -1); +} + +/* + * VFS_INIT() initialization. Note that there is no VFS_FINI(), + * so we can't safely do any non-idempotent initialization here. + * Leave that to zfs_init() and zfs_fini(), which are called + * from the module's _init() and _fini() entry points. + */ +/*ARGSUSED*/ +static int +zfs_vfsinit(int fstype, char *name) +{ + int error; + + zfsfstype = fstype; + + /* + * Setup vfsops and vnodeops tables. + */ + error = vfs_setfsops(fstype, zfs_vfsops_template, &zfs_vfsops); + if (error != 0) { + cmn_err(CE_WARN, "zfs: bad vfs ops template"); + } + + error = zfs_create_op_tables(); + if (error) { + zfs_remove_op_tables(); + cmn_err(CE_WARN, "zfs: bad vnode ops template"); + (void) vfs_freevfsops_by_type(zfsfstype); + return (error); + } + + mutex_init(&zfs_dev_mtx, NULL, MUTEX_DEFAULT, NULL); + + /* + * Unique major number for all zfs mounts. + * If we run out of 32-bit minors, we'll getudev() another major. + */ + zfs_major = ddi_name_to_major(ZFS_DRIVER); + zfs_minor = ZFS_MIN_MINOR; + + return (0); +} + +void +zfs_init(void) +{ + /* + * Initialize .zfs directory structures + */ + zfsctl_init(); + + /* + * Initialize znode cache, vnode ops, etc... + */ + zfs_znode_init(); +} + +void +zfs_fini(void) +{ + zfsctl_fini(); + zfs_znode_fini(); +} + +int +zfs_busy(void) +{ + return (zfs_active_fs_count != 0); +} + +int +zfs_set_version(const char *name, uint64_t newvers) +{ + int error; + objset_t *os; + dmu_tx_t *tx; + uint64_t curvers; + + /* + * XXX for now, require that the filesystem be unmounted. Would + * be nice to find the zfsvfs_t and just update that if + * possible. 
+ */ + + if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION) + return (EINVAL); + + error = dmu_objset_open(name, DMU_OST_ZFS, DS_MODE_OWNER, &os); + if (error) + return (error); + + error = zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, + 8, 1, &curvers); + if (error) + goto out; + if (newvers < curvers) { + error = EINVAL; + goto out; + } + + tx = dmu_tx_create(os); + dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, 0, ZPL_VERSION_STR); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + dmu_tx_abort(tx); + goto out; + } + error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 8, 1, + &newvers, tx); + + spa_history_internal_log(LOG_DS_UPGRADE, + dmu_objset_spa(os), tx, CRED(), + "oldver=%llu newver=%llu dataset = %llu", curvers, newvers, + dmu_objset_id(os)); + dmu_tx_commit(tx); + +out: + dmu_objset_close(os); + return (error); +} + +/* + * Read a property stored within the master node. + */ +int +zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value) +{ + const char *pname; + int error = ENOENT; + + /* + * Look up the file system's value for the property. For the + * version property, we look up a slightly different string. + */ + if (prop == ZFS_PROP_VERSION) + pname = ZPL_VERSION_STR; + else + pname = zfs_prop_to_name(prop); + + if (os != NULL) + error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value); + + if (error == ENOENT) { + /* No value set, use the default value */ + switch (prop) { + case ZFS_PROP_VERSION: + *value = ZPL_VERSION; + break; + case ZFS_PROP_NORMALIZE: + case ZFS_PROP_UTF8ONLY: + *value = 0; + break; + case ZFS_PROP_CASE: + *value = ZFS_CASE_SENSITIVE; + break; + default: + return (error); + } + error = 0; + } + return (error); +} + +static vfsdef_t vfw = { + VFSDEF_VERSION, + MNTTYPE_ZFS, + zfs_vfsinit, + VSW_HASPROTO|VSW_CANRWRO|VSW_CANREMOUNT|VSW_VOLATILEDEV|VSW_STATS| + VSW_XID, + &zfs_mntopts +}; + +struct modlfs zfs_modlfs = { + &mod_fsops, "ZFS filesystem version " SPA_VERSION_STRING, &vfw +}; diff --git a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c new file mode 100644 index 000000000000..8e0037e37da5 --- /dev/null +++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c @@ -0,0 +1,4561 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved. + * Use is subject to license terms. 
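/*
 * Editor's usage sketch for zfs_get_zplprop() above, not part of the patch.
 * When the master node holds no explicit value (or os is NULL), the helper
 * falls back to a compiled-in default, e.g. ZPL_VERSION for the version
 * property, so callers can treat the lookup as infallible for those props.
 */
uint64_t zplver;

if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &zplver) == 0)
	dprintf("zpl version %llu\n", (u_longlong_t)zplver);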
+ */ + +/* Portions Copyright 2007 Jeremy Teo */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fs/fs_subr.h" +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Programming rules. + * + * Each vnode op performs some logical unit of work. To do this, the ZPL must + * properly lock its in-core state, create a DMU transaction, do the work, + * record this work in the intent log (ZIL), commit the DMU transaction, + * and wait for the intent log to commit if it is a synchronous operation. + * Moreover, the vnode ops must work in both normal and log replay context. + * The ordering of events is important to avoid deadlocks and references + * to freed memory. The example below illustrates the following Big Rules: + * + * (1) A check must be made in each zfs thread for a mounted file system. + * This is done avoiding races using ZFS_ENTER(zfsvfs). + * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes + * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros + * can return EIO from the calling function. + * + * (2) VN_RELE() should always be the last thing except for zil_commit() + * (if necessary) and ZFS_EXIT(). This is for 3 reasons: + * First, if it's the last reference, the vnode/znode + * can be freed, so the zp may point to freed memory. Second, the last + * reference will call zfs_zinactive(), which may induce a lot of work -- + * pushing cached pages (which acquires range locks) and syncing out + * cached atime changes. Third, zfs_zinactive() may require a new tx, + * which could deadlock the system if you were already holding one. + * + * (3) All range locks must be grabbed before calling dmu_tx_assign(), + * as they can span dmu_tx_assign() calls. + * + * (4) Always pass zfsvfs->z_assign as the second argument to dmu_tx_assign(). + * In normal operation, this will be TXG_NOWAIT. During ZIL replay, + * it will be a specific txg. Either way, dmu_tx_assign() never blocks. + * This is critical because we don't want to block while holding locks. + * Note, in particular, that if a lock is sometimes acquired before + * the tx assigns, and sometimes after (e.g. z_lock), then failing to + * use a non-blocking assign can deadlock the system. The scenario: + * + * Thread A has grabbed a lock before calling dmu_tx_assign(). + * Thread B is in an already-assigned tx, and blocks for this lock. + * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open() + * forever, because the previous txg can't quiesce until B's tx commits. + * + * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT, + * then drop all locks, call dmu_tx_wait(), and try again. + * + * (5) If the operation succeeded, generate the intent log entry for it + * before dropping locks. This ensures that the ordering of events + * in the intent log matches the order in which they actually occurred. + * + * (6) At the end of each vnode op, the DMU tx must always commit, + * regardless of whether there were any errors. + * + * (7) After dropping all locks, invoke zil_commit(zilog, seq, foid) + * to ensure that synchronous semantics are provided when necessary. 
+ * + * In general, this is how things should be ordered in each vnode op: + * + * ZFS_ENTER(zfsvfs); // exit if unmounted + * top: + * zfs_dirent_lock(&dl, ...) // lock directory entry (may VN_HOLD()) + * rw_enter(...); // grab any other locks you need + * tx = dmu_tx_create(...); // get DMU tx + * dmu_tx_hold_*(); // hold each object you might modify + * error = dmu_tx_assign(tx, zfsvfs->z_assign); // try to assign + * if (error) { + * rw_exit(...); // drop locks + * zfs_dirent_unlock(dl); // unlock directory entry + * VN_RELE(...); // release held vnodes + * if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + * dmu_tx_wait(tx); + * dmu_tx_abort(tx); + * goto top; + * } + * dmu_tx_abort(tx); // abort DMU tx + * ZFS_EXIT(zfsvfs); // finished in zfs + * return (error); // really out of space + * } + * error = do_real_work(); // do whatever this VOP does + * if (error == 0) + * zfs_log_*(...); // on success, make ZIL entry + * dmu_tx_commit(tx); // commit DMU tx -- error or not + * rw_exit(...); // drop locks + * zfs_dirent_unlock(dl); // unlock directory entry + * VN_RELE(...); // release held vnodes + * zil_commit(zilog, seq, foid); // synchronous when necessary + * ZFS_EXIT(zfsvfs); // finished in zfs + * return (error); // done, report error + */ + +/* ARGSUSED */ +static int +zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct) +{ + znode_t *zp = VTOZ(*vpp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + if ((flag & FWRITE) && (zp->z_phys->zp_flags & ZFS_APPENDONLY) && + ((flag & FAPPEND) == 0)) { + ZFS_EXIT(zfsvfs); + return (EPERM); + } + + if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && + ZTOV(zp)->v_type == VREG && + !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) && + zp->z_phys->zp_size > 0) { + if (fs_vscan(*vpp, cr, 0) != 0) { + ZFS_EXIT(zfsvfs); + return (EACCES); + } + } + + /* Keep a count of the synchronous opens in the znode */ + if (flag & (FSYNC | FDSYNC)) + atomic_inc_32(&zp->z_sync_cnt); + + ZFS_EXIT(zfsvfs); + return (0); +} + +/* ARGSUSED */ +static int +zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr, + caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + /* Decrement the synchronous opens in the znode */ + if ((flag & (FSYNC | FDSYNC)) && (count == 1)) + atomic_dec_32(&zp->z_sync_cnt); + + /* + * Clean up any locks held by this process on the vp. + */ + cleanlocks(vp, ddi_get_pid(), 0); + cleanshares(vp, ddi_get_pid()); + + if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && + ZTOV(zp)->v_type == VREG && + !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) && + zp->z_phys->zp_size > 0) + VERIFY(fs_vscan(vp, cr, 1) == 0); + + ZFS_EXIT(zfsvfs); + return (0); +} + +/* + * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and + * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter. + */ +static int +zfs_holey(vnode_t *vp, int cmd, offset_t *off) +{ + znode_t *zp = VTOZ(vp); + uint64_t noff = (uint64_t)*off; /* new offset */ + uint64_t file_sz; + int error; + boolean_t hole; + + file_sz = zp->z_phys->zp_size; + if (noff >= file_sz) { + return (ENXIO); + } + + if (cmd == _FIO_SEEK_HOLE) + hole = B_TRUE; + else + hole = B_FALSE; + + error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff); + + /* end of file? */ + if ((error == ESRCH) || (noff > file_sz)) { + /* + * Handle the virtual hole at the end of file. 
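/*
 * Editor's minimal sketch of rule (4) and the ordering comment above, not
 * part of the patch: the non-blocking assign/retry pattern on its own.  The
 * dmu_tx_hold_bonus() call and the "do the work" body are placeholders.
 */
static int
example_tx_retry(zfsvfs_t *zfsvfs, uint64_t object)
{
	dmu_tx_t *tx;
	int error;

top:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_bonus(tx, object);
	error = dmu_tx_assign(tx, zfsvfs->z_assign);
	if (error) {
		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
			/* Wait for the next open txg, then retry. */
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		return (error);		/* really out of space */
	}

	/* ... modify the held objects here ... */

	dmu_tx_commit(tx);
	return (0);
}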
+ */ + if (hole) { + *off = file_sz; + return (0); + } + return (ENXIO); + } + + if (noff < *off) + return (error); + *off = noff; + return (error); +} + +/* ARGSUSED */ +static int +zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred, + int *rvalp, caller_context_t *ct) +{ + offset_t off; + int error; + zfsvfs_t *zfsvfs; + znode_t *zp; + + switch (com) { + case _FIOFFS: + return (zfs_sync(vp->v_vfsp, 0, cred)); + + /* + * The following two ioctls are used by bfu. Faking out, + * necessary to avoid bfu errors. + */ + case _FIOGDIO: + case _FIOSDIO: + return (0); + + case _FIO_SEEK_DATA: + case _FIO_SEEK_HOLE: + if (ddi_copyin((void *)data, &off, sizeof (off), flag)) + return (EFAULT); + + zp = VTOZ(vp); + zfsvfs = zp->z_zfsvfs; + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + /* offset parameter is in/out */ + error = zfs_holey(vp, com, &off); + ZFS_EXIT(zfsvfs); + if (error) + return (error); + if (ddi_copyout(&off, (void *)data, sizeof (off), flag)) + return (EFAULT); + return (0); + } + return (ENOTTY); +} + +/* + * Utility functions to map and unmap a single physical page. These + * are used to manage the mappable copies of ZFS file data, and therefore + * do not update ref/mod bits. + */ +caddr_t +zfs_map_page(page_t *pp, enum seg_rw rw) +{ + if (kpm_enable) + return (hat_kpm_mapin(pp, 0)); + ASSERT(rw == S_READ || rw == S_WRITE); + return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0), + (caddr_t)-1)); +} + +void +zfs_unmap_page(page_t *pp, caddr_t addr) +{ + if (kpm_enable) { + hat_kpm_mapout(pp, 0, addr); + } else { + ppmapout(addr); + } +} + +/* + * When a file is memory mapped, we must keep the IO data synchronized + * between the DMU cache and the memory mapped pages. What this means: + * + * On Write: If we find a memory mapped page, we write to *both* + * the page and the dmu buffer. + * + * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when + * the file is memory mapped. + */ +static int +mappedwrite(vnode_t *vp, int nbytes, uio_t *uio, dmu_tx_t *tx) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + int64_t start, off; + int len = nbytes; + int error = 0; + + start = uio->uio_loffset; + off = start & PAGEOFFSET; + for (start &= PAGEMASK; len > 0; start += PAGESIZE) { + page_t *pp; + uint64_t bytes = MIN(PAGESIZE - off, len); + uint64_t woff = uio->uio_loffset; + + /* + * We don't want a new page to "appear" in the middle of + * the file update (because it may not get the write + * update data), so we grab a lock to block + * zfs_getpage(). + */ + rw_enter(&zp->z_map_lock, RW_WRITER); + if (pp = page_lookup(vp, start, SE_SHARED)) { + caddr_t va; + + rw_exit(&zp->z_map_lock); + va = zfs_map_page(pp, S_WRITE); + error = uiomove(va+off, bytes, UIO_WRITE, uio); + if (error == 0) { + dmu_write(zfsvfs->z_os, zp->z_id, + woff, bytes, va+off, tx); + } + zfs_unmap_page(pp, va); + page_unlock(pp); + } else { + error = dmu_write_uio(zfsvfs->z_os, zp->z_id, + uio, bytes, tx); + rw_exit(&zp->z_map_lock); + } + len -= bytes; + off = 0; + if (error) + break; + } + return (error); +} + +/* + * When a file is memory mapped, we must keep the IO data synchronized + * between the DMU cache and the memory mapped pages. What this means: + * + * On Read: We "read" preferentially from memory mapped pages, + * else we default from the dmu buffer. + * + * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when + * the file is memory mapped. 
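/*
 * Editor's worked example for the page-chunking loop in mappedwrite()
 * above, not part of the patch; the offsets are made up.  With
 * PAGESIZE = 0x1000 and a write starting at uio_loffset = 0x1234 for
 * 0x2000 bytes:
 *
 *   start = 0x1234 & PAGEMASK = 0x1000,  off = 0x1234 & PAGEOFFSET = 0x234
 *   pass 1: bytes = MIN(0x1000 - 0x234, 0x2000) = 0xdcc   (rest of page 1)
 *   pass 2: start = 0x2000, off = 0, bytes = 0x1000       (whole page 2)
 *   pass 3: start = 0x3000, off = 0, bytes = 0x234        (the remainder)
 *
 * so every uiomove stays within a single page, as the NOTE above says.
 */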
+ */ +static int +mappedread(vnode_t *vp, int nbytes, uio_t *uio) +{ + znode_t *zp = VTOZ(vp); + objset_t *os = zp->z_zfsvfs->z_os; + int64_t start, off; + int len = nbytes; + int error = 0; + + start = uio->uio_loffset; + off = start & PAGEOFFSET; + for (start &= PAGEMASK; len > 0; start += PAGESIZE) { + page_t *pp; + uint64_t bytes = MIN(PAGESIZE - off, len); + + if (pp = page_lookup(vp, start, SE_SHARED)) { + caddr_t va; + + va = zfs_map_page(pp, S_READ); + error = uiomove(va + off, bytes, UIO_READ, uio); + zfs_unmap_page(pp, va); + page_unlock(pp); + } else { + error = dmu_read_uio(os, zp->z_id, uio, bytes); + } + len -= bytes; + off = 0; + if (error) + break; + } + return (error); +} + +offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */ + +/* + * Read bytes from specified file into supplied buffer. + * + * IN: vp - vnode of file to be read from. + * uio - structure supplying read location, range info, + * and return buffer. + * ioflag - SYNC flags; used to provide FRSYNC semantics. + * cr - credentials of caller. + * ct - caller context + * + * OUT: uio - updated offset and range, buffer filled. + * + * RETURN: 0 if success + * error code if failure + * + * Side Effects: + * vp - atime updated if byte count > 0 + */ +/* ARGSUSED */ +static int +zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + objset_t *os; + ssize_t n, nbytes; + int error; + rl_t *rl; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + os = zfsvfs->z_os; + + if (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) { + ZFS_EXIT(zfsvfs); + return (EACCES); + } + + /* + * Validate file offset + */ + if (uio->uio_loffset < (offset_t)0) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + /* + * Fasttrack empty reads + */ + if (uio->uio_resid == 0) { + ZFS_EXIT(zfsvfs); + return (0); + } + + /* + * Check for mandatory locks + */ + if (MANDMODE((mode_t)zp->z_phys->zp_mode)) { + if (error = chklock(vp, FREAD, + uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) { + ZFS_EXIT(zfsvfs); + return (error); + } + } + + /* + * If we're in FRSYNC mode, sync out this znode before reading it. + */ + if (ioflag & FRSYNC) + zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); + + /* + * Lock the range against changes. + */ + rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER); + + /* + * If we are reading past end-of-file we can skip + * to the end; but we might still need to set atime. + */ + if (uio->uio_loffset >= zp->z_phys->zp_size) { + error = 0; + goto out; + } + + ASSERT(uio->uio_loffset < zp->z_phys->zp_size); + n = MIN(uio->uio_resid, zp->z_phys->zp_size - uio->uio_loffset); + + while (n > 0) { + nbytes = MIN(n, zfs_read_chunk_size - + P2PHASE(uio->uio_loffset, zfs_read_chunk_size)); + + if (vn_has_cached_data(vp)) + error = mappedread(vp, nbytes, uio); + else + error = dmu_read_uio(os, zp->z_id, uio, nbytes); + if (error) { + /* convert checksum errors into IO errors */ + if (error == ECKSUM) + error = EIO; + break; + } + + n -= nbytes; + } + +out: + zfs_range_unlock(rl); + + ZFS_ACCESSTIME_STAMP(zfsvfs, zp); + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Write the bytes to a file. + * + * IN: vp - vnode of file to be written to. + * uio - structure supplying write location, range info, + * and data buffer. + * ioflag - FAPPEND flag set if in append mode. + * cr - credentials of caller. + * ct - caller context (NFS/CIFS fem monitor only) + * + * OUT: uio - updated offset and range. 
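/*
 * Editor's worked example for the zfs_read() chunking above, not part of
 * the patch; the offsets are made up.  With zfs_read_chunk_size = 1 MiB, a
 * 3 MiB read starting at offset 0x180000 (1.5 MiB) is issued as:
 *
 *   pass 1: P2PHASE(0x180000, 1M) = 0x80000, nbytes = 1M - 0x80000 = 512K
 *   pass 2: offset is now 2 MiB aligned, nbytes = 1 MiB
 *   pass 3: nbytes = 1 MiB
 *   pass 4: the remaining 512K
 *
 * After the first partial chunk every pass is chunk-aligned, which keeps
 * each dmu_read_uio()/mappedread() call within one chunk.
 */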
+ * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * vp - ctime|mtime updated if byte count > 0 + */ +/* ARGSUSED */ +static int +zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + rlim64_t limit = uio->uio_llimit; + ssize_t start_resid = uio->uio_resid; + ssize_t tx_bytes; + uint64_t end_size; + dmu_tx_t *tx; + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + zilog_t *zilog; + offset_t woff; + ssize_t n, nbytes; + rl_t *rl; + int max_blksz = zfsvfs->z_max_blksz; + uint64_t pflags; + int error; + + /* + * Fasttrack empty write + */ + n = start_resid; + if (n == 0) + return (0); + + if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T) + limit = MAXOFFSET_T; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + /* + * If immutable or not appending then return EPERM + */ + pflags = zp->z_phys->zp_flags; + if ((pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) || + ((pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) && + (uio->uio_loffset < zp->z_phys->zp_size))) { + ZFS_EXIT(zfsvfs); + return (EPERM); + } + + zilog = zfsvfs->z_log; + + /* + * Pre-fault the pages to ensure slow (eg NFS) pages + * don't hold up txg. + */ + uio_prefaultpages(n, uio); + + /* + * If in append mode, set the io offset pointer to eof. + */ + if (ioflag & FAPPEND) { + /* + * Range lock for a file append: + * The value for the start of range will be determined by + * zfs_range_lock() (to guarantee append semantics). + * If this write will cause the block size to increase, + * zfs_range_lock() will lock the entire file, so we must + * later reduce the range after we grow the block size. + */ + rl = zfs_range_lock(zp, 0, n, RL_APPEND); + if (rl->r_len == UINT64_MAX) { + /* overlocked, zp_size can't change */ + woff = uio->uio_loffset = zp->z_phys->zp_size; + } else { + woff = uio->uio_loffset = rl->r_off; + } + } else { + woff = uio->uio_loffset; + /* + * Validate file offset + */ + if (woff < 0) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + /* + * If we need to grow the block size then zfs_range_lock() + * will lock a wider range than we request here. + * Later after growing the block size we reduce the range. + */ + rl = zfs_range_lock(zp, woff, n, RL_WRITER); + } + + if (woff >= limit) { + zfs_range_unlock(rl); + ZFS_EXIT(zfsvfs); + return (EFBIG); + } + + if ((woff + n) > limit || woff > (limit - n)) + n = limit - woff; + + /* + * Check for mandatory locks + */ + if (MANDMODE((mode_t)zp->z_phys->zp_mode) && + (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) { + zfs_range_unlock(rl); + ZFS_EXIT(zfsvfs); + return (error); + } + end_size = MAX(zp->z_phys->zp_size, woff + n); + + /* + * Write the file in reasonable size chunks. Each chunk is written + * in a separate transaction; this keeps the intent log records small + * and allows us to do more fine-grained space accounting. + */ + while (n > 0) { + /* + * Start a transaction. + */ + woff = uio->uio_loffset; + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_bonus(tx, zp->z_id); + dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz)); + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + if (error == ERESTART && + zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + continue; + } + dmu_tx_abort(tx); + break; + } + + /* + * If zfs_range_lock() over-locked we grow the blocksize + * and then reduce the lock range. This will only happen + * on the first iteration since zfs_range_reduce() will + * shrink down r_len to the appropriate size. 
+ */ + if (rl->r_len == UINT64_MAX) { + uint64_t new_blksz; + + if (zp->z_blksz > max_blksz) { + ASSERT(!ISP2(zp->z_blksz)); + new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE); + } else { + new_blksz = MIN(end_size, max_blksz); + } + zfs_grow_blocksize(zp, new_blksz, tx); + zfs_range_reduce(rl, woff, n); + } + + /* + * XXX - should we really limit each write to z_max_blksz? + * Perhaps we should use SPA_MAXBLOCKSIZE chunks? + */ + nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz)); + rw_enter(&zp->z_map_lock, RW_READER); + + tx_bytes = uio->uio_resid; + if (vn_has_cached_data(vp)) { + rw_exit(&zp->z_map_lock); + error = mappedwrite(vp, nbytes, uio, tx); + } else { + error = dmu_write_uio(zfsvfs->z_os, zp->z_id, + uio, nbytes, tx); + rw_exit(&zp->z_map_lock); + } + tx_bytes -= uio->uio_resid; + + /* + * If we made no progress, we're done. If we made even + * partial progress, update the znode and ZIL accordingly. + */ + if (tx_bytes == 0) { + dmu_tx_commit(tx); + ASSERT(error != 0); + break; + } + + /* + * Clear Set-UID/Set-GID bits on successful write if not + * privileged and at least one of the excute bits is set. + * + * It would be nice to to this after all writes have + * been done, but that would still expose the ISUID/ISGID + * to another app after the partial write is committed. + * + * Note: we don't call zfs_fuid_map_id() here because + * user 0 is not an ephemeral uid. + */ + mutex_enter(&zp->z_acl_lock); + if ((zp->z_phys->zp_mode & (S_IXUSR | (S_IXUSR >> 3) | + (S_IXUSR >> 6))) != 0 && + (zp->z_phys->zp_mode & (S_ISUID | S_ISGID)) != 0 && + secpolicy_vnode_setid_retain(cr, + (zp->z_phys->zp_mode & S_ISUID) != 0 && + zp->z_phys->zp_uid == 0) != 0) { + zp->z_phys->zp_mode &= ~(S_ISUID | S_ISGID); + } + mutex_exit(&zp->z_acl_lock); + + /* + * Update time stamp. NOTE: This marks the bonus buffer as + * dirty, so we don't have to do it again for zp_size. + */ + zfs_time_stamper(zp, CONTENT_MODIFIED, tx); + + /* + * Update the file size (zp_size) if it has changed; + * account for possible concurrent updates. + */ + while ((end_size = zp->z_phys->zp_size) < uio->uio_loffset) + (void) atomic_cas_64(&zp->z_phys->zp_size, end_size, + uio->uio_loffset); + zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag); + dmu_tx_commit(tx); + + if (error != 0) + break; + ASSERT(tx_bytes == nbytes); + n -= nbytes; + } + + zfs_range_unlock(rl); + + /* + * If we're in replay mode, or we made no progress, return error. + * Otherwise, it's at least a partial write, so it's successful. + */ + if (zfsvfs->z_assign >= TXG_INITIAL || uio->uio_resid == start_resid) { + ZFS_EXIT(zfsvfs); + return (error); + } + + if (ioflag & (FSYNC | FDSYNC)) + zil_commit(zilog, zp->z_last_itx, zp->z_id); + + ZFS_EXIT(zfsvfs); + return (0); +} + +void +zfs_get_done(dmu_buf_t *db, void *vzgd) +{ + zgd_t *zgd = (zgd_t *)vzgd; + rl_t *rl = zgd->zgd_rl; + vnode_t *vp = ZTOV(rl->r_zp); + + dmu_buf_rele(db, vzgd); + zfs_range_unlock(rl); + VN_RELE(vp); + zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); + kmem_free(zgd, sizeof (zgd_t)); +} + +/* + * Get data to generate a TX_WRITE intent log record. 
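+/*
+ * Illustrative sketch (editor's addition): the lock-free file-size update
+ * performed after each chunk in zfs_write() above, rewritten here with C11
+ * atomics instead of the kernel's atomic_cas_64().  The size only ever
+ * moves forward; if a racing writer already pushed it further, the loop
+ * simply stops.
+ */
+#include <stdatomic.h>
+
+static void
+grow_size_sketch(_Atomic uint64_t *sizep, uint64_t end_of_write)
+{
+	uint64_t cur = atomic_load(sizep);
+
+	while (cur < end_of_write &&
+	    !atomic_compare_exchange_weak(sizep, &cur, end_of_write))
+		;	/* a failed CAS reloads cur; re-check and retry */
+}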
+ */ +int +zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) +{ + zfsvfs_t *zfsvfs = arg; + objset_t *os = zfsvfs->z_os; + znode_t *zp; + uint64_t off = lr->lr_offset; + dmu_buf_t *db; + rl_t *rl; + zgd_t *zgd; + int dlen = lr->lr_length; /* length of user data */ + int error = 0; + + ASSERT(zio); + ASSERT(dlen != 0); + + /* + * Nothing to do if the file has been removed + */ + if (zfs_zget(zfsvfs, lr->lr_foid, &zp) != 0) + return (ENOENT); + if (zp->z_unlinked) { + VN_RELE(ZTOV(zp)); + return (ENOENT); + } + + /* + * Write records come in two flavors: immediate and indirect. + * For small writes it's cheaper to store the data with the + * log record (immediate); for large writes it's cheaper to + * sync the data and get a pointer to it (indirect) so that + * we don't have to write the data twice. + */ + if (buf != NULL) { /* immediate write */ + rl = zfs_range_lock(zp, off, dlen, RL_READER); + /* test for truncation needs to be done while range locked */ + if (off >= zp->z_phys->zp_size) { + error = ENOENT; + goto out; + } + VERIFY(0 == dmu_read(os, lr->lr_foid, off, dlen, buf)); + } else { /* indirect write */ + uint64_t boff; /* block starting offset */ + + /* + * Have to lock the whole block to ensure when it's + * written out and it's checksum is being calculated + * that no one can change the data. We need to re-check + * blocksize after we get the lock in case it's changed! + */ + for (;;) { + if (ISP2(zp->z_blksz)) { + boff = P2ALIGN_TYPED(off, zp->z_blksz, + uint64_t); + } else { + boff = 0; + } + dlen = zp->z_blksz; + rl = zfs_range_lock(zp, boff, dlen, RL_READER); + if (zp->z_blksz == dlen) + break; + zfs_range_unlock(rl); + } + /* test for truncation needs to be done while range locked */ + if (off >= zp->z_phys->zp_size) { + error = ENOENT; + goto out; + } + zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP); + zgd->zgd_rl = rl; + zgd->zgd_zilog = zfsvfs->z_log; + zgd->zgd_bp = &lr->lr_blkptr; + VERIFY(0 == dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db)); + ASSERT(boff == db->db_offset); + lr->lr_blkoff = off - boff; + error = dmu_sync(zio, db, &lr->lr_blkptr, + lr->lr_common.lrc_txg, zfs_get_done, zgd); + ASSERT((error && error != EINPROGRESS) || + lr->lr_length <= zp->z_blksz); + if (error == 0) + zil_add_block(zfsvfs->z_log, &lr->lr_blkptr); + /* + * If we get EINPROGRESS, then we need to wait for a + * write IO initiated by dmu_sync() to complete before + * we can release this dbuf. We will finish everything + * up in the zfs_get_done() callback. + */ + if (error == EINPROGRESS) + return (0); + dmu_buf_rele(db, zgd); + kmem_free(zgd, sizeof (zgd_t)); + } +out: + zfs_range_unlock(rl); + VN_RELE(ZTOV(zp)); + return (error); +} + +/*ARGSUSED*/ +static int +zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr, + caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + int error; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + if (flag & V_ACE_MASK) + error = zfs_zaccess(zp, mode, flag, B_FALSE, cr); + else + error = zfs_zaccess_rwx(zp, mode, flag, cr); + + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Lookup an entry in a directory, or an extended attribute directory. + * If it exists, return a held vnode reference for it. + * + * IN: dvp - vnode of directory to search. + * nm - name of entry to lookup. + * pnp - full pathname to lookup [UNUSED]. + * flags - LOOKUP_XATTR set if looking for an attribute. + * rdir - root directory vnode [UNUSED]. + * cr - credentials of caller. 
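+/*
+ * Illustrative sketch (editor's addition): how the indirect-write path of
+ * zfs_get_data() above picks the start of the block to range-lock.  For a
+ * power-of-two block size the offset is rounded down with P2ALIGN; a file
+ * whose single block has a non-power-of-two size starts at offset 0.  The
+ * helper name is hypothetical.
+ */
+static uint64_t
+block_start_sketch(uint64_t off, uint64_t blksz)
+{
+	if ((blksz & (blksz - 1)) != 0)
+		return (0);		/* non-power-of-two: one block at 0 */
+	return (off & ~(blksz - 1));	/* P2ALIGN_TYPED(off, blksz, ...) */
+}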
+ * ct - caller context + * direntflags - directory lookup flags + * realpnp - returned pathname. + * + * OUT: vpp - vnode of located entry, NULL if not found. + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * NA + */ +/* ARGSUSED */ +static int +zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, + int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, + int *direntflags, pathname_t *realpnp) +{ + znode_t *zdp = VTOZ(dvp); + zfsvfs_t *zfsvfs = zdp->z_zfsvfs; + int error; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zdp); + + *vpp = NULL; + + if (flags & LOOKUP_XATTR) { + /* + * If the xattr property is off, refuse the lookup request. + */ + if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + /* + * We don't allow recursive attributes.. + * Maybe someday we will. + */ + if (zdp->z_phys->zp_flags & ZFS_XATTR) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) { + ZFS_EXIT(zfsvfs); + return (error); + } + + /* + * Do we have permission to get into attribute directory? + */ + + if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0, + B_FALSE, cr)) { + VN_RELE(*vpp); + *vpp = NULL; + } + + ZFS_EXIT(zfsvfs); + return (error); + } + + if (dvp->v_type != VDIR) { + ZFS_EXIT(zfsvfs); + return (ENOTDIR); + } + + /* + * Check accessibility of directory. + */ + + if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) { + ZFS_EXIT(zfsvfs); + return (error); + } + + if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm), + NULL, U8_VALIDATE_ENTIRE, &error) < 0) { + ZFS_EXIT(zfsvfs); + return (EILSEQ); + } + + error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp); + if (error == 0) { + /* + * Convert device special files + */ + if (IS_DEVVP(*vpp)) { + vnode_t *svp; + + svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); + VN_RELE(*vpp); + if (svp == NULL) + error = ENOSYS; + else + *vpp = svp; + } + } + + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Attempt to create a new entry in a directory. If the entry + * already exists, truncate the file if permissible, else return + * an error. Return the vp of the created or trunc'd file. + * + * IN: dvp - vnode of directory to put new file entry in. + * name - name of new file entry. + * vap - attributes of new file. + * excl - flag indicating exclusive or non-exclusive mode. + * mode - mode to open file with. + * cr - credentials of caller. + * flag - large file flag [UNUSED]. + * ct - caller context + * vsecp - ACL to be set + * + * OUT: vpp - vnode of created or trunc'd entry. 
+ * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * dvp - ctime|mtime updated if new entry created + * vp - ctime|mtime always, atime if new + */ + +/* ARGSUSED */ +static int +zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl, + int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct, + vsecattr_t *vsecp) +{ + znode_t *zp, *dzp = VTOZ(dvp); + zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zilog_t *zilog; + objset_t *os; + zfs_dirlock_t *dl; + dmu_tx_t *tx; + int error; + zfs_acl_t *aclp = NULL; + zfs_fuid_info_t *fuidp = NULL; + ksid_t *ksid; + uid_t uid; + gid_t gid = crgetgid(cr); + + /* + * If we have an ephemeral id, ACL, or XVATTR then + * make sure file system is at proper version + */ + + ksid = crgetsid(cr, KSID_OWNER); + if (ksid) + uid = ksid_getid(ksid); + else + uid = crgetuid(cr); + + if (zfsvfs->z_use_fuids == B_FALSE && + (vsecp || (vap->va_mask & AT_XVATTR) || + IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) + return (EINVAL); + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(dzp); + os = zfsvfs->z_os; + zilog = zfsvfs->z_log; + + if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), + NULL, U8_VALIDATE_ENTIRE, &error) < 0) { + ZFS_EXIT(zfsvfs); + return (EILSEQ); + } + + if (vap->va_mask & AT_XVATTR) { + if ((error = secpolicy_xvattr((xvattr_t *)vap, + crgetuid(cr), cr, vap->va_type)) != 0) { + ZFS_EXIT(zfsvfs); + return (error); + } + } +top: + *vpp = NULL; + + if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr)) + vap->va_mode &= ~VSVTX; + + if (*name == '\0') { + /* + * Null component name refers to the directory itself. + */ + VN_HOLD(dvp); + zp = dzp; + dl = NULL; + error = 0; + } else { + /* possible VN_HOLD(zp) */ + int zflg = 0; + + if (flag & FIGNORECASE) + zflg |= ZCILOOK; + + error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, + NULL, NULL); + if (error) { + if (strcmp(name, "..") == 0) + error = EISDIR; + ZFS_EXIT(zfsvfs); + if (aclp) + zfs_acl_free(aclp); + return (error); + } + } + if (vsecp && aclp == NULL) { + error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, &aclp); + if (error) { + ZFS_EXIT(zfsvfs); + if (dl) + zfs_dirent_unlock(dl); + return (error); + } + } + + if (zp == NULL) { + uint64_t txtype; + + /* + * Create a new file object and update the directory + * to reference it. + */ + if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { + goto out; + } + + /* + * We only support the creation of regular files in + * extended attribute directories. 
+ */ + if ((dzp->z_phys->zp_flags & ZFS_XATTR) && + (vap->va_type != VREG)) { + error = EINVAL; + goto out; + } + + tx = dmu_tx_create(os); + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + if ((aclp && aclp->z_has_fuids) || IS_EPHEMERAL(uid) || + IS_EPHEMERAL(gid)) { + if (zfsvfs->z_fuid_obj == 0) { + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, + FALSE, NULL); + } else { + dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); + dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + } + } + dmu_tx_hold_bonus(tx, dzp->z_id); + dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); + if ((dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) || aclp) { + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, + 0, SPA_MAXBLOCKSIZE); + } + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + zfs_dirent_unlock(dl); + if (error == ERESTART && + zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + if (aclp) + zfs_acl_free(aclp); + return (error); + } + zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, aclp, &fuidp); + (void) zfs_link_create(dl, zp, tx, ZNEW); + txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap); + if (flag & FIGNORECASE) + txtype |= TX_CI; + zfs_log_create(zilog, tx, txtype, dzp, zp, name, + vsecp, fuidp, vap); + if (fuidp) + zfs_fuid_info_free(fuidp); + dmu_tx_commit(tx); + } else { + int aflags = (flag & FAPPEND) ? V_APPEND : 0; + + /* + * A directory entry already exists for this name. + */ + /* + * Can't truncate an existing file if in exclusive mode. + */ + if (excl == EXCL) { + error = EEXIST; + goto out; + } + /* + * Can't open a directory for writing. + */ + if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) { + error = EISDIR; + goto out; + } + /* + * Verify requested access to file. + */ + if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) { + goto out; + } + + mutex_enter(&dzp->z_lock); + dzp->z_seq++; + mutex_exit(&dzp->z_lock); + + /* + * Truncate regular files if requested. + */ + if ((ZTOV(zp)->v_type == VREG) && + (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) { + /* we can't hold any locks when calling zfs_freesp() */ + zfs_dirent_unlock(dl); + dl = NULL; + error = zfs_freesp(zp, 0, 0, mode, TRUE); + if (error == 0) { + vnevent_create(ZTOV(zp), ct); + } + } + } +out: + + if (dl) + zfs_dirent_unlock(dl); + + if (error) { + if (zp) + VN_RELE(ZTOV(zp)); + } else { + *vpp = ZTOV(zp); + /* + * If vnode is for a device return a specfs vnode instead. + */ + if (IS_DEVVP(*vpp)) { + struct vnode *svp; + + svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); + VN_RELE(*vpp); + if (svp == NULL) { + error = ENOSYS; + } + *vpp = svp; + } + } + if (aclp) + zfs_acl_free(aclp); + + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Remove an entry from a directory. + * + * IN: dvp - vnode of directory to remove entry from. + * name - name of entry to remove. + * cr - credentials of caller. 
+ * ct - caller context + * flags - case flags + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * dvp - ctime|mtime + * vp - ctime (if nlink > 0) + */ +/*ARGSUSED*/ +static int +zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct, + int flags) +{ + znode_t *zp, *dzp = VTOZ(dvp); + znode_t *xzp = NULL; + vnode_t *vp; + zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zilog_t *zilog; + uint64_t acl_obj, xattr_obj; + zfs_dirlock_t *dl; + dmu_tx_t *tx; + boolean_t may_delete_now, delete_now = FALSE; + boolean_t unlinked, toobig = FALSE; + uint64_t txtype; + pathname_t *realnmp = NULL; + pathname_t realnm; + int error; + int zflg = ZEXISTS; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(dzp); + zilog = zfsvfs->z_log; + + if (flags & FIGNORECASE) { + zflg |= ZCILOOK; + pn_alloc(&realnm); + realnmp = &realnm; + } + +top: + /* + * Attempt to lock directory; fail if entry doesn't exist. + */ + if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, + NULL, realnmp)) { + if (realnmp) + pn_free(realnmp); + ZFS_EXIT(zfsvfs); + return (error); + } + + vp = ZTOV(zp); + + if (error = zfs_zaccess_delete(dzp, zp, cr)) { + goto out; + } + + /* + * Need to use rmdir for removing directories. + */ + if (vp->v_type == VDIR) { + error = EPERM; + goto out; + } + + vnevent_remove(vp, dvp, name, ct); + + if (realnmp) + dnlc_remove(dvp, realnmp->pn_buf); + else + dnlc_remove(dvp, name); + + mutex_enter(&vp->v_lock); + may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp); + mutex_exit(&vp->v_lock); + + /* + * We may delete the znode now, or we may put it in the unlinked set; + * it depends on whether we're the last link, and on whether there are + * other holds on the vnode. So we dmu_tx_hold() the right things to + * allow for either case. + */ + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); + dmu_tx_hold_bonus(tx, zp->z_id); + if (may_delete_now) { + toobig = + zp->z_phys->zp_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT; + /* if the file is too big, only hold_free a token amount */ + dmu_tx_hold_free(tx, zp->z_id, 0, + (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END)); + } + + /* are there any extended attributes? */ + if ((xattr_obj = zp->z_phys->zp_xattr) != 0) { + /* XXX - do we need this if we are deleting? */ + dmu_tx_hold_bonus(tx, xattr_obj); + } + + /* are there any additional acls */ + if ((acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj) != 0 && + may_delete_now) + dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); + + /* charge as an update -- would be nice not to charge at all */ + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); + + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + zfs_dirent_unlock(dl); + VN_RELE(vp); + if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + if (realnmp) + pn_free(realnmp); + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + return (error); + } + + /* + * Remove the directory entry. 
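+/*
+ * Illustrative sketch (editor's addition): the "toobig" test zfs_remove()
+ * makes above before deciding how much of the file to dmu_tx_hold_free().
+ * Freeing a huge file in one transaction would make it unreasonably large,
+ * so only a token range is held and the object is reclaimed later through
+ * the unlinked set.  max_delete_blks stands in for DMU_MAX_DELETEBLKCNT;
+ * the helper name is hypothetical.
+ */
+static int
+can_free_inline_sketch(uint64_t size, uint64_t blksz, uint64_t max_delete_blks)
+{
+	return (size <= blksz * max_delete_blks);
+}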
+ */ + error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked); + + if (error) { + dmu_tx_commit(tx); + goto out; + } + + if (unlinked) { + mutex_enter(&vp->v_lock); + delete_now = may_delete_now && !toobig && + vp->v_count == 1 && !vn_has_cached_data(vp) && + zp->z_phys->zp_xattr == xattr_obj && + zp->z_phys->zp_acl.z_acl_extern_obj == acl_obj; + mutex_exit(&vp->v_lock); + } + + if (delete_now) { + if (zp->z_phys->zp_xattr) { + error = zfs_zget(zfsvfs, zp->z_phys->zp_xattr, &xzp); + ASSERT3U(error, ==, 0); + ASSERT3U(xzp->z_phys->zp_links, ==, 2); + dmu_buf_will_dirty(xzp->z_dbuf, tx); + mutex_enter(&xzp->z_lock); + xzp->z_unlinked = 1; + xzp->z_phys->zp_links = 0; + mutex_exit(&xzp->z_lock); + zfs_unlinked_add(xzp, tx); + zp->z_phys->zp_xattr = 0; /* probably unnecessary */ + } + mutex_enter(&zp->z_lock); + mutex_enter(&vp->v_lock); + vp->v_count--; + ASSERT3U(vp->v_count, ==, 0); + mutex_exit(&vp->v_lock); + mutex_exit(&zp->z_lock); + zfs_znode_delete(zp, tx); + } else if (unlinked) { + zfs_unlinked_add(zp, tx); + } + + txtype = TX_REMOVE; + if (flags & FIGNORECASE) + txtype |= TX_CI; + zfs_log_remove(zilog, tx, txtype, dzp, name); + + dmu_tx_commit(tx); +out: + if (realnmp) + pn_free(realnmp); + + zfs_dirent_unlock(dl); + + if (!delete_now) { + VN_RELE(vp); + } else if (xzp) { + /* this rele is delayed to prevent nesting transactions */ + VN_RELE(ZTOV(xzp)); + } + + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Create a new directory and insert it into dvp using the name + * provided. Return a pointer to the inserted directory. + * + * IN: dvp - vnode of directory to add subdir to. + * dirname - name of new directory. + * vap - attributes of new directory. + * cr - credentials of caller. + * ct - caller context + * vsecp - ACL to be set + * + * OUT: vpp - vnode of created directory. + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * dvp - ctime|mtime updated + * vp - ctime|mtime|atime updated + */ +/*ARGSUSED*/ +static int +zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr, + caller_context_t *ct, int flags, vsecattr_t *vsecp) +{ + znode_t *zp, *dzp = VTOZ(dvp); + zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zilog_t *zilog; + zfs_dirlock_t *dl; + uint64_t txtype; + dmu_tx_t *tx; + int error; + zfs_acl_t *aclp = NULL; + zfs_fuid_info_t *fuidp = NULL; + int zf = ZNEW; + ksid_t *ksid; + uid_t uid; + gid_t gid = crgetgid(cr); + + ASSERT(vap->va_type == VDIR); + + /* + * If we have an ephemeral id, ACL, or XVATTR then + * make sure file system is at proper version + */ + + ksid = crgetsid(cr, KSID_OWNER); + if (ksid) + uid = ksid_getid(ksid); + else + uid = crgetuid(cr); + if (zfsvfs->z_use_fuids == B_FALSE && + (vsecp || (vap->va_mask & AT_XVATTR) || + IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) + return (EINVAL); + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(dzp); + zilog = zfsvfs->z_log; + + if (dzp->z_phys->zp_flags & ZFS_XATTR) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + if (zfsvfs->z_utf8 && u8_validate(dirname, + strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { + ZFS_EXIT(zfsvfs); + return (EILSEQ); + } + if (flags & FIGNORECASE) + zf |= ZCILOOK; + + if (vap->va_mask & AT_XVATTR) + if ((error = secpolicy_xvattr((xvattr_t *)vap, + crgetuid(cr), cr, vap->va_type)) != 0) { + ZFS_EXIT(zfsvfs); + return (error); + } + + /* + * First make sure the new directory doesn't exist. 
+ */ +top: + *vpp = NULL; + + if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf, + NULL, NULL)) { + ZFS_EXIT(zfsvfs); + return (error); + } + + if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) { + zfs_dirent_unlock(dl); + ZFS_EXIT(zfsvfs); + return (error); + } + + if (vsecp && aclp == NULL) { + error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, &aclp); + if (error) { + zfs_dirent_unlock(dl); + ZFS_EXIT(zfsvfs); + return (error); + } + } + /* + * Add a new entry to the directory. + */ + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname); + dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); + if ((aclp && aclp->z_has_fuids) || IS_EPHEMERAL(uid) || + IS_EPHEMERAL(gid)) { + if (zfsvfs->z_fuid_obj == 0) { + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); + } else { + dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); + dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + } + } + if ((dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) || aclp) + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, + 0, SPA_MAXBLOCKSIZE); + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + zfs_dirent_unlock(dl); + if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + if (aclp) + zfs_acl_free(aclp); + return (error); + } + + /* + * Create new node. + */ + zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, aclp, &fuidp); + + if (aclp) + zfs_acl_free(aclp); + + /* + * Now put new name in parent dir. + */ + (void) zfs_link_create(dl, zp, tx, ZNEW); + + *vpp = ZTOV(zp); + + txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap); + if (flags & FIGNORECASE) + txtype |= TX_CI; + zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp, fuidp, vap); + + if (fuidp) + zfs_fuid_info_free(fuidp); + dmu_tx_commit(tx); + + zfs_dirent_unlock(dl); + + ZFS_EXIT(zfsvfs); + return (0); +} + +/* + * Remove a directory subdir entry. If the current working + * directory is the same as the subdir to be removed, the + * remove will fail. + * + * IN: dvp - vnode of directory to remove from. + * name - name of directory to be removed. + * cwd - vnode of current working directory. + * cr - credentials of caller. + * ct - caller context + * flags - case flags + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * dvp - ctime|mtime updated + */ +/*ARGSUSED*/ +static int +zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr, + caller_context_t *ct, int flags) +{ + znode_t *dzp = VTOZ(dvp); + znode_t *zp; + vnode_t *vp; + zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zilog_t *zilog; + zfs_dirlock_t *dl; + dmu_tx_t *tx; + int error; + int zflg = ZEXISTS; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(dzp); + zilog = zfsvfs->z_log; + + if (flags & FIGNORECASE) + zflg |= ZCILOOK; +top: + zp = NULL; + + /* + * Attempt to lock directory; fail if entry doesn't exist. + */ + if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, + NULL, NULL)) { + ZFS_EXIT(zfsvfs); + return (error); + } + + vp = ZTOV(zp); + + if (error = zfs_zaccess_delete(dzp, zp, cr)) { + goto out; + } + + if (vp->v_type != VDIR) { + error = ENOTDIR; + goto out; + } + + if (vp == cwd) { + error = EINVAL; + goto out; + } + + vnevent_rmdir(vp, dvp, name, ct); + + /* + * Grab a lock on the directory to make sure that noone is + * trying to add (or lookup) entries while we are removing it. 
+ */ + rw_enter(&zp->z_name_lock, RW_WRITER); + + /* + * Grab a lock on the parent pointer to make sure we play well + * with the treewalk and directory rename code. + */ + rw_enter(&zp->z_parent_lock, RW_WRITER); + + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); + dmu_tx_hold_bonus(tx, zp->z_id); + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + rw_exit(&zp->z_parent_lock); + rw_exit(&zp->z_name_lock); + zfs_dirent_unlock(dl); + VN_RELE(vp); + if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + return (error); + } + + error = zfs_link_destroy(dl, zp, tx, zflg, NULL); + + if (error == 0) { + uint64_t txtype = TX_RMDIR; + if (flags & FIGNORECASE) + txtype |= TX_CI; + zfs_log_remove(zilog, tx, txtype, dzp, name); + } + + dmu_tx_commit(tx); + + rw_exit(&zp->z_parent_lock); + rw_exit(&zp->z_name_lock); +out: + zfs_dirent_unlock(dl); + + VN_RELE(vp); + + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Read as many directory entries as will fit into the provided + * buffer from the given directory cursor position (specified in + * the uio structure. + * + * IN: vp - vnode of directory to read. + * uio - structure supplying read location, range info, + * and return buffer. + * cr - credentials of caller. + * ct - caller context + * flags - case flags + * + * OUT: uio - updated offset and range, buffer filled. + * eofp - set to true if end-of-file detected. + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * vp - atime updated + * + * Note that the low 4 bits of the cookie returned by zap is always zero. + * This allows us to use the low range for "special" directory entries: + * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem, + * we use the offset 2 for the '.zfs' directory. + */ +/* ARGSUSED */ +static int +zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, + caller_context_t *ct, int flags) +{ + znode_t *zp = VTOZ(vp); + iovec_t *iovp; + edirent_t *eodp; + dirent64_t *odp; + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + objset_t *os; + caddr_t outbuf; + size_t bufsize; + zap_cursor_t zc; + zap_attribute_t zap; + uint_t bytes_wanted; + uint64_t offset; /* must be unsigned; checks for < 1 */ + int local_eof; + int outcount; + int error; + uint8_t prefetch; + boolean_t check_sysattrs; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + /* + * If we are not given an eof variable, + * use a local one. + */ + if (eofp == NULL) + eofp = &local_eof; + + /* + * Check for valid iov_len. + */ + if (uio->uio_iov->iov_len <= 0) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + /* + * Quit if directory has been removed (posix) + */ + if ((*eofp = zp->z_unlinked) != 0) { + ZFS_EXIT(zfsvfs); + return (0); + } + + error = 0; + os = zfsvfs->z_os; + offset = uio->uio_loffset; + prefetch = zp->z_zn_prefetch; + + /* + * Initialize the iterator cursor. + */ + if (offset <= 3) { + /* + * Start iteration from the beginning of the directory. + */ + zap_cursor_init(&zc, os, zp->z_id); + } else { + /* + * The offset is a serialized cursor. + */ + zap_cursor_init_serialized(&zc, os, zp->z_id, offset); + } + + /* + * Get space to change directory entries into fs independent format. 
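+/*
+ * Illustrative sketch (editor's addition): how zfs_readdir() above maps the
+ * directory offset onto a ZAP cursor.  Serialized ZAP cursors always have
+ * their low 4 bits clear, so the values 0, 1 and 2 are free to stand for
+ * '.', '..' and '.zfs'; anything larger is handed to
+ * zap_cursor_init_serialized().  The helper name is hypothetical.
+ */
+static int
+offset_is_synthetic_sketch(uint64_t offset, int show_ctldir)
+{
+	if (offset == 0 || offset == 1)
+		return (1);		/* '.' or '..' */
+	if (offset == 2 && show_ctldir)
+		return (1);		/* '.zfs' at the filesystem root */
+	return (0);			/* real, serialized cursor position */
+}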
+ */ + iovp = uio->uio_iov; + bytes_wanted = iovp->iov_len; + if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) { + bufsize = bytes_wanted; + outbuf = kmem_alloc(bufsize, KM_SLEEP); + odp = (struct dirent64 *)outbuf; + } else { + bufsize = bytes_wanted; + odp = (struct dirent64 *)iovp->iov_base; + } + eodp = (struct edirent *)odp; + + /* + * If this VFS supports the system attribute view interface; and + * we're looking at an extended attribute directory; and we care + * about normalization conflicts on this vfs; then we must check + * for normalization conflicts with the sysattr name space. + */ + check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) && + (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm && + (flags & V_RDDIR_ENTFLAGS); + + /* + * Transform to file-system independent format + */ + outcount = 0; + while (outcount < bytes_wanted) { + ino64_t objnum; + ushort_t reclen; + off64_t *next; + + /* + * Special case `.', `..', and `.zfs'. + */ + if (offset == 0) { + (void) strcpy(zap.za_name, "."); + zap.za_normalization_conflict = 0; + objnum = zp->z_id; + } else if (offset == 1) { + (void) strcpy(zap.za_name, ".."); + zap.za_normalization_conflict = 0; + objnum = zp->z_phys->zp_parent; + } else if (offset == 2 && zfs_show_ctldir(zp)) { + (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME); + zap.za_normalization_conflict = 0; + objnum = ZFSCTL_INO_ROOT; + } else { + /* + * Grab next entry. + */ + if (error = zap_cursor_retrieve(&zc, &zap)) { + if ((*eofp = (error == ENOENT)) != 0) + break; + else + goto update; + } + + if (zap.za_integer_length != 8 || + zap.za_num_integers != 1) { + cmn_err(CE_WARN, "zap_readdir: bad directory " + "entry, obj = %lld, offset = %lld\n", + (u_longlong_t)zp->z_id, + (u_longlong_t)offset); + error = ENXIO; + goto update; + } + + objnum = ZFS_DIRENT_OBJ(zap.za_first_integer); + /* + * MacOS X can extract the object type here such as: + * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer); + */ + + if (check_sysattrs && !zap.za_normalization_conflict) { + zap.za_normalization_conflict = + xattr_sysattr_casechk(zap.za_name); + } + } + + if (flags & V_RDDIR_ENTFLAGS) + reclen = EDIRENT_RECLEN(strlen(zap.za_name)); + else + reclen = DIRENT64_RECLEN(strlen(zap.za_name)); + + /* + * Will this entry fit in the buffer? + */ + if (outcount + reclen > bufsize) { + /* + * Did we manage to fit anything in the buffer? + */ + if (!outcount) { + error = EINVAL; + goto update; + } + break; + } + if (flags & V_RDDIR_ENTFLAGS) { + /* + * Add extended flag entry: + */ + eodp->ed_ino = objnum; + eodp->ed_reclen = reclen; + /* NOTE: ed_off is the offset for the *next* entry */ + next = &(eodp->ed_off); + eodp->ed_eflags = zap.za_normalization_conflict ? + ED_CASE_CONFLICT : 0; + (void) strncpy(eodp->ed_name, zap.za_name, + EDIRENT_NAMELEN(reclen)); + eodp = (edirent_t *)((intptr_t)eodp + reclen); + } else { + /* + * Add normal entry: + */ + odp->d_ino = objnum; + odp->d_reclen = reclen; + /* NOTE: d_off is the offset for the *next* entry */ + next = &(odp->d_off); + (void) strncpy(odp->d_name, zap.za_name, + DIRENT64_NAMELEN(reclen)); + odp = (dirent64_t *)((intptr_t)odp + reclen); + } + outcount += reclen; + + ASSERT(outcount <= bufsize); + + /* Prefetch znode */ + if (prefetch) + dmu_prefetch(os, objnum, 0, 0); + + /* + * Move to the next entry, fill in the previous offset. 
+ */ + if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) { + zap_cursor_advance(&zc); + offset = zap_cursor_serialize(&zc); + } else { + offset += 1; + } + *next = offset; + } + zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */ + + if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) { + iovp->iov_base += outcount; + iovp->iov_len -= outcount; + uio->uio_resid -= outcount; + } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) { + /* + * Reset the pointer. + */ + offset = uio->uio_loffset; + } + +update: + zap_cursor_fini(&zc); + if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) + kmem_free(outbuf, bufsize); + + if (error == ENOENT) + error = 0; + + ZFS_ACCESSTIME_STAMP(zfsvfs, zp); + + uio->uio_loffset = offset; + ZFS_EXIT(zfsvfs); + return (error); +} + +ulong_t zfs_fsync_sync_cnt = 4; + +static int +zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + + /* + * Regardless of whether this is required for standards conformance, + * this is the logical behavior when fsync() is called on a file with + * dirty pages. We use B_ASYNC since the ZIL transactions are already + * going to be pushed out as part of the zil_commit(). + */ + if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) && + (vp->v_type == VREG) && !(IS_SWAPVP(vp))) + (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct); + + (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt); + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); + ZFS_EXIT(zfsvfs); + return (0); +} + + +/* + * Get the requested file attributes and place them in the provided + * vattr structure. + * + * IN: vp - vnode of file. + * vap - va_mask identifies requested attributes. + * If AT_XVATTR set, then optional attrs are requested + * flags - ATTR_NOACLCHECK (CIFS server context) + * cr - credentials of caller. + * ct - caller context + * + * OUT: vap - attribute values. + * + * RETURN: 0 (always succeeds) + */ +/* ARGSUSED */ +static int +zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, + caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_phys_t *pzp; + int error = 0; + uint64_t links; + xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ + xoptattr_t *xoap = NULL; + boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + pzp = zp->z_phys; + + mutex_enter(&zp->z_lock); + + /* + * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES. + * Also, if we are the owner don't bother, since owner should + * always be allowed to read basic attributes of file. + */ + if (!(pzp->zp_flags & ZFS_ACL_TRIVIAL) && + (pzp->zp_uid != crgetuid(cr))) { + if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0, + skipaclchk, cr)) { + mutex_exit(&zp->z_lock); + ZFS_EXIT(zfsvfs); + return (error); + } + } + + /* + * Return all attributes. It's cheaper to provide the answer + * than to determine whether we were asked the question. + */ + + vap->va_type = vp->v_type; + vap->va_mode = pzp->zp_mode & MODEMASK; + zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid); + vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev; + vap->va_nodeid = zp->z_id; + if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp)) + links = pzp->zp_links + 1; + else + links = pzp->zp_links; + vap->va_nlink = MIN(links, UINT32_MAX); /* nlink_t limit! 
*/ + vap->va_size = pzp->zp_size; + vap->va_rdev = vp->v_rdev; + vap->va_seq = zp->z_seq; + + /* + * Add in any requested optional attributes and the create time. + * Also set the corresponding bits in the returned attribute bitmap. + */ + if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) { + if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { + xoap->xoa_archive = + ((pzp->zp_flags & ZFS_ARCHIVE) != 0); + XVA_SET_RTN(xvap, XAT_ARCHIVE); + } + + if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { + xoap->xoa_readonly = + ((pzp->zp_flags & ZFS_READONLY) != 0); + XVA_SET_RTN(xvap, XAT_READONLY); + } + + if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { + xoap->xoa_system = + ((pzp->zp_flags & ZFS_SYSTEM) != 0); + XVA_SET_RTN(xvap, XAT_SYSTEM); + } + + if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { + xoap->xoa_hidden = + ((pzp->zp_flags & ZFS_HIDDEN) != 0); + XVA_SET_RTN(xvap, XAT_HIDDEN); + } + + if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { + xoap->xoa_nounlink = + ((pzp->zp_flags & ZFS_NOUNLINK) != 0); + XVA_SET_RTN(xvap, XAT_NOUNLINK); + } + + if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { + xoap->xoa_immutable = + ((pzp->zp_flags & ZFS_IMMUTABLE) != 0); + XVA_SET_RTN(xvap, XAT_IMMUTABLE); + } + + if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { + xoap->xoa_appendonly = + ((pzp->zp_flags & ZFS_APPENDONLY) != 0); + XVA_SET_RTN(xvap, XAT_APPENDONLY); + } + + if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { + xoap->xoa_nodump = + ((pzp->zp_flags & ZFS_NODUMP) != 0); + XVA_SET_RTN(xvap, XAT_NODUMP); + } + + if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) { + xoap->xoa_opaque = + ((pzp->zp_flags & ZFS_OPAQUE) != 0); + XVA_SET_RTN(xvap, XAT_OPAQUE); + } + + if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { + xoap->xoa_av_quarantined = + ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0); + XVA_SET_RTN(xvap, XAT_AV_QUARANTINED); + } + + if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { + xoap->xoa_av_modified = + ((pzp->zp_flags & ZFS_AV_MODIFIED) != 0); + XVA_SET_RTN(xvap, XAT_AV_MODIFIED); + } + + if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) && + vp->v_type == VREG && + (pzp->zp_flags & ZFS_BONUS_SCANSTAMP)) { + size_t len; + dmu_object_info_t doi; + + /* + * Only VREG files have anti-virus scanstamps, so we + * won't conflict with symlinks in the bonus buffer. + */ + dmu_object_info_from_db(zp->z_dbuf, &doi); + len = sizeof (xoap->xoa_av_scanstamp) + + sizeof (znode_phys_t); + if (len <= doi.doi_bonus_size) { + /* + * pzp points to the start of the + * znode_phys_t. pzp + 1 points to the + * first byte after the znode_phys_t. + */ + (void) memcpy(xoap->xoa_av_scanstamp, + pzp + 1, + sizeof (xoap->xoa_av_scanstamp)); + XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP); + } + } + + if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { + ZFS_TIME_DECODE(&xoap->xoa_createtime, pzp->zp_crtime); + XVA_SET_RTN(xvap, XAT_CREATETIME); + } + } + + ZFS_TIME_DECODE(&vap->va_atime, pzp->zp_atime); + ZFS_TIME_DECODE(&vap->va_mtime, pzp->zp_mtime); + ZFS_TIME_DECODE(&vap->va_ctime, pzp->zp_ctime); + + mutex_exit(&zp->z_lock); + + dmu_object_size_from_db(zp->z_dbuf, &vap->va_blksize, &vap->va_nblocks); + + if (zp->z_blksz == 0) { + /* + * Block size hasn't been set; suggest maximal I/O transfers. + */ + vap->va_blksize = zfsvfs->z_max_blksz; + } + + ZFS_EXIT(zfsvfs); + return (0); +} + +/* + * Set the file attributes to the values contained in the + * vattr structure. + * + * IN: vp - vnode of file to be modified. + * vap - new attribute values. + * If AT_XVATTR set, then optional attrs are being set + * flags - ATTR_UTIME set if non-default time values provided. + * - ATTR_NOACLCHECK (CIFS context only). 
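+/*
+ * Illustrative sketch (editor's addition): the bonus-buffer layout probed by
+ * zfs_getattr() above when returning the anti-virus scanstamp.  The
+ * znode_phys_t sits at the start of the bonus buffer, so "pzp + 1" is the
+ * first byte past it, and the scanstamp is present only if the bonus buffer
+ * was grown to cover it.  The helper name is hypothetical.
+ */
+static const void *
+scanstamp_addr_sketch(const void *bonus, size_t phys_size, size_t bonus_size,
+    size_t stamp_size)
+{
+	if (phys_size + stamp_size > bonus_size)
+		return (NULL);			/* bonus buffer too small */
+	return ((const char *)bonus + phys_size);
+}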
+ * cr - credentials of caller. + * ct - caller context + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * vp - ctime updated, mtime updated if size changed. + */ +/* ARGSUSED */ +static int +zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, + caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + znode_phys_t *pzp; + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + zilog_t *zilog; + dmu_tx_t *tx; + vattr_t oldva; + uint_t mask = vap->va_mask; + uint_t saved_mask; + int trim_mask = 0; + uint64_t new_mode; + znode_t *attrzp; + int need_policy = FALSE; + int err; + zfs_fuid_info_t *fuidp = NULL; + xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ + xoptattr_t *xoap; + zfs_acl_t *aclp = NULL; + boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; + + if (mask == 0) + return (0); + + if (mask & AT_NOSET) + return (EINVAL); + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + pzp = zp->z_phys; + zilog = zfsvfs->z_log; + + /* + * Make sure that if we have ephemeral uid/gid or xvattr specified + * that file system is at proper version level + */ + + if (zfsvfs->z_use_fuids == B_FALSE && + (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || + ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) || + (mask & AT_XVATTR))) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + if (mask & AT_SIZE && vp->v_type == VDIR) { + ZFS_EXIT(zfsvfs); + return (EISDIR); + } + + if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + /* + * If this is an xvattr_t, then get a pointer to the structure of + * optional attributes. If this is NULL, then we have a vattr_t. + */ + xoap = xva_getxoptattr(xvap); + + /* + * Immutable files can only alter immutable bit and atime + */ + if ((pzp->zp_flags & ZFS_IMMUTABLE) && + ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) || + ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) { + ZFS_EXIT(zfsvfs); + return (EPERM); + } + + if ((mask & AT_SIZE) && (pzp->zp_flags & ZFS_READONLY)) { + ZFS_EXIT(zfsvfs); + return (EPERM); + } + + /* + * Verify timestamps doesn't overflow 32 bits. + * ZFS can handle large timestamps, but 32bit syscalls can't + * handle times greater than 2039. This check should be removed + * once large timestamps are fully supported. + */ + if (mask & (AT_ATIME | AT_MTIME)) { + if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) || + ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) { + ZFS_EXIT(zfsvfs); + return (EOVERFLOW); + } + } + +top: + attrzp = NULL; + + if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) { + ZFS_EXIT(zfsvfs); + return (EROFS); + } + + /* + * First validate permissions + */ + + if (mask & AT_SIZE) { + err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr); + if (err) { + ZFS_EXIT(zfsvfs); + return (err); + } + /* + * XXX - Note, we are not providing any open + * mode flags here (like FNDELAY), so we may + * block if there are locks present... this + * should be addressed in openat(). + */ + /* XXX - would it be OK to generate a log record here? 
*/ + err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE); + if (err) { + ZFS_EXIT(zfsvfs); + return (err); + } + } + + if (mask & (AT_ATIME|AT_MTIME) || + ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) || + XVA_ISSET_REQ(xvap, XAT_READONLY) || + XVA_ISSET_REQ(xvap, XAT_ARCHIVE) || + XVA_ISSET_REQ(xvap, XAT_CREATETIME) || + XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) + need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0, + skipaclchk, cr); + + if (mask & (AT_UID|AT_GID)) { + int idmask = (mask & (AT_UID|AT_GID)); + int take_owner; + int take_group; + + /* + * NOTE: even if a new mode is being set, + * we may clear S_ISUID/S_ISGID bits. + */ + + if (!(mask & AT_MODE)) + vap->va_mode = pzp->zp_mode; + + /* + * Take ownership or chgrp to group we are a member of + */ + + take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr)); + take_group = (mask & AT_GID) && + zfs_groupmember(zfsvfs, vap->va_gid, cr); + + /* + * If both AT_UID and AT_GID are set then take_owner and + * take_group must both be set in order to allow taking + * ownership. + * + * Otherwise, send the check through secpolicy_vnode_setattr() + * + */ + + if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) || + ((idmask == AT_UID) && take_owner) || + ((idmask == AT_GID) && take_group)) { + if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0, + skipaclchk, cr) == 0) { + /* + * Remove setuid/setgid for non-privileged users + */ + secpolicy_setid_clear(vap, cr); + trim_mask = (mask & (AT_UID|AT_GID)); + } else { + need_policy = TRUE; + } + } else { + need_policy = TRUE; + } + } + + mutex_enter(&zp->z_lock); + oldva.va_mode = pzp->zp_mode; + zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid); + if (mask & AT_XVATTR) { + if ((need_policy == FALSE) && + (XVA_ISSET_REQ(xvap, XAT_APPENDONLY) && + xoap->xoa_appendonly != + ((pzp->zp_flags & ZFS_APPENDONLY) != 0)) || + (XVA_ISSET_REQ(xvap, XAT_NOUNLINK) && + xoap->xoa_nounlink != + ((pzp->zp_flags & ZFS_NOUNLINK) != 0)) || + (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE) && + xoap->xoa_immutable != + ((pzp->zp_flags & ZFS_IMMUTABLE) != 0)) || + (XVA_ISSET_REQ(xvap, XAT_NODUMP) && + xoap->xoa_nodump != + ((pzp->zp_flags & ZFS_NODUMP) != 0)) || + (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED) && + xoap->xoa_av_modified != + ((pzp->zp_flags & ZFS_AV_MODIFIED) != 0)) || + ((XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED) && + ((vp->v_type != VREG && xoap->xoa_av_quarantined) || + xoap->xoa_av_quarantined != + ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0)))) || + (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) || + (XVA_ISSET_REQ(xvap, XAT_OPAQUE))) { + need_policy = TRUE; + } + } + + mutex_exit(&zp->z_lock); + + if (mask & AT_MODE) { + if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) { + err = secpolicy_setid_setsticky_clear(vp, vap, + &oldva, cr); + if (err) { + ZFS_EXIT(zfsvfs); + return (err); + } + trim_mask |= AT_MODE; + } else { + need_policy = TRUE; + } + } + + if (need_policy) { + /* + * If trim_mask is set then take ownership + * has been granted or write_acl is present and user + * has the ability to modify mode. In that case remove + * UID|GID and or MODE from mask so that + * secpolicy_vnode_setattr() doesn't revoke it. 
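+/*
+ * Illustrative sketch (editor's addition): the chown/chgrp fast path tested
+ * above.  The caller may skip secpolicy_vnode_setattr() only when every id
+ * being set is one it could legitimately give itself (its own uid, or the
+ * gid of a group it belongs to), and even then the real code still requires
+ * ACE_WRITE_OWNER before trimming the mask.  The helper name is
+ * hypothetical.
+ */
+static int
+may_take_ids_sketch(int set_uid, int set_gid, int take_owner, int take_group)
+{
+	if (set_uid && set_gid)
+		return (take_owner && take_group);
+	if (set_uid)
+		return (take_owner);
+	if (set_gid)
+		return (take_group);
+	return (0);
+}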
+ */ + + if (trim_mask) { + saved_mask = vap->va_mask; + vap->va_mask &= ~trim_mask; + } + err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags, + (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp); + if (err) { + ZFS_EXIT(zfsvfs); + return (err); + } + + if (trim_mask) + vap->va_mask |= saved_mask; + } + + /* + * secpolicy_vnode_setattr, or take ownership may have + * changed va_mask + */ + mask = vap->va_mask; + + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_bonus(tx, zp->z_id); + if (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || + ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid))) { + if (zfsvfs->z_fuid_obj == 0) { + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); + } else { + dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); + dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + } + } + + if (mask & AT_MODE) { + uint64_t pmode = pzp->zp_mode; + + new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT); + + if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)) { + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + return (err); + } + if (pzp->zp_acl.z_acl_extern_obj) { + /* Are we upgrading ACL from old V0 format to new V1 */ + if (zfsvfs->z_version <= ZPL_VERSION_FUID && + pzp->zp_acl.z_acl_version == + ZFS_ACL_VERSION_INITIAL) { + dmu_tx_hold_free(tx, + pzp->zp_acl.z_acl_extern_obj, 0, + DMU_OBJECT_END); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, + 0, aclp->z_acl_bytes); + } else { + dmu_tx_hold_write(tx, + pzp->zp_acl.z_acl_extern_obj, 0, + aclp->z_acl_bytes); + } + } else if (aclp->z_acl_bytes > ZFS_ACE_SPACE) { + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, + 0, aclp->z_acl_bytes); + } + } + + if ((mask & (AT_UID | AT_GID)) && pzp->zp_xattr != 0) { + err = zfs_zget(zp->z_zfsvfs, pzp->zp_xattr, &attrzp); + if (err) { + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + if (aclp) + zfs_acl_free(aclp); + return (err); + } + dmu_tx_hold_bonus(tx, attrzp->z_id); + } + + err = dmu_tx_assign(tx, zfsvfs->z_assign); + if (err) { + if (attrzp) + VN_RELE(ZTOV(attrzp)); + + if (aclp) { + zfs_acl_free(aclp); + aclp = NULL; + } + + if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + return (err); + } + + dmu_buf_will_dirty(zp->z_dbuf, tx); + + /* + * Set each attribute requested. + * We group settings according to the locks they need to acquire. + * + * Note: you cannot set ctime directly, although it will be + * updated as a side-effect of calling this function. 
+ */ + + mutex_enter(&zp->z_lock); + + if (mask & AT_MODE) { + mutex_enter(&zp->z_acl_lock); + zp->z_phys->zp_mode = new_mode; + err = zfs_aclset_common(zp, aclp, cr, &fuidp, tx); + ASSERT3U(err, ==, 0); + mutex_exit(&zp->z_acl_lock); + } + + if (attrzp) + mutex_enter(&attrzp->z_lock); + + if (mask & AT_UID) { + pzp->zp_uid = zfs_fuid_create(zfsvfs, + vap->va_uid, cr, ZFS_OWNER, tx, &fuidp); + if (attrzp) { + attrzp->z_phys->zp_uid = zfs_fuid_create(zfsvfs, + vap->va_uid, cr, ZFS_OWNER, tx, &fuidp); + } + } + + if (mask & AT_GID) { + pzp->zp_gid = zfs_fuid_create(zfsvfs, vap->va_gid, + cr, ZFS_GROUP, tx, &fuidp); + if (attrzp) + attrzp->z_phys->zp_gid = zfs_fuid_create(zfsvfs, + vap->va_gid, cr, ZFS_GROUP, tx, &fuidp); + } + + if (aclp) + zfs_acl_free(aclp); + + if (attrzp) + mutex_exit(&attrzp->z_lock); + + if (mask & AT_ATIME) + ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime); + + if (mask & AT_MTIME) + ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime); + + /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */ + if (mask & AT_SIZE) + zfs_time_stamper_locked(zp, CONTENT_MODIFIED, tx); + else if (mask != 0) + zfs_time_stamper_locked(zp, STATE_CHANGED, tx); + /* + * Do this after setting timestamps to prevent timestamp + * update from toggling bit + */ + + if (xoap && (mask & AT_XVATTR)) { + if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) { + size_t len; + dmu_object_info_t doi; + + ASSERT(vp->v_type == VREG); + + /* Grow the bonus buffer if necessary. */ + dmu_object_info_from_db(zp->z_dbuf, &doi); + len = sizeof (xoap->xoa_av_scanstamp) + + sizeof (znode_phys_t); + if (len > doi.doi_bonus_size) + VERIFY(dmu_set_bonus(zp->z_dbuf, len, tx) == 0); + } + zfs_xvattr_set(zp, xvap); + } + + if (mask != 0) + zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp); + + if (fuidp) + zfs_fuid_info_free(fuidp); + mutex_exit(&zp->z_lock); + + if (attrzp) + VN_RELE(ZTOV(attrzp)); + + dmu_tx_commit(tx); + + ZFS_EXIT(zfsvfs); + return (err); +} + +typedef struct zfs_zlock { + krwlock_t *zl_rwlock; /* lock we acquired */ + znode_t *zl_znode; /* znode we held */ + struct zfs_zlock *zl_next; /* next in list */ +} zfs_zlock_t; + +/* + * Drop locks and release vnodes that were held by zfs_rename_lock(). + */ +static void +zfs_rename_unlock(zfs_zlock_t **zlpp) +{ + zfs_zlock_t *zl; + + while ((zl = *zlpp) != NULL) { + if (zl->zl_znode != NULL) + VN_RELE(ZTOV(zl->zl_znode)); + rw_exit(zl->zl_rwlock); + *zlpp = zl->zl_next; + kmem_free(zl, sizeof (*zl)); + } +} + +/* + * Search back through the directory tree, using the ".." entries. + * Lock each directory in the chain to prevent concurrent renames. + * Fail any attempt to move a directory into one of its own descendants. + * XXX - z_parent_lock can overlap with map or grow locks + */ +static int +zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) +{ + zfs_zlock_t *zl; + znode_t *zp = tdzp; + uint64_t rootid = zp->z_zfsvfs->z_root; + uint64_t *oidp = &zp->z_id; + krwlock_t *rwlp = &szp->z_parent_lock; + krw_t rw = RW_WRITER; + + /* + * First pass write-locks szp and compares to zp->z_id. + * Later passes read-lock zp and compare to zp->z_parent. + */ + do { + if (!rw_tryenter(rwlp, rw)) { + /* + * Another thread is renaming in this path. + * Note that if we are a WRITER, we don't have any + * parent_locks held yet. 
+ */ + if (rw == RW_READER && zp->z_id > szp->z_id) { + /* + * Drop our locks and restart + */ + zfs_rename_unlock(&zl); + *zlpp = NULL; + zp = tdzp; + oidp = &zp->z_id; + rwlp = &szp->z_parent_lock; + rw = RW_WRITER; + continue; + } else { + /* + * Wait for other thread to drop its locks + */ + rw_enter(rwlp, rw); + } + } + + zl = kmem_alloc(sizeof (*zl), KM_SLEEP); + zl->zl_rwlock = rwlp; + zl->zl_znode = NULL; + zl->zl_next = *zlpp; + *zlpp = zl; + + if (*oidp == szp->z_id) /* We're a descendant of szp */ + return (EINVAL); + + if (*oidp == rootid) /* We've hit the top */ + return (0); + + if (rw == RW_READER) { /* i.e. not the first pass */ + int error = zfs_zget(zp->z_zfsvfs, *oidp, &zp); + if (error) + return (error); + zl->zl_znode = zp; + } + oidp = &zp->z_phys->zp_parent; + rwlp = &zp->z_parent_lock; + rw = RW_READER; + + } while (zp->z_id != sdzp->z_id); + + return (0); +} + +/* + * Move an entry from the provided source directory to the target + * directory. Change the entry name as indicated. + * + * IN: sdvp - Source directory containing the "old entry". + * snm - Old entry name. + * tdvp - Target directory to contain the "new entry". + * tnm - New entry name. + * cr - credentials of caller. + * ct - caller context + * flags - case flags + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * sdvp,tdvp - ctime|mtime updated + */ +/*ARGSUSED*/ +static int +zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr, + caller_context_t *ct, int flags) +{ + znode_t *tdzp, *szp, *tzp; + znode_t *sdzp = VTOZ(sdvp); + zfsvfs_t *zfsvfs = sdzp->z_zfsvfs; + zilog_t *zilog; + vnode_t *realvp; + zfs_dirlock_t *sdl, *tdl; + dmu_tx_t *tx; + zfs_zlock_t *zl; + int cmp, serr, terr; + int error = 0; + int zflg = 0; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(sdzp); + zilog = zfsvfs->z_log; + + /* + * Make sure we have the real vp for the target directory. + */ + if (VOP_REALVP(tdvp, &realvp, ct) == 0) + tdvp = realvp; + + if (tdvp->v_vfsp != sdvp->v_vfsp) { + ZFS_EXIT(zfsvfs); + return (EXDEV); + } + + tdzp = VTOZ(tdvp); + ZFS_VERIFY_ZP(tdzp); + if (zfsvfs->z_utf8 && u8_validate(tnm, + strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { + ZFS_EXIT(zfsvfs); + return (EILSEQ); + } + + if (flags & FIGNORECASE) + zflg |= ZCILOOK; + +top: + szp = NULL; + tzp = NULL; + zl = NULL; + + /* + * This is to prevent the creation of links into attribute space + * by renaming a linked file into/outof an attribute directory. + * See the comment in zfs_link() for why this is considered bad. + */ + if ((tdzp->z_phys->zp_flags & ZFS_XATTR) != + (sdzp->z_phys->zp_flags & ZFS_XATTR)) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + /* + * Lock source and target directory entries. To prevent deadlock, + * a lock ordering must be defined. We lock the directory with + * the smallest object id first, or if it's a tie, the one with + * the lexically first name. + */ + if (sdzp->z_id < tdzp->z_id) { + cmp = -1; + } else if (sdzp->z_id > tdzp->z_id) { + cmp = 1; + } else { + /* + * First compare the two name arguments without + * considering any case folding. + */ + int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER); + + cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error); + ASSERT(error == 0 || !zfsvfs->z_utf8); + if (cmp == 0) { + /* + * POSIX: "If the old argument and the new argument + * both refer to links to the same existing file, + * the rename() function shall return successfully + * and perform no other action." 
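+/*
+ * Illustrative sketch (editor's addition): the descendant check that
+ * zfs_rename_lock() above performs by walking the target directory's ".."
+ * chain toward the root.  Moving a directory underneath one of its own
+ * descendants must fail with EINVAL.  parent_of() is a hypothetical
+ * stand-in for following zp_parent.
+ */
+static int
+is_descendant_sketch(uint64_t src_id, uint64_t tdir_id, uint64_t root_id,
+    uint64_t (*parent_of)(uint64_t))
+{
+	uint64_t id = tdir_id;
+
+	for (;;) {
+		if (id == src_id)
+			return (1);	/* target lives under the source */
+		if (id == root_id)
+			return (0);	/* reached the top; rename is safe */
+		id = parent_of(id);
+	}
+}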
+ */ + ZFS_EXIT(zfsvfs); + return (0); + } + /* + * If the file system is case-folding, then we may + * have some more checking to do. A case-folding file + * system is either supporting mixed case sensitivity + * access or is completely case-insensitive. Note + * that the file system is always case preserving. + * + * In mixed sensitivity mode case sensitive behavior + * is the default. FIGNORECASE must be used to + * explicitly request case insensitive behavior. + * + * If the source and target names provided differ only + * by case (e.g., a request to rename 'tim' to 'Tim'), + * we will treat this as a special case in the + * case-insensitive mode: as long as the source name + * is an exact match, we will allow this to proceed as + * a name-change request. + */ + if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE || + (zfsvfs->z_case == ZFS_CASE_MIXED && + flags & FIGNORECASE)) && + u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST, + &error) == 0) { + /* + * case preserving rename request, require exact + * name matches + */ + zflg |= ZCIEXACT; + zflg &= ~ZCILOOK; + } + } + + if (cmp < 0) { + serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, + ZEXISTS | zflg, NULL, NULL); + terr = zfs_dirent_lock(&tdl, + tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL); + } else { + terr = zfs_dirent_lock(&tdl, + tdzp, tnm, &tzp, zflg, NULL, NULL); + serr = zfs_dirent_lock(&sdl, + sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg, + NULL, NULL); + } + + if (serr) { + /* + * Source entry invalid or not there. + */ + if (!terr) { + zfs_dirent_unlock(tdl); + if (tzp) + VN_RELE(ZTOV(tzp)); + } + if (strcmp(snm, "..") == 0) + serr = EINVAL; + ZFS_EXIT(zfsvfs); + return (serr); + } + if (terr) { + zfs_dirent_unlock(sdl); + VN_RELE(ZTOV(szp)); + if (strcmp(tnm, "..") == 0) + terr = EINVAL; + ZFS_EXIT(zfsvfs); + return (terr); + } + + /* + * Must have write access at the source to remove the old entry + * and write access at the target to create the new entry. + * Note that if target and source are the same, this can be + * done in a single check. + */ + + if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)) + goto out; + + if (ZTOV(szp)->v_type == VDIR) { + /* + * Check to make sure rename is valid. + * Can't do a move like this: /usr/a/b to /usr/a/b/c/d + */ + if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl)) + goto out; + } + + /* + * Does target exist? + */ + if (tzp) { + /* + * Source and target must be the same type. + */ + if (ZTOV(szp)->v_type == VDIR) { + if (ZTOV(tzp)->v_type != VDIR) { + error = ENOTDIR; + goto out; + } + } else { + if (ZTOV(tzp)->v_type == VDIR) { + error = EISDIR; + goto out; + } + } + /* + * POSIX dictates that when the source and target + * entries refer to the same file object, rename + * must do nothing and exit without error. + */ + if (szp->z_id == tzp->z_id) { + error = 0; + goto out; + } + } + + vnevent_rename_src(ZTOV(szp), sdvp, snm, ct); + if (tzp) + vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct); + + /* + * notify the target directory if it is not the same + * as source directory. 
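+/*
+ * Illustrative sketch (editor's addition): the deadlock-avoiding lock order
+ * chosen by zfs_rename() above.  The directory with the smaller object id
+ * is locked first; on a tie (same directory) the lexically smaller name
+ * goes first.  Plain strcmp() is a simplification of the u8_strcmp()
+ * comparison the real code uses; the helper name is hypothetical.
+ */
+#include <string.h>
+
+static int
+rename_lock_order_sketch(uint64_t sdir_id, uint64_t tdir_id,
+    const char *snm, const char *tnm)
+{
+	if (sdir_id != tdir_id)
+		return (sdir_id < tdir_id ? -1 : 1);
+	return (strcmp(snm, tnm));	/* < 0: lock the source side first */
+}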
+ */ + if (tdvp != sdvp) { + vnevent_rename_dest_dir(tdvp, ct); + } + + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_bonus(tx, szp->z_id); /* nlink changes */ + dmu_tx_hold_bonus(tx, sdzp->z_id); /* nlink changes */ + dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm); + dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm); + if (sdzp != tdzp) + dmu_tx_hold_bonus(tx, tdzp->z_id); /* nlink changes */ + if (tzp) + dmu_tx_hold_bonus(tx, tzp->z_id); /* parent changes */ + dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + if (zl != NULL) + zfs_rename_unlock(&zl); + zfs_dirent_unlock(sdl); + zfs_dirent_unlock(tdl); + VN_RELE(ZTOV(szp)); + if (tzp) + VN_RELE(ZTOV(tzp)); + if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + return (error); + } + + if (tzp) /* Attempt to remove the existing target */ + error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL); + + if (error == 0) { + error = zfs_link_create(tdl, szp, tx, ZRENAMING); + if (error == 0) { + szp->z_phys->zp_flags |= ZFS_AV_MODIFIED; + + error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL); + ASSERT(error == 0); + + zfs_log_rename(zilog, tx, + TX_RENAME | (flags & FIGNORECASE ? TX_CI : 0), + sdzp, sdl->dl_name, tdzp, tdl->dl_name, szp); + + /* Update path information for the target vnode */ + vn_renamepath(tdvp, ZTOV(szp), tnm, strlen(tnm)); + } + } + + dmu_tx_commit(tx); +out: + if (zl != NULL) + zfs_rename_unlock(&zl); + + zfs_dirent_unlock(sdl); + zfs_dirent_unlock(tdl); + + VN_RELE(ZTOV(szp)); + if (tzp) + VN_RELE(ZTOV(tzp)); + + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Insert the indicated symbolic reference entry into the directory. + * + * IN: dvp - Directory to contain new symbolic link. + * link - Name for new symlink entry. + * vap - Attributes of new entry. + * target - Target path of new symlink. + * cr - credentials of caller. + * ct - caller context + * flags - case flags + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * dvp - ctime|mtime updated + */ +/*ARGSUSED*/ +static int +zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr, + caller_context_t *ct, int flags) +{ + znode_t *zp, *dzp = VTOZ(dvp); + zfs_dirlock_t *dl; + dmu_tx_t *tx; + zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zilog_t *zilog; + int len = strlen(link); + int error; + int zflg = ZNEW; + zfs_fuid_info_t *fuidp = NULL; + + ASSERT(vap->va_type == VLNK); + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(dzp); + zilog = zfsvfs->z_log; + + if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), + NULL, U8_VALIDATE_ENTIRE, &error) < 0) { + ZFS_EXIT(zfsvfs); + return (EILSEQ); + } + if (flags & FIGNORECASE) + zflg |= ZCILOOK; +top: + if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { + ZFS_EXIT(zfsvfs); + return (error); + } + + if (len > MAXPATHLEN) { + ZFS_EXIT(zfsvfs); + return (ENAMETOOLONG); + } + + /* + * Attempt to lock directory; fail if entry already exists. 
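The transaction handling here follows a recurring DMU idiom: assign with TXG_NOWAIT, and if the open transaction group is full (ERESTART), drop every lock first, wait for the next group, then restart from the top. A toy stand-alone model of that control flow; the tx_*/take_locks/drop_locks functions are hypothetical stubs, not the real dmu_tx_* interfaces:

/* Illustrative only: the "assign, drop locks, wait, retry" idiom. */
#include <stdio.h>
#include <errno.h>

#ifndef ERESTART
#define ERESTART EAGAIN		/* fallback where <errno.h> hides ERESTART */
#endif

static int attempts;
/* Pretend the first assignment fails because the open txg is full. */
static int  tx_assign_nowait(void) { return (attempts++ == 0 ? ERESTART : 0); }
static void tx_wait(void)   { printf("waiting for the next txg\n"); }
static void tx_abort(void)  { printf("abort\n"); }
static void tx_commit(void) { printf("commit\n"); }
static void take_locks(void) { printf("take locks\n"); }
static void drop_locks(void) { printf("drop locks\n"); }

int
main(void)
{
	int error;
top:
	take_locks();
	error = tx_assign_nowait();
	if (error != 0) {
		/* Locks must not be held while stalling for the next txg. */
		drop_locks();
		if (error == ERESTART) {
			tx_wait();
			tx_abort();
			goto top;
		}
		tx_abort();
		return (error);
	}
	/* ... apply the change under the assigned transaction ... */
	tx_commit();
	drop_locks();
	return (0);
}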
+ */ + error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL); + if (error) { + ZFS_EXIT(zfsvfs); + return (error); + } + + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len)); + dmu_tx_hold_bonus(tx, dzp->z_id); + dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); + if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE); + if (IS_EPHEMERAL(crgetuid(cr)) || IS_EPHEMERAL(crgetgid(cr))) { + if (zfsvfs->z_fuid_obj == 0) { + dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); + } else { + dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); + dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, + FUID_SIZE_ESTIMATE(zfsvfs)); + } + } + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + zfs_dirent_unlock(dl); + if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + return (error); + } + + dmu_buf_will_dirty(dzp->z_dbuf, tx); + + /* + * Create a new object for the symlink. + * Put the link content into bonus buffer if it will fit; + * otherwise, store it just like any other file data. + */ + if (sizeof (znode_phys_t) + len <= dmu_bonus_max()) { + zfs_mknode(dzp, vap, tx, cr, 0, &zp, len, NULL, &fuidp); + if (len != 0) + bcopy(link, zp->z_phys + 1, len); + } else { + dmu_buf_t *dbp; + + zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, NULL, &fuidp); + /* + * Nothing can access the znode yet so no locking needed + * for growing the znode's blocksize. + */ + zfs_grow_blocksize(zp, len, tx); + + VERIFY(0 == dmu_buf_hold(zfsvfs->z_os, + zp->z_id, 0, FTAG, &dbp)); + dmu_buf_will_dirty(dbp, tx); + + ASSERT3U(len, <=, dbp->db_size); + bcopy(link, dbp->db_data, len); + dmu_buf_rele(dbp, FTAG); + } + zp->z_phys->zp_size = len; + + /* + * Insert the new object into the directory. + */ + (void) zfs_link_create(dl, zp, tx, ZNEW); +out: + if (error == 0) { + uint64_t txtype = TX_SYMLINK; + if (flags & FIGNORECASE) + txtype |= TX_CI; + zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link); + } + if (fuidp) + zfs_fuid_info_free(fuidp); + + dmu_tx_commit(tx); + + zfs_dirent_unlock(dl); + + VN_RELE(ZTOV(zp)); + + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Return, in the buffer contained in the provided uio structure, + * the symbolic path referred to by vp. + * + * IN: vp - vnode of symbolic link. + * uoip - structure to contain the link path. + * cr - credentials of caller. + * ct - caller context + * + * OUT: uio - structure to contain the link path. 
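The symlink body is kept inline in the znode's bonus buffer when it fits and spilled to an ordinary data block otherwise. A small sketch of that size test; the two constants below are assumed stand-ins for sizeof (znode_phys_t) and dmu_bonus_max(), not values taken from the code:

/* Illustrative only: store a symlink target inline when it fits. */
#include <stdio.h>
#include <string.h>

#define FIXED_HEADER_SIZE	264	/* assumed stand-in for sizeof (znode_phys_t) */
#define BONUS_MAX		320	/* assumed stand-in for dmu_bonus_max() */

int
main(void)
{
	const char *targets[] = {
		"short/target",
		"a/deliberately/very/long/target/path/that/exceeds/the/room/"
		"left/in/the/fixed/size/header/and/so/must/spill/to/a/block",
	};

	for (int i = 0; i < 2; i++) {
		size_t len = strlen(targets[i]);

		if (FIXED_HEADER_SIZE + len <= BONUS_MAX)
			printf("%zu bytes: inline in the bonus buffer\n", len);
		else
			printf("%zu bytes: spilled to a data block\n", len);
	}
	return (0);
}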
+ * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * vp - atime updated + */ +/* ARGSUSED */ +static int +zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + size_t bufsz; + int error; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + bufsz = (size_t)zp->z_phys->zp_size; + if (bufsz + sizeof (znode_phys_t) <= zp->z_dbuf->db_size) { + error = uiomove(zp->z_phys + 1, + MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); + } else { + dmu_buf_t *dbp; + error = dmu_buf_hold(zfsvfs->z_os, zp->z_id, 0, FTAG, &dbp); + if (error) { + ZFS_EXIT(zfsvfs); + return (error); + } + error = uiomove(dbp->db_data, + MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); + dmu_buf_rele(dbp, FTAG); + } + + ZFS_ACCESSTIME_STAMP(zfsvfs, zp); + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * Insert a new entry into directory tdvp referencing svp. + * + * IN: tdvp - Directory to contain new entry. + * svp - vnode of new entry. + * name - name of new entry. + * cr - credentials of caller. + * ct - caller context + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * tdvp - ctime|mtime updated + * svp - ctime updated + */ +/* ARGSUSED */ +static int +zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr, + caller_context_t *ct, int flags) +{ + znode_t *dzp = VTOZ(tdvp); + znode_t *tzp, *szp; + zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zilog_t *zilog; + zfs_dirlock_t *dl; + dmu_tx_t *tx; + vnode_t *realvp; + int error; + int zf = ZNEW; + uid_t owner; + + ASSERT(tdvp->v_type == VDIR); + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(dzp); + zilog = zfsvfs->z_log; + + if (VOP_REALVP(svp, &realvp, ct) == 0) + svp = realvp; + + if (svp->v_vfsp != tdvp->v_vfsp) { + ZFS_EXIT(zfsvfs); + return (EXDEV); + } + szp = VTOZ(svp); + ZFS_VERIFY_ZP(szp); + + if (zfsvfs->z_utf8 && u8_validate(name, + strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { + ZFS_EXIT(zfsvfs); + return (EILSEQ); + } + if (flags & FIGNORECASE) + zf |= ZCILOOK; + +top: + /* + * We do not support links between attributes and non-attributes + * because of the potential security risk of creating links + * into "normal" file space in order to circumvent restrictions + * imposed in attribute space. + */ + if ((szp->z_phys->zp_flags & ZFS_XATTR) != + (dzp->z_phys->zp_flags & ZFS_XATTR)) { + ZFS_EXIT(zfsvfs); + return (EINVAL); + } + + /* + * POSIX dictates that we return EPERM here. + * Better choices include ENOTSUP or EISDIR. + */ + if (svp->v_type == VDIR) { + ZFS_EXIT(zfsvfs); + return (EPERM); + } + + owner = zfs_fuid_map_id(zfsvfs, szp->z_phys->zp_uid, cr, ZFS_OWNER); + if (owner != crgetuid(cr) && + secpolicy_basic_link(cr) != 0) { + ZFS_EXIT(zfsvfs); + return (EPERM); + } + + if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { + ZFS_EXIT(zfsvfs); + return (error); + } + + /* + * Attempt to lock directory; fail if entry already exists. 
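The restrictions applied here (same file system, no hard links to directories, owner or privilege required) surface to applications as ordinary errno values. A user-space sketch of the directory case; the cross-mount case is only described in a comment because it depends on the local mount layout:

/* Illustrative only: link(2) behaviour for the cases rejected above. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

int
main(void)
{
	/*
	 * Hard-linking a directory: the vnode op above returns EPERM, and
	 * POSIX permits exactly that; most systems refuse it the same way.
	 */
	(void) mkdir("somedir", 0755);
	if (link("somedir", "dirlink") != 0)
		printf("link on a directory: %s\n", strerror(errno));

	/*
	 * A link whose source and target live on different mounted file
	 * systems fails with EXDEV, matching the v_vfsp check above.
	 */
	return (0);
}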
+ */ + error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL); + if (error) { + ZFS_EXIT(zfsvfs); + return (error); + } + + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_bonus(tx, szp->z_id); + dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); + error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (error) { + zfs_dirent_unlock(dl); + if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + dmu_tx_abort(tx); + ZFS_EXIT(zfsvfs); + return (error); + } + + error = zfs_link_create(dl, szp, tx, 0); + + if (error == 0) { + uint64_t txtype = TX_LINK; + if (flags & FIGNORECASE) + txtype |= TX_CI; + zfs_log_link(zilog, tx, txtype, dzp, szp, name); + } + + dmu_tx_commit(tx); + + zfs_dirent_unlock(dl); + + if (error == 0) { + vnevent_link(svp, ct); + } + + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * zfs_null_putapage() is used when the file system has been force + * unmounted. It just drops the pages. + */ +/* ARGSUSED */ +static int +zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, + size_t *lenp, int flags, cred_t *cr) +{ + pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR); + return (0); +} + +/* + * Push a page out to disk, klustering if possible. + * + * IN: vp - file to push page to. + * pp - page to push. + * flags - additional flags. + * cr - credentials of caller. + * + * OUT: offp - start of range pushed. + * lenp - len of range pushed. + * + * RETURN: 0 if success + * error code if failure + * + * NOTE: callers must have locked the page to be pushed. On + * exit, the page (and all other pages in the kluster) must be + * unlocked. + */ +/* ARGSUSED */ +static int +zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, + size_t *lenp, int flags, cred_t *cr) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + zilog_t *zilog = zfsvfs->z_log; + dmu_tx_t *tx; + rl_t *rl; + u_offset_t off, koff; + size_t len, klen; + uint64_t filesz; + int err; + + filesz = zp->z_phys->zp_size; + off = pp->p_offset; + len = PAGESIZE; + /* + * If our blocksize is bigger than the page size, try to kluster + * muiltiple pages so that we write a full block (thus avoiding + * a read-modify-write). + */ + if (off < filesz && zp->z_blksz > PAGESIZE) { + if (!ISP2(zp->z_blksz)) { + /* Only one block in the file. */ + klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE); + koff = 0; + } else { + klen = zp->z_blksz; + koff = P2ALIGN(off, (u_offset_t)klen); + } + ASSERT(koff <= filesz); + if (koff + klen > filesz) + klen = P2ROUNDUP(filesz - koff, (uint64_t)PAGESIZE); + pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags); + } + ASSERT3U(btop(len), ==, btopr(len)); +top: + rl = zfs_range_lock(zp, off, len, RL_WRITER); + /* + * Can't push pages past end-of-file. 
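The klustering arithmetic is easiest to see with concrete numbers. The sketch below re-creates the P2ALIGN/P2ROUNDUP steps with local copies of the usual Solaris power-of-two macros; the block size, file size and page offset are made-up example values, not anything from the code:

/* Illustrative only: rounding a dirty page out to its enclosing block. */
#include <stdio.h>
#include <stdint.h>

/* Local copies of the usual Solaris power-of-two helpers. */
#define P2ALIGN(x, align)	((x) & -(align))
#define P2ROUNDUP(x, align)	(-(-(x) & -(align)))

#define PAGESIZE	4096ULL

int
main(void)
{
	uint64_t blksz = 131072;	/* 128K record size (example) */
	uint64_t filesz = 200000;	/* file length (example) */
	uint64_t off = 140000;		/* offset of the dirty page (example) */

	uint64_t klen = blksz;
	uint64_t koff = P2ALIGN(off, klen);	/* 131072: start of the block */

	if (koff + klen > filesz)		/* don't run past EOF */
		klen = P2ROUNDUP(filesz - koff, PAGESIZE);

	printf("push [%llu, %llu) instead of just one page\n",
	    (unsigned long long)koff, (unsigned long long)(koff + klen));
	return (0);
}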
+ */ + filesz = zp->z_phys->zp_size; + if (off >= filesz) { + /* ignore all pages */ + err = 0; + goto out; + } else if (off + len > filesz) { + int npages = btopr(filesz - off); + page_t *trunc; + + page_list_break(&pp, &trunc, npages); + /* ignore pages past end of file */ + if (trunc) + pvn_write_done(trunc, flags); + len = filesz - off; + } + + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_write(tx, zp->z_id, off, len); + dmu_tx_hold_bonus(tx, zp->z_id); + err = dmu_tx_assign(tx, zfsvfs->z_assign); + if (err != 0) { + if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + zfs_range_unlock(rl); + dmu_tx_wait(tx); + dmu_tx_abort(tx); + err = 0; + goto top; + } + dmu_tx_abort(tx); + goto out; + } + + if (zp->z_blksz <= PAGESIZE) { + caddr_t va = zfs_map_page(pp, S_READ); + ASSERT3U(len, <=, PAGESIZE); + dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx); + zfs_unmap_page(pp, va); + } else { + err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx); + } + + if (err == 0) { + zfs_time_stamper(zp, CONTENT_MODIFIED, tx); + zfs_log_write(zilog, tx, TX_WRITE, zp, off, len, 0); + dmu_tx_commit(tx); + } + +out: + zfs_range_unlock(rl); + pvn_write_done(pp, (err ? B_ERROR : 0) | flags); + if (offp) + *offp = off; + if (lenp) + *lenp = len; + + return (err); +} + +/* + * Copy the portion of the file indicated from pages into the file. + * The pages are stored in a page list attached to the files vnode. + * + * IN: vp - vnode of file to push page data to. + * off - position in file to put data. + * len - amount of data to write. + * flags - flags to control the operation. + * cr - credentials of caller. + * ct - caller context. + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * vp - ctime|mtime updated + */ +/*ARGSUSED*/ +static int +zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr, + caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + page_t *pp; + size_t io_len; + u_offset_t io_off; + uint64_t filesz; + int error = 0; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + if (len == 0) { + /* + * Search the entire vp list for pages >= off. + */ + error = pvn_vplist_dirty(vp, (u_offset_t)off, zfs_putapage, + flags, cr); + goto out; + } + + filesz = zp->z_phys->zp_size; /* get consistent copy of zp_size */ + if (off > filesz) { + /* past end of file */ + ZFS_EXIT(zfsvfs); + return (0); + } + + len = MIN(len, filesz - off); + + for (io_off = off; io_off < off + len; io_off += io_len) { + if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) { + pp = page_lookup(vp, io_off, + (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED); + } else { + pp = page_lookup_nowait(vp, io_off, + (flags & B_FREE) ? SE_EXCL : SE_SHARED); + } + + if (pp != NULL && pvn_getdirty(pp, flags)) { + int err; + + /* + * Found a dirty page to push + */ + err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr); + if (err) + error = err; + } else { + io_len = PAGESIZE; + } + } +out: + if ((flags & B_ASYNC) == 0) + zil_commit(zfsvfs->z_log, UINT64_MAX, zp->z_id); + ZFS_EXIT(zfsvfs); + return (error); +} + +/*ARGSUSED*/ +void +zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + int error; + + rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER); + if (zp->z_dbuf == NULL) { + /* + * The fs has been unmounted, or we did a + * suspend/resume and this file no longer exists. 
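For a bounded request, the put-page path clamps the range to the file size and then walks it piecewise; the sketch below uses a fixed page-sized step and made-up sizes for simplicity, whereas the real loop advances by however much zfs_putapage() actually pushed:

/* Illustrative only: clamping and walking a dirty byte range page by page. */
#include <stdio.h>

#define PAGESIZE 4096UL

int
main(void)
{
	unsigned long filesz = 10000, off = 0, len = 64 * 1024;	/* examples */

	if (off > filesz)
		return (0);			/* entirely past EOF */
	if (len > filesz - off)
		len = filesz - off;		/* MIN(len, filesz - off) */

	for (unsigned long io_off = off; io_off < off + len; io_off += PAGESIZE)
		printf("visit page at offset %lu\n", io_off);
	return (0);
}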
+ */ + if (vn_has_cached_data(vp)) { + (void) pvn_vplist_dirty(vp, 0, zfs_null_putapage, + B_INVAL, cr); + } + + mutex_enter(&zp->z_lock); + vp->v_count = 0; /* count arrives as 1 */ + mutex_exit(&zp->z_lock); + rw_exit(&zfsvfs->z_teardown_inactive_lock); + zfs_znode_free(zp); + return; + } + + /* + * Attempt to push any data in the page cache. If this fails + * we will get kicked out later in zfs_zinactive(). + */ + if (vn_has_cached_data(vp)) { + (void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC, + cr); + } + + if (zp->z_atime_dirty && zp->z_unlinked == 0) { + dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os); + + dmu_tx_hold_bonus(tx, zp->z_id); + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + dmu_tx_abort(tx); + } else { + dmu_buf_will_dirty(zp->z_dbuf, tx); + mutex_enter(&zp->z_lock); + zp->z_atime_dirty = 0; + mutex_exit(&zp->z_lock); + dmu_tx_commit(tx); + } + } + + zfs_zinactive(zp); + rw_exit(&zfsvfs->z_teardown_inactive_lock); +} + +/* + * Bounds-check the seek operation. + * + * IN: vp - vnode seeking within + * ooff - old file offset + * noffp - pointer to new file offset + * ct - caller context + * + * RETURN: 0 if success + * EINVAL if new offset invalid + */ +/* ARGSUSED */ +static int +zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, + caller_context_t *ct) +{ + if (vp->v_type == VDIR) + return (0); + return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0); +} + +/* + * Pre-filter the generic locking function to trap attempts to place + * a mandatory lock on a memory mapped file. + */ +static int +zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset, + flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + int error; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + /* + * We are following the UFS semantics with respect to mapcnt + * here: If we see that the file is mapped already, then we will + * return an error, but we don't worry about races between this + * function and zfs_map(). + */ + if (zp->z_mapcnt > 0 && MANDMODE((mode_t)zp->z_phys->zp_mode)) { + ZFS_EXIT(zfsvfs); + return (EAGAIN); + } + error = fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct); + ZFS_EXIT(zfsvfs); + return (error); +} + +/* + * If we can't find a page in the cache, we will create a new page + * and fill it with file data. For efficiency, we may try to fill + * multiple pages at once (klustering). + */ +static int +zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg, + caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw) +{ + znode_t *zp = VTOZ(vp); + page_t *pp, *cur_pp; + objset_t *os = zp->z_zfsvfs->z_os; + caddr_t va; + u_offset_t io_off, total; + uint64_t oid = zp->z_id; + size_t io_len; + uint64_t filesz; + int err; + + /* + * If we are only asking for a single page don't bother klustering. + */ + filesz = zp->z_phys->zp_size; /* get consistent copy of zp_size */ + if (off >= filesz) + return (EFAULT); + if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) { + io_off = off; + io_len = PAGESIZE; + pp = page_create_va(vp, io_off, io_len, PG_WAIT, seg, addr); + } else { + /* + * Try to fill a kluster of pages (a blocks worth). + */ + size_t klen; + u_offset_t koff; + + if (!ISP2(zp->z_blksz)) { + /* Only one block in the file. */ + klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE); + koff = 0; + } else { + /* + * It would be ideal to align our offset to the + * blocksize but doing so has resulted in some + * strange application crashes. 
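The seek check above only bounds the new offset to [0, MAXOFFSET_T]; in particular, seeking well past end-of-file is allowed, and directories are not range-checked at all. A quick user-space confirmation (the file name is an arbitrary example):

/* Illustrative only: lseek() far past EOF is valid. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("f", O_CREAT | O_RDWR, 0644);

	if (fd < 0)
		return (1);
	/* Only negative or over-maximum offsets are rejected. */
	printf("lseek past EOF -> %lld\n",
	    (long long)lseek(fd, (off_t)1 << 30, SEEK_SET));
	close(fd);
	return (0);
}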
For now, we + * leave the offset as is and only adjust the + * length if we are off the end of the file. + */ + koff = off; + klen = plsz; + } + ASSERT(koff <= filesz); + if (koff + klen > filesz) + klen = P2ROUNDUP(filesz, (uint64_t)PAGESIZE) - koff; + ASSERT3U(off, >=, koff); + ASSERT3U(off, <, koff + klen); + pp = pvn_read_kluster(vp, off, seg, addr, &io_off, + &io_len, koff, klen, 0); + } + if (pp == NULL) { + /* + * Some other thread entered the page before us. + * Return to zfs_getpage to retry the lookup. + */ + *pl = NULL; + return (0); + } + + /* + * Fill the pages in the kluster. + */ + cur_pp = pp; + for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) { + ASSERT3U(io_off, ==, cur_pp->p_offset); + va = zfs_map_page(cur_pp, S_WRITE); + err = dmu_read(os, oid, io_off, PAGESIZE, va); + zfs_unmap_page(cur_pp, va); + if (err) { + /* On error, toss the entire kluster */ + pvn_read_done(pp, B_ERROR); + /* convert checksum errors into IO errors */ + if (err == ECKSUM) + err = EIO; + return (err); + } + cur_pp = cur_pp->p_next; + } +out: + /* + * Fill in the page list array from the kluster. If + * there are too many pages in the kluster, return + * as many pages as possible starting from the desired + * offset `off'. + * NOTE: the page list will always be null terminated. + */ + pvn_plist_init(pp, pl, plsz, off, io_len, rw); + + return (0); +} + +/* + * Return pointers to the pages for the file region [off, off + len] + * in the pl array. If plsz is greater than len, this function may + * also return page pointers from before or after the specified + * region (i.e. some region [off', off' + plsz]). These additional + * pages are only returned if they are already in the cache, or were + * created as part of a klustered read. + * + * IN: vp - vnode of file to get data from. + * off - position in file to get data from. + * len - amount of data to retrieve. + * plsz - length of provided page list. + * seg - segment to obtain pages for. + * addr - virtual address of fault. + * rw - mode of created pages. + * cr - credentials of caller. + * ct - caller context. + * + * OUT: protp - protection mode of created pages. + * pl - list of pages created. + * + * RETURN: 0 if success + * error code if failure + * + * Timestamps: + * vp - atime updated + */ +/* ARGSUSED */ +static int +zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp, + page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, + enum seg_rw rw, cred_t *cr, caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + page_t *pp, **pl0 = pl; + int need_unlock = 0, err = 0; + offset_t orig_off; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + if (protp) + *protp = PROT_ALL; + + /* no faultahead (for now) */ + if (pl == NULL) { + ZFS_EXIT(zfsvfs); + return (0); + } + + /* can't fault past EOF */ + if (off >= zp->z_phys->zp_size) { + ZFS_EXIT(zfsvfs); + return (EFAULT); + } + orig_off = off; + + /* + * If we already own the lock, then we must be page faulting + * in the middle of a write to this file (i.e., we are writing + * to this file using data from a mapped region of the file). + */ + if (rw_owner(&zp->z_map_lock) != curthread) { + rw_enter(&zp->z_map_lock, RW_WRITER); + need_unlock = TRUE; + } + + /* + * Loop through the requested range [off, off + len] looking + * for pages. If we don't find a page, we will need to create + * a new page and fill it with data from the file. 
+ */ + while (len > 0) { + if (plsz < PAGESIZE) + break; + if (pp = page_lookup(vp, off, SE_SHARED)) { + *pl++ = pp; + off += PAGESIZE; + addr += PAGESIZE; + len -= PAGESIZE; + plsz -= PAGESIZE; + } else { + err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw); + if (err) + goto out; + /* + * klustering may have changed our region + * to be block aligned. + */ + if (((pp = *pl) != 0) && (off != pp->p_offset)) { + int delta = off - pp->p_offset; + len += delta; + off -= delta; + addr -= delta; + } + while (*pl) { + pl++; + off += PAGESIZE; + addr += PAGESIZE; + plsz -= PAGESIZE; + if (len > PAGESIZE) + len -= PAGESIZE; + else + len = 0; + } + } + } + + /* + * Fill out the page array with any pages already in the cache. + */ + while (plsz > 0) { + pp = page_lookup_nowait(vp, off, SE_SHARED); + if (pp == NULL) + break; + *pl++ = pp; + off += PAGESIZE; + plsz -= PAGESIZE; + } + + ZFS_ACCESSTIME_STAMP(zfsvfs, zp); +out: + /* + * We can't grab the range lock for the page as reader which would + * stop truncation as this leads to deadlock. So we need to recheck + * the file size. + */ + if (orig_off >= zp->z_phys->zp_size) + err = EFAULT; + if (err) { + /* + * Release any pages we have previously locked. + */ + while (pl > pl0) + page_unlock(*--pl); + } + + *pl = NULL; + + if (need_unlock) + rw_exit(&zp->z_map_lock); + + ZFS_EXIT(zfsvfs); + return (err); +} + +/* + * Request a memory map for a section of a file. This code interacts + * with common code and the VM system as follows: + * + * common code calls mmap(), which ends up in smmap_common() + * + * this calls VOP_MAP(), which takes you into (say) zfs + * + * zfs_map() calls as_map(), passing segvn_create() as the callback + * + * segvn_create() creates the new segment and calls VOP_ADDMAP() + * + * zfs_addmap() updates z_mapcnt + */ +/*ARGSUSED*/ +static int +zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp, + size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr, + caller_context_t *ct) +{ + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + segvn_crargs_t vn_a; + int error; + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + if ((prot & PROT_WRITE) && + (zp->z_phys->zp_flags & (ZFS_IMMUTABLE | ZFS_READONLY | + ZFS_APPENDONLY))) { + ZFS_EXIT(zfsvfs); + return (EPERM); + } + + if ((prot & (PROT_READ | PROT_EXEC)) && + (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED)) { + ZFS_EXIT(zfsvfs); + return (EACCES); + } + + if (vp->v_flag & VNOMAP) { + ZFS_EXIT(zfsvfs); + return (ENOSYS); + } + + if (off < 0 || len > MAXOFFSET_T - off) { + ZFS_EXIT(zfsvfs); + return (ENXIO); + } + + if (vp->v_type != VREG) { + ZFS_EXIT(zfsvfs); + return (ENODEV); + } + + /* + * If file is locked, disallow mapping. 
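One detail worth noting in the mapping checks is the shape of the range test: it is written as len > MAXOFFSET_T - off rather than off + len > MAXOFFSET_T, so the comparison itself cannot overflow. A stand-alone version of that pattern; MAXOFF_T below is an assumed stand-in for MAXOFFSET_T:

/* Illustrative only: overflow-safe validation of an (offset, length) pair. */
#include <stdio.h>
#include <stdint.h>

#define MAXOFF_T INT64_MAX	/* assumed stand-in for MAXOFFSET_T */

static int
range_ok(int64_t off, uint64_t len)
{
	/* "len > MAX - off" never overflows, unlike "off + len > MAX". */
	if (off < 0 || len > (uint64_t)(MAXOFF_T - off))
		return (0);
	return (1);
}

int
main(void)
{
	printf("%d %d\n", range_ok(0, 4096), range_ok(MAXOFF_T - 10, 4096));
	return (0);
}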
+	 */
+	if (MANDMODE((mode_t)zp->z_phys->zp_mode) && vn_has_flocks(vp)) {
+		ZFS_EXIT(zfsvfs);
+		return (EAGAIN);
+	}
+
+	as_rangelock(as);
+	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
+	if (error != 0) {
+		as_rangeunlock(as);
+		ZFS_EXIT(zfsvfs);
+		return (error);
+	}
+
+	vn_a.vp = vp;
+	vn_a.offset = (u_offset_t)off;
+	vn_a.type = flags & MAP_TYPE;
+	vn_a.prot = prot;
+	vn_a.maxprot = maxprot;
+	vn_a.cred = cr;
+	vn_a.amp = NULL;
+	vn_a.flags = flags & ~MAP_TYPE;
+	vn_a.szc = 0;
+	vn_a.lgrp_mem_policy_flags = 0;
+
+	error = as_map(as, *addrp, len, segvn_create, &vn_a);
+
+	as_rangeunlock(as);
+	ZFS_EXIT(zfsvfs);
+	return (error);
+}
+
+/* ARGSUSED */
+static int
+zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
+    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
+    caller_context_t *ct)
+{
+	uint64_t pages = btopr(len);
+
+	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
+	return (0);
+}
+
+/*
+ * The reason we push dirty pages as part of zfs_delmap() is so that we get a
+ * more accurate mtime for the associated file. Since we don't have a way of
+ * detecting when the data was actually modified, we have to resort to
+ * heuristics. If an explicit msync() is done, then we mark the mtime when the
+ * last page is pushed. The problem occurs when the msync() call is omitted,
+ * which is by far the most common case:
+ *
+ *	open()
+ *	mmap()
+ *	<modify memory>
+ *	munmap()
+ *	close()
+ *