4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 /* Portions Copyright 2007 Jeremy Teo */
28 #pragma ident "@(#)zfs_znode.c 1.34 08/04/27 SMI"
31 #include <sys/types.h>
32 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/resource.h>
37 #include <sys/mntent.h>
38 #include <sys/mkdev.h>
39 #include <sys/u8_textprep.h>
40 #include <sys/dsl_dataset.h>
42 #include <sys/vfs_opreg.h>
43 #include <sys/vnode.h>
46 #include <sys/errno.h>
47 #include <sys/unistd.h>
49 #include <sys/atomic.h>
51 #include "fs/fs_subr.h"
52 #include <sys/zfs_dir.h>
53 #include <sys/zfs_acl.h>
54 #include <sys/zfs_ioctl.h>
55 #include <sys/zfs_rlock.h>
56 #include <sys/zfs_fuid.h>
57 #include <sys/fs/zfs.h>
58 #include <sys/kidmap.h>
62 #include <sys/refcount.h>
65 #include <sys/zfs_znode.h>
70 * Functions needed for userland (ie: libzpool) are not put under
71 * #ifdef _KERNEL; the rest of the functions have dependencies
72 * (such as VFS logic) that will not compile easily in userland.
/* Cache from which all znode_t structures are allocated (see zfs_znode_init). */
75 struct kmem_cache *znode_cache = NULL;

/*
 * DMU user-eviction callback for znodes.  The callback must be cleared
 * before the last dbuf reference is dropped, so reaching this function
 * is always a fatal logic error.
 */
79 znode_evict_error(dmu_buf_t *dbuf, void *user_ptr)
82 * We should never drop all dbuf refs without first clearing
83 * the eviction callback.
85 panic("evicting znode %p\n", user_ptr);
/*
 * kmem cache constructor: allocate the backing vnode and initialize the
 * locks embedded in a znode_t.  Runs once per cached object, not once
 * per zget, so only state that survives reuse belongs here.
 */
90 zfs_znode_cache_constructor(void *buf, void *cdrarg, int kmflags)
94 zp->z_vnode = vn_alloc(KM_SLEEP);
95 zp->z_vnode->v_data = (caddr_t)zp;
96 mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
97 rw_init(&zp->z_map_lock, NULL, RW_DEFAULT, NULL);
98 rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
99 rw_init(&zp->z_name_lock, NULL, RW_DEFAULT, NULL);
100 mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
/* Byte-range locks are kept in an AVL tree ordered via rl_t.r_node. */
102 mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
103 avl_create(&zp->z_range_avl, zfs_range_compare,
104 sizeof (rl_t), offsetof(rl_t, r_node));
/*
 * kmem cache destructor: tear down everything the constructor set up.
 * The ASSERTs document the state a znode must be in when returned to
 * the cache: no directory locks, no attached dbuf, vnode hold count 0.
 */
113 zfs_znode_cache_destructor(void *buf, void *cdarg)
117 ASSERT(zp->z_dirlocks == 0);
118 mutex_destroy(&zp->z_lock);
119 rw_destroy(&zp->z_map_lock);
120 rw_destroy(&zp->z_parent_lock);
121 rw_destroy(&zp->z_name_lock);
122 mutex_destroy(&zp->z_acl_lock);
123 avl_destroy(&zp->z_range_avl);
124 mutex_destroy(&zp->z_range_lock);
126 ASSERT(zp->z_dbuf == NULL);
127 ASSERT(ZTOV(zp)->v_count == 0);
/* zfs_znode_init: create the znode kmem cache; must run exactly once. */
137 ASSERT(znode_cache == NULL);
138 znode_cache = kmem_cache_create("zfs_znode_cache",
139 sizeof (znode_t), 0, zfs_znode_cache_constructor,
140 zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
/* zfs_znode_fini: release vfs/vnode op tables, then destroy the cache. */
147 * Cleanup vfs & vnode ops
149 zfs_remove_op_tables();
155 kmem_cache_destroy(znode_cache);
/*
 * Per-vnode-type operation vectors (directory, regular file, symlink,
 * xattr directory, "error" vnode), built by zfs_create_op_tables().
 */
159 struct vnodeops *zfs_dvnodeops;
160 struct vnodeops *zfs_fvnodeops;
161 struct vnodeops *zfs_symvnodeops;
162 struct vnodeops *zfs_xdvnodeops;
163 struct vnodeops *zfs_evnodeops;
/*
 * Unregister the zfs vfsops and free each vnodeops table that was
 * created, then NULL the pointers so a later re-registration starts
 * from a clean slate.
 */
166 zfs_remove_op_tables()
172 (void) vfs_freevfsops_by_type(zfsfstype);
179 vn_freevnodeops(zfs_dvnodeops);
181 vn_freevnodeops(zfs_fvnodeops);
183 vn_freevnodeops(zfs_symvnodeops);
185 vn_freevnodeops(zfs_xdvnodeops);
187 vn_freevnodeops(zfs_evnodeops);
189 zfs_dvnodeops = NULL;
190 zfs_fvnodeops = NULL;
191 zfs_symvnodeops = NULL;
192 zfs_xdvnodeops = NULL;
193 zfs_evnodeops = NULL;
/* Operation templates are defined in zfs_vnops.c. */
196 extern const fs_operation_def_t zfs_dvnodeops_template[];
197 extern const fs_operation_def_t zfs_fvnodeops_template[];
198 extern const fs_operation_def_t zfs_xdvnodeops_template[];
199 extern const fs_operation_def_t zfs_symvnodeops_template[];
200 extern const fs_operation_def_t zfs_evnodeops_template[];

/*
 * Build each vnodeops table from its template via vn_make_ops().
 */
203 zfs_create_op_tables()
208 * zfs_dvnodeops can be set if mod_remove() calls mod_installfs()
209 * due to a failure to remove the 2nd modlinkage (zfs_modldrv).
210 * In this case we just return as the ops vectors are already set up.
215 error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
220 error = vn_make_ops(MNTTYPE_ZFS, zfs_fvnodeops_template,
225 error = vn_make_ops(MNTTYPE_ZFS, zfs_symvnodeops_template,
230 error = vn_make_ops(MNTTYPE_ZFS, zfs_xdvnodeops_template,
235 error = vn_make_ops(MNTTYPE_ZFS, zfs_evnodeops_template,
242 * zfs_init_fs - Initialize the zfsvfs struct and the file system
243 * incore "master" object. Verify version compatibility.
246 zfs_init_fs(zfsvfs_t *zfsvfs, znode_t **zpp, cred_t *cr)
248 extern int zfsfstype;
250 objset_t *os = zfsvfs->z_os;
252 dmu_object_info_t doi;
259 * XXX - hack to auto-create the pool root filesystem at
260 * the first attempted mount.
262 if (dmu_object_info(os, MASTER_NODE_OBJ, &doi) == ENOENT) {
263 dmu_tx_t *tx = dmu_tx_create(os);
264 uint64_t zpl_version;
267 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, TRUE, NULL); /* master */
268 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, TRUE, NULL); /* del queue */
269 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); /* root node */
270 error = dmu_tx_assign(tx, TXG_WAIT);
271 ASSERT3U(error, ==, 0);
/* Pools too old for FUIDs get the last pre-FUID ZPL version. */
272 if (spa_version(dmu_objset_spa(os)) >= SPA_VERSION_FUID)
273 zpl_version = ZPL_VERSION;
275 zpl_version = ZPL_VERSION_FUID - 1;
277 VERIFY(nvlist_alloc(&zprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
278 VERIFY(nvlist_add_uint64(zprops,
279 zfs_prop_to_name(ZFS_PROP_VERSION), zpl_version) == 0);
280 zfs_create_fs(os, cr, zprops, tx);
285 error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
288 } else if (zfsvfs->z_version > ZPL_VERSION) {
/*
 * NOTE(review): %lld expects a 64-bit argument but ZPL_VERSION looks
 * like an int constant -- verify the format/argument pairing.
 */
289 (void) printf("Mismatched versions: File system "
290 "is version %llu on-disk format, which is "
291 "incompatible with this software version %lld!",
292 (u_longlong_t)zfsvfs->z_version, ZPL_VERSION);
/* Cache the normalization / utf8-only / case-sensitivity properties. */
296 if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
298 zfsvfs->z_norm = (int)zval;
299 if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
301 zfsvfs->z_utf8 = (zval != 0);
302 if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
304 zfsvfs->z_case = (uint_t)zval;
306 * Fold case on file systems that are always or sometimes case
309 if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
310 zfsvfs->z_case == ZFS_CASE_MIXED)
311 zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
314 * The fsid is 64 bits, composed of an 8-bit fs type, which
315 * separates our fsid from any other filesystem types, and a
316 * 56-bit objset unique ID. The objset unique ID is unique to
317 * all objsets open on this system, provided by unique_create().
318 * The 8-bit fs type must be put in the low bits of fsid[1]
319 * because that's where other Solaris filesystems put it.
321 fsid_guid = dmu_objset_fsid_guid(os);
322 ASSERT((fsid_guid & ~((1ULL<<56)-1)) == 0);
323 zfsvfs->z_vfs->vfs_fsid.val[0] = fsid_guid;
324 zfsvfs->z_vfs->vfs_fsid.val[1] = ((fsid_guid>>32) << 8) |
/* Look up the root directory and unlinked-set object numbers. */
327 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
331 ASSERT(zfsvfs->z_root != 0);
333 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
334 &zfsvfs->z_unlinkedobj);
339 * Initialize zget mutex's
341 for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
342 mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
344 error = zfs_zget(zfsvfs, zfsvfs->z_root, zpp);
347 * On error, we destroy the mutexes here since it's not
348 * possible for the caller to determine if the mutexes were
349 * initialized properly.
351 for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
352 mutex_destroy(&zfsvfs->z_hold_mtx[i]);
355 ASSERT3U((*zpp)->z_id, ==, zfsvfs->z_root);
356 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
357 &zfsvfs->z_fuid_obj);
365 * define a couple of values we need available
366 * for both 64 and 32 bit environments.
369 #define NBITSMINOR64 32
372 #define MAXMAJ64 0xffffffffUL
375 #define MAXMIN64 0xffffffffUL
379 * Create special expldev for ZFS private use.
380 * Can't use standard expldev since it doesn't do
381 * what we want. The standard expldev() takes a
382 * dev32_t in LP64 and expands it to a long dev_t.
383 * We need an interface that takes a dev32_t in ILP32
384 * and expands it to a long dev_t.
387 zfs_expldev(dev_t dev)
/* Split the 32-bit dev into major/minor, then re-pack as 64 bits. */
390 major_t major = (major_t)dev >> NBITSMINOR32 & MAXMAJ32;
391 return (((uint64_t)major << NBITSMINOR64) |
392 ((minor_t)dev & MAXMIN32));
399 * Special cmpldev for ZFS private use.
400 * Can't use standard cmpldev since it takes
401 * a long dev_t and compresses it to dev32_t in
402 * LP64. We need to do a compaction of a long dev_t
403 * to a dev32_t in ILP32.
406 zfs_cmpldev(uint64_t dev)
409 minor_t minor = (minor_t)dev & MAXMIN64;
410 major_t major = (major_t)(dev >> NBITSMINOR64) & MAXMAJ64;
/* Overflow check: the 64-bit dev must fit in a dev32_t major/minor. */
412 if (major > MAXMAJ32 || minor > MAXMIN32)
415 return (((dev32_t)major << NBITSMINOR32) | minor);
/*
 * Attach a znode to its bonus-buffer dbuf: register the znode as the
 * dbuf user (with znode_evict_error as the eviction callback) and point
 * z_phys at the bonus data.  Caller must hold the per-object mutex.
 */
422 zfs_znode_dmu_init(znode_t *zp, dmu_buf_t *db)
425 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
427 ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp)));
429 mutex_enter(&zp->z_lock);
431 ASSERT(zp->z_dbuf == NULL);
433 nzp = dmu_buf_set_user_ie(db, zp, &zp->z_phys, znode_evict_error);
437 * concurrent zgets on this object.
/* A non-NULL return means another znode already claimed this dbuf. */
440 panic("existing znode %p for dbuf %p", nzp, db);
443 * Slap on VROOT if we are the root znode
445 if (zp->z_id == zfsvfs->z_root)
446 ZTOV(zp)->v_flag |= VROOT;
448 mutex_exit(&zp->z_lock);
/*
 * Detach a znode from its dbuf: clear the dbuf user pointer and drop
 * the bonus-buffer hold.  Legal while holding the per-object mutex,
 * when the znode is unlinked, or during forced teardown.
 */
453 zfs_znode_dmu_fini(znode_t *zp)
455 dmu_buf_t *db = zp->z_dbuf;
456 ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp)) || zp->z_unlinked ||
457 RW_WRITE_HELD(&zp->z_zfsvfs->z_teardown_inactive_lock));
458 ASSERT(zp->z_dbuf != NULL);
460 VERIFY(zp == dmu_buf_update_user(db, zp, NULL, NULL, NULL));
461 dmu_buf_rele(db, NULL);
465 * Construct a new znode/vnode and initialize.
467 * This does not do a call to dmu_set_user() that is
468 * up to the caller to do, in case you don't want to
472 zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz)
477 zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
479 ASSERT(zp->z_dirlocks == NULL);
480 ASSERT(zp->z_dbuf == NULL);
483 zp->z_zfsvfs = zfsvfs;
485 zp->z_atime_dirty = 0;
488 zp->z_id = db->db_object;
490 zp->z_seq = 0x7A4653;
496 zfs_znode_dmu_init(zp, db);
498 zp->z_gen = zp->z_phys->zp_gen;
/* Track every live znode on the per-filesystem list. */
500 mutex_enter(&zfsvfs->z_znodes_lock);
501 list_insert_tail(&zfsvfs->z_all_znodes, zp);
502 mutex_exit(&zfsvfs->z_znodes_lock);
504 vp->v_vfsp = zfsvfs->z_parent->z_vfs;
505 vp->v_type = IFTOVT((mode_t)zp->z_phys->zp_mode);
/* Select the vnodeops table matching the on-disk object type. */
507 switch (vp->v_type) {
509 if (zp->z_phys->zp_flags & ZFS_XATTR) {
510 vn_setops(vp, zfs_xdvnodeops);
511 vp->v_flag |= V_XATTRDIR;
513 vn_setops(vp, zfs_dvnodeops);
515 zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
519 vp->v_rdev = zfs_cmpldev(zp->z_phys->zp_rdev);
524 vn_setops(vp, zfs_fvnodeops);
/* VMODSORT lets the VM layer keep modified pages sorted for this vnode. */
527 vp->v_flag |= VMODSORT;
528 vn_setops(vp, zfs_fvnodeops);
531 vn_setops(vp, zfs_symvnodeops);
534 vn_setops(vp, zfs_evnodeops);
/* The znode holds its filesystem in place until zfs_znode_free(). */
538 VFS_HOLD(zfsvfs->z_vfs);
543 * Create a new DMU object to hold a zfs znode.
545 * IN: dzp - parent directory for new znode
546 * vap - file attributes for new znode
547 * tx - dmu transaction id for zap operations
548 * cr - credentials of caller
550 * IS_ROOT_NODE - new object will be root
551 * IS_XATTR - new object is an attribute
552 * IS_REPLAY - intent log replay
553 * bonuslen - length of bonus buffer
554 * setaclp - File/Dir initial ACL
555 * fuidp - Tracks fuid allocation.
557 * OUT: zpp - allocated znode
561 zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
562 uint_t flag, znode_t **zpp, int bonuslen, zfs_acl_t *setaclp,
563 zfs_fuid_info_t **fuidp)
567 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
572 ASSERT(vap && (vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
/* During ZIL replay the object number, ctime and gen come from the record. */
574 if (zfsvfs->z_assign >= TXG_INITIAL) { /* ZIL replay */
575 obj = vap->va_nodeid;
577 now = vap->va_ctime; /* see zfs_replay_create() */
578 gen = vap->va_nblocks; /* ditto */
582 gen = dmu_tx_get_txg(tx);
586 * Create a new DMU object.
589 * There's currently no mechanism for pre-reading the blocks that will
590 * be needed to allocate a new object, so we accept the small chance
591 * that there will be an i/o error and we will fail one of the
/* Directories are ZAP objects; plain files are ordinary DMU objects. */
594 if (vap->va_type == VDIR) {
595 if (flag & IS_REPLAY) {
596 err = zap_create_claim_norm(zfsvfs->z_os, obj,
597 zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
598 DMU_OT_ZNODE, sizeof (znode_phys_t) + bonuslen, tx);
599 ASSERT3U(err, ==, 0);
601 obj = zap_create_norm(zfsvfs->z_os,
602 zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
603 DMU_OT_ZNODE, sizeof (znode_phys_t) + bonuslen, tx);
606 if (flag & IS_REPLAY) {
607 err = dmu_object_claim(zfsvfs->z_os, obj,
608 DMU_OT_PLAIN_FILE_CONTENTS, 0,
609 DMU_OT_ZNODE, sizeof (znode_phys_t) + bonuslen, tx);
610 ASSERT3U(err, ==, 0);
612 obj = dmu_object_alloc(zfsvfs->z_os,
613 DMU_OT_PLAIN_FILE_CONTENTS, 0,
614 DMU_OT_ZNODE, sizeof (znode_phys_t) + bonuslen, tx);
617 VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, obj, NULL, &db));
618 dmu_buf_will_dirty(db, tx);
621 * Initialize the znode physical data to zero.
623 ASSERT(db->db_size >= sizeof (znode_phys_t));
624 bzero(db->db_data, db->db_size);
628 * If this is the root, fix up the half-initialized parent pointer
629 * to reference the just-allocated physical data area.
631 if (flag & IS_ROOT_NODE) {
638 * If parent is an xattr, so am I.
640 if (dzp->z_phys->zp_flags & ZFS_XATTR)
643 if (vap->va_type == VBLK || vap->va_type == VCHR) {
644 pzp->zp_rdev = zfs_expldev(vap->va_rdev);
647 if (zfsvfs->z_use_fuids)
648 pzp->zp_flags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
650 if (vap->va_type == VDIR) {
651 pzp->zp_size = 2; /* contents ("." and "..") */
652 pzp->zp_links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
655 pzp->zp_parent = dzp->z_id;
657 pzp->zp_flags |= ZFS_XATTR;
/* Stamp creation/change times; honor caller-supplied atime/mtime. */
661 ZFS_TIME_ENCODE(&now, pzp->zp_crtime);
662 ZFS_TIME_ENCODE(&now, pzp->zp_ctime);
664 if (vap->va_mask & AT_ATIME) {
665 ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime);
667 ZFS_TIME_ENCODE(&now, pzp->zp_atime);
670 if (vap->va_mask & AT_MTIME) {
671 ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime);
673 ZFS_TIME_ENCODE(&now, pzp->zp_mtime);
676 pzp->zp_mode = MAKEIMODE(vap->va_type, vap->va_mode);
677 if (!(flag & IS_ROOT_NODE)) {
678 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj)
679 *zpp = zfs_znode_alloc(zfsvfs, db, 0);
680 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
683 * If we are creating the root node, the "parent" we
684 * passed in is the znode for the root.
688 zfs_perm_init(*zpp, dzp, flag, vap, tx, cr, setaclp, fuidp);
/*
 * Copy the requested optional (system) attributes from an xvattr into
 * the znode, marking each attribute actually applied via XVA_SET_RTN
 * so the caller can tell which requests were honored.
 */
692 zfs_xvattr_set(znode_t *zp, xvattr_t *xvap)
696 xoap = xva_getxoptattr(xvap);
699 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
700 ZFS_TIME_ENCODE(&xoap->xoa_createtime, zp->z_phys->zp_crtime);
701 XVA_SET_RTN(xvap, XAT_CREATETIME);
703 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
704 ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly);
705 XVA_SET_RTN(xvap, XAT_READONLY);
707 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
708 ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden);
709 XVA_SET_RTN(xvap, XAT_HIDDEN);
711 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
712 ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system);
713 XVA_SET_RTN(xvap, XAT_SYSTEM);
715 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
716 ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive);
717 XVA_SET_RTN(xvap, XAT_ARCHIVE);
719 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
720 ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable);
721 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
723 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
724 ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink);
725 XVA_SET_RTN(xvap, XAT_NOUNLINK);
727 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
728 ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly);
729 XVA_SET_RTN(xvap, XAT_APPENDONLY);
731 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
732 ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump);
733 XVA_SET_RTN(xvap, XAT_NODUMP);
735 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
736 ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque);
737 XVA_SET_RTN(xvap, XAT_OPAQUE);
739 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
740 ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
741 xoap->xoa_av_quarantined);
742 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
744 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
745 ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified);
746 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
748 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
/* The scanstamp lives just past the znode_phys_t in the bonus buffer. */
749 (void) memcpy(zp->z_phys + 1, xoap->xoa_av_scanstamp,
750 sizeof (xoap->xoa_av_scanstamp));
751 zp->z_phys->zp_flags |= ZFS_BONUS_SCANSTAMP;
752 XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
/*
 * Look up (or construct) the znode for object obj_num, returning it
 * held in *zpp.  The per-object hold mutex serializes this against
 * concurrent zgets on the same object.
 */
757 zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
759 dmu_object_info_t doi;
766 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
768 err = dmu_bonus_hold(zfsvfs->z_os, obj_num, NULL, &db);
770 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
/* Sanity-check that the object really carries a znode bonus buffer. */
774 dmu_object_info_from_db(db, &doi);
775 if (doi.doi_bonus_type != DMU_OT_ZNODE ||
776 doi.doi_bonus_size < sizeof (znode_phys_t)) {
777 dmu_buf_rele(db, NULL);
778 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
782 zp = dmu_buf_get_user(db);
784 mutex_enter(&zp->z_lock);
787 * Since we do immediate eviction of the z_dbuf, we
788 * should never find a dbuf with a znode that doesn't
789 * know about the dbuf.
791 ASSERT3P(zp->z_dbuf, ==, db);
792 ASSERT3U(zp->z_id, ==, obj_num);
793 if (zp->z_unlinked) {
800 dmu_buf_rele(db, NULL);
801 mutex_exit(&zp->z_lock);
802 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
807 * Not found; create new znode/vnode
809 zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size);
810 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
/*
 * Re-attach a znode to its on-disk state (e.g. after a rollback or
 * forced teardown).  Fails if the object no longer looks like the same
 * znode: wrong bonus type/size or a generation mismatch.
 */
816 zfs_rezget(znode_t *zp)
818 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
819 dmu_object_info_t doi;
821 uint64_t obj_num = zp->z_id;
824 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
826 err = dmu_bonus_hold(zfsvfs->z_os, obj_num, NULL, &db);
828 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
832 dmu_object_info_from_db(db, &doi);
833 if (doi.doi_bonus_type != DMU_OT_ZNODE ||
834 doi.doi_bonus_size < sizeof (znode_phys_t)) {
835 dmu_buf_rele(db, NULL);
836 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
/* A different generation means the object was freed and reused. */
840 if (((znode_phys_t *)db->db_data)->zp_gen != zp->z_gen) {
841 dmu_buf_rele(db, NULL);
842 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
846 zfs_znode_dmu_init(zp, db);
847 zp->z_unlinked = (zp->z_phys->zp_links == 0);
848 zp->z_blksz = doi.doi_data_block_size;
850 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
/*
 * Free the znode's DMU object (and any external ACL object first)
 * within the given transaction, then detach the znode from its dbuf.
 */
856 zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
858 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
859 uint64_t obj = zp->z_id;
861 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
862 if (zp->z_phys->zp_acl.z_acl_extern_obj) {
863 VERIFY(0 == dmu_object_free(zfsvfs->z_os,
864 zp->z_phys->zp_acl.z_acl_extern_obj, tx));
866 VERIFY(0 == dmu_object_free(zfsvfs->z_os, obj, tx));
867 zfs_znode_dmu_fini(zp);
868 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
/*
 * Called when the vnode goes inactive: tear the znode down (handling
 * the unlinked case) unless it has picked up a new hold or still has
 * cached pages, in which case we bail out and leave it alone.
 */
873 zfs_zinactive(znode_t *zp)
875 vnode_t *vp = ZTOV(zp);
876 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
877 uint64_t z_id = zp->z_id;
879 ASSERT(zp->z_dbuf && zp->z_phys);
882 * Don't allow a zfs_zget() while we're trying to release this znode
884 ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
886 mutex_enter(&zp->z_lock);
887 mutex_enter(&vp->v_lock);
889 if (vp->v_count > 0 || vn_has_cached_data(vp)) {
891 * If the hold count is greater than zero, somebody has
892 * obtained a new reference on this znode while we were
893 * processing it here, so we are done. If we still have
894 * mapped pages then we are also done, since we don't
895 * want to inactivate the znode until the pages get pushed.
897 * XXX - if vn_has_cached_data(vp) is true, but count == 0,
898 * this seems like it would leave the znode hanging with
899 * no chance to go inactive...
901 mutex_exit(&vp->v_lock);
902 mutex_exit(&zp->z_lock);
903 ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
906 mutex_exit(&vp->v_lock);
909 * If this was the last reference to a file with no links,
910 * remove the file from the file system.
912 if (zp->z_unlinked) {
913 mutex_exit(&zp->z_lock);
914 ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
918 mutex_exit(&zp->z_lock);
919 zfs_znode_dmu_fini(zp);
920 ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
/*
 * Final znode teardown: invalidate the vnode, unhook the znode from the
 * per-filesystem list, return it to the kmem cache, and drop the VFS
 * hold taken when the znode was allocated.
 */
925 zfs_znode_free(znode_t *zp)
927 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
929 vn_invalid(ZTOV(zp));
931 mutex_enter(&zfsvfs->z_znodes_lock);
932 list_remove(&zfsvfs->z_all_znodes, zp);
933 mutex_exit(&zfsvfs->z_znodes_lock);
/* zfsvfs was copied to a local above, so releasing after the free is safe. */
935 kmem_cache_free(znode_cache, zp);
937 VFS_RELE(zfsvfs->z_vfs);
/*
 * Encode the current time into the timestamp fields selected by flag.
 * Caller must hold z_lock.  With a transaction the dbuf is dirtied so
 * the update reaches disk; otherwise atime is only marked dirty in core.
 */
941 zfs_time_stamper_locked(znode_t *zp, uint_t flag, dmu_tx_t *tx)
945 ASSERT(MUTEX_HELD(&zp->z_lock));
950 dmu_buf_will_dirty(zp->z_dbuf, tx);
951 zp->z_atime_dirty = 0;
954 zp->z_atime_dirty = 1;
958 ZFS_TIME_ENCODE(&now, zp->z_phys->zp_atime);
960 if (flag & AT_MTIME) {
961 ZFS_TIME_ENCODE(&now, zp->z_phys->zp_mtime);
/* FUID-capable filesystems also maintain the DOS-style flag bits. */
962 if (zp->z_zfsvfs->z_use_fuids)
963 zp->z_phys->zp_flags |= (ZFS_ARCHIVE | ZFS_AV_MODIFIED);
966 if (flag & AT_CTIME) {
967 ZFS_TIME_ENCODE(&now, zp->z_phys->zp_ctime);
968 if (zp->z_zfsvfs->z_use_fuids)
969 zp->z_phys->zp_flags |= ZFS_ARCHIVE;
974 * Update the requested znode timestamps with the current time.
975 * If we are in a transaction, then go ahead and mark the znode
976 * dirty in the transaction so the timestamps will go to disk.
977 * Otherwise, we will get pushed next time the znode is updated
978 * in a transaction, or when this znode eventually goes inactive.
981 * 1 - Only the ACCESS time is ever updated outside of a transaction.
982 * 2 - Multiple consecutive updates will be collapsed into a single
983 * znode update by the transaction grouping semantics of the DMU.
/* Locking wrapper around zfs_time_stamper_locked(). */
986 zfs_time_stamper(znode_t *zp, uint_t flag, dmu_tx_t *tx)
988 mutex_enter(&zp->z_lock);
989 zfs_time_stamper_locked(zp, flag, tx);
990 mutex_exit(&zp->z_lock);
994 * Grow the block size for a file.
996 * IN: zp - znode of file to free data in.
997 * size - requested block size
998 * tx - open transaction.
1000 * NOTE: this function assumes that the znode is write locked.
1003 zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
1008 if (size <= zp->z_blksz)
1011 * If the file size is already greater than the current blocksize,
1012 * we will not grow. If there is more than one block in a file,
1013 * the blocksize cannot change.
1015 if (zp->z_blksz && zp->z_phys->zp_size > zp->z_blksz)
1018 error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
/* ENOTSUP just means the blocksize cannot change; not a failure. */
1020 if (error == ENOTSUP)
1022 ASSERT3U(error, ==, 0);
1024 /* What blocksize did we actually get? */
1025 dmu_object_size_from_db(zp->z_dbuf, &zp->z_blksz, &dummy);
1029 * This is a dummy interface used when pvn_vplist_dirty() should *not*
1030 * be calling back into the fs for a putpage(). E.g.: when truncating
1031 * a file, the pages being "thrown away" don't need to be written out.
1035 zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
1036 int flags, cred_t *cr)
1043 * Free space in a file.
1045 * IN: zp - znode of file to free data in.
1046 * off - start of section to free.
1047 * len - length of section to free (0 => to EOF).
1048 * flag - current file open mode flags.
1050 * RETURN: 0 if success
1051 * error code if failure
1054 zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
1056 vnode_t *vp = ZTOV(zp);
1058 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1059 zilog_t *zilog = zfsvfs->z_log;
1061 uint64_t end = off + len;
1062 uint64_t size, new_blksz;
1063 uint64_t pflags = zp->z_phys->zp_flags;
/*
 * Immutable/readonly files never free space; append-only files may
 * only be modified at or past the current EOF.
 */
1066 if ((pflags & (ZFS_IMMUTABLE|ZFS_READONLY)) ||
1067 off < zp->z_phys->zp_size && (pflags & ZFS_APPENDONLY))
1070 if (ZTOV(zp)->v_type == VFIFO)
1074 * If we will change zp_size then lock the whole file,
1075 * otherwise just lock the range being freed.
1077 if (len == 0 || off + len > zp->z_phys->zp_size) {
1078 rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);
1080 rl = zfs_range_lock(zp, off, len, RL_WRITER);
1081 /* recheck, in case zp_size changed */
1082 if (off + len > zp->z_phys->zp_size) {
1083 /* lost race: file size changed, lock whole file */
1084 zfs_range_unlock(rl);
1085 rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);
1090 * Nothing to do if file already at desired length.
1092 size = zp->z_phys->zp_size;
1093 if (len == 0 && size == off && off != 0) {
1094 zfs_range_unlock(rl);
1099 * Check for any locks in the region to be freed.
1101 if (MANDLOCK(vp, (mode_t)zp->z_phys->zp_mode)) {
1102 uint64_t start = off;
1103 uint64_t extent = len;
1107 extent += off - size;
1108 } else if (len == 0) {
1109 extent = size - off;
1111 if (error = chklock(vp, FWRITE, start, extent, flag, NULL)) {
1112 zfs_range_unlock(rl);
/* Reserve transaction space for the bonus buffer plus grow/free work. */
1117 tx = dmu_tx_create(zfsvfs->z_os);
1118 dmu_tx_hold_bonus(tx, zp->z_id);
1121 (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
1123 * We are growing the file past the current block size.
1125 if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
1126 ASSERT(!ISP2(zp->z_blksz));
1127 new_blksz = MIN(end, SPA_MAXBLOCKSIZE);
1129 new_blksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
1131 dmu_tx_hold_write(tx, zp->z_id, 0, MIN(end, new_blksz));
1132 } else if (off < size) {
1134 * If len == 0, we are truncating the file.
1136 dmu_tx_hold_free(tx, zp->z_id, off, len ? len : DMU_OBJECT_END);
1139 error = dmu_tx_assign(tx, zfsvfs->z_assign);
1141 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT)
1144 zfs_range_unlock(rl);
1149 zfs_grow_blocksize(zp, new_blksz, tx);
1151 if (end > size || len == 0)
1152 zp->z_phys->zp_size = end;
1155 objset_t *os = zfsvfs->z_os;
1156 uint64_t rlen = len;
1160 else if (end > size)
1162 VERIFY(0 == dmu_free_range(os, zp->z_id, off, rlen, tx));
1166 zfs_time_stamper(zp, CONTENT_MODIFIED, tx);
1167 zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
1170 zfs_range_unlock(rl);
1175 * Clear any mapped pages in the truncated region. This has to
1176 * happen outside of the transaction to avoid the possibility of
1177 * a deadlock with someone trying to push a page that we are
1178 * about to invalidate.
1180 rw_enter(&zp->z_map_lock, RW_WRITER);
1181 if (off < size && vn_has_cached_data(vp)) {
1183 uint64_t start = off & PAGEMASK;
1184 int poff = off & PAGEOFFSET;
1186 if (poff != 0 && (pp = page_lookup(vp, start, SE_SHARED))) {
1188 * We need to zero a partial page.
1190 pagezero(pp, poff, PAGESIZE - poff);
1194 error = pvn_vplist_dirty(vp, start, zfs_no_putpage,
1195 B_INVAL | B_TRUNC, NULL);
1198 rw_exit(&zp->z_map_lock);
/*
 * Create the on-disk skeleton of a new ZPL filesystem inside the
 * caller's transaction: master node, zpl properties, delete (unlinked)
 * queue, and the root directory znode.  A minimal stack-local zfsvfs
 * is fabricated so zfs_mknode() can operate.
 */
1204 zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
1207 uint64_t moid, doid;
1208 uint64_t version = 0;
1209 uint64_t sense = ZFS_CASE_SENSITIVE;
1213 znode_t *rootzp = NULL;
1219 * First attempt to create master node.
1222 * In an empty objset, there are no blocks to read and thus
1223 * there can be no i/o errors (which we assert below).
1225 moid = MASTER_NODE_OBJ;
1226 error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
1227 DMU_OT_NONE, 0, tx);
1231 * Set starting attributes.
1234 while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
1235 /* For the moment we expect all zpl props to be uint64_ts */
1239 ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
1240 VERIFY(nvpair_value_uint64(elem, &val) == 0);
1241 name = nvpair_name(elem);
1242 if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
1244 error = zap_update(os, moid, ZPL_VERSION_STR,
1245 8, 1, &version, tx);
1247 error = zap_update(os, moid, name, 8, 1, &val, tx);
/* Remember normalization and case-sensitivity for the root znode below. */
1250 if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
1252 else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
1255 ASSERT(version != 0);
1258 * Create a delete queue.
1260 doid = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
1262 error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &doid, tx);
1266 * Create root znode. Create minimal znode/vnode/zfsvfs
1267 * to allow zfs_mknode to work.
1269 vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
1270 vattr.va_type = VDIR;
1271 vattr.va_mode = S_IFDIR|0755;
1272 vattr.va_uid = crgetuid(cr);
1273 vattr.va_gid = crgetgid(cr);
1275 rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
1276 rootzp->z_zfsvfs = &zfsvfs;
1277 rootzp->z_unlinked = 0;
1278 rootzp->z_atime_dirty = 0;
1284 bzero(&zfsvfs, sizeof (zfsvfs_t));
1287 zfsvfs.z_assign = TXG_NOWAIT;
1288 zfsvfs.z_parent = &zfsvfs;
1289 zfsvfs.z_version = version;
1290 zfsvfs.z_use_fuids = USE_FUIDS(version, os);
1291 zfsvfs.z_norm = norm;
1293 * Fold case on file systems that are always or sometimes case
1296 if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
1297 zfsvfs.z_norm |= U8_TEXTPREP_TOUPPER;
1299 mutex_init(&zfsvfs.z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
1300 list_create(&zfsvfs.z_all_znodes, sizeof (znode_t),
1301 offsetof(znode_t, z_link_node));
1303 zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, 0, NULL, NULL);
1304 ASSERT3P(zp, ==, rootzp);
1305 error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
/* Drop the artificial hold so the temporary root znode can be freed. */
1308 ZTOV(rootzp)->v_count = 0;
1309 dmu_buf_rele(rootzp->z_dbuf, NULL);
1310 rootzp->z_dbuf = NULL;
1311 kmem_cache_free(znode_cache, rootzp);
1314 #endif /* _KERNEL */
1316 * Given an object number, return its parent object number and whether
1317 * or not the object is an extended attribute directory.
1320 zfs_obj_to_pobj(objset_t *osp, uint64_t obj, uint64_t *pobjp, int *is_xattrdir)
1323 dmu_object_info_t doi;
1327 if ((error = dmu_bonus_hold(osp, obj, FTAG, &db)) != 0)
1330 dmu_object_info_from_db(db, &doi);
/* Refuse objects whose bonus buffer is not a znode_phys_t. */
1331 if (doi.doi_bonus_type != DMU_OT_ZNODE ||
1332 doi.doi_bonus_size < sizeof (znode_phys_t)) {
1333 dmu_buf_rele(db, FTAG);
1338 *pobjp = zp->zp_parent;
1339 *is_xattrdir = ((zp->zp_flags & ZFS_XATTR) != 0) &&
1340 S_ISDIR(zp->zp_mode);
1341 dmu_buf_rele(db, FTAG);
1347 zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
1349 char *path = buf + len - 1;
1356 char component[MAXNAMELEN + 2];
1360 if ((error = zfs_obj_to_pobj(osp, obj, &pobj,
1361 &is_xattrdir)) != 0)
1372 (void) sprintf(component + 1, "<xattrdir>");
1374 error = zap_value_search(osp, pobj, obj,
1375 ZFS_DIRENT_OBJ(-1ULL), component + 1);
1380 complen = strlen(component);
1382 ASSERT(path >= buf);
1383 bcopy(component, path, complen);
1388 (void) memmove(buf, path, buf + len - path);