* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
+#include <sys/zfs_vnops.h>
+#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/kidmap.h>
+#include <sys/zpl.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
+#include <sys/sa.h>
+#include <sys/zfs_sa.h>
+#include <sys/zfs_stat.h>
#include "zfs_prop.h"
+#include "zfs_comutil.h"
/*
 * Define ZNODE_STATS to turn on statistic gathering. By default, it is only
 * turned on when DEBUG is also defined.
 */
#ifdef	ZNODE_STATS
#define	ZNODE_STAT_ADD(stat)		((stat)++)
#else
#define ZNODE_STAT_ADD(stat) /* nothing */
#endif /* ZNODE_STATS */
-#define POINTER_IS_VALID(p) (!((uintptr_t)(p) & 0x3))
-#define POINTER_INVALIDATE(pp) (*(pp) = (void *)((uintptr_t)(*(pp)) | 0x1))
-
/*
 * Functions needed for userland (i.e., libzpool) are not put under
 * #ifdef _KERNEL; the rest of the functions have dependencies
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
-static kmem_cache_t *znode_cache = NULL;
-/*ARGSUSED*/
-static void
-znode_evict_error(dmu_buf_t *dbuf, void *user_ptr)
-{
- /*
- * We should never drop all dbuf refs without first clearing
- * the eviction callback.
- */
- panic("evicting znode %p\n", user_ptr);
-}
+static kmem_cache_t *znode_cache = NULL;
/*ARGSUSED*/
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
znode_t *zp = buf;
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
-
- zp->z_vnode = vn_alloc(kmflags);
- if (zp->z_vnode == NULL) {
- return (-1);
- }
- ZTOV(zp)->v_data = zp;
-
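+	/*
+	 * On Linux the inode is embedded in the znode, so it is initialized
+	 * here once per slab object rather than allocating a separate
+	 * vnode as on Solaris.
+	 */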
+ inode_init_once(ZTOI(zp));
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&zp->z_range_avl, zfs_range_compare,
sizeof (rl_t), offsetof(rl_t, r_node));
- zp->z_dbuf = NULL;
zp->z_dirlocks = NULL;
+ zp->z_acl_cached = NULL;
+ zp->z_moved = 0;
return (0);
}
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
znode_t *zp = buf;
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
- ASSERT(ZTOV(zp)->v_data == zp);
- vn_free(ZTOV(zp));
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
rw_destroy(&zp->z_parent_lock);
avl_destroy(&zp->z_range_avl);
mutex_destroy(&zp->z_range_lock);
- ASSERT(zp->z_dbuf == NULL);
ASSERT(zp->z_dirlocks == NULL);
-}
-
-#ifdef ZNODE_STATS
-static struct {
- uint64_t zms_zfsvfs_invalid;
- uint64_t zms_zfsvfs_unmounted;
- uint64_t zms_zfsvfs_recheck_invalid;
- uint64_t zms_obj_held;
- uint64_t zms_vnode_locked;
- uint64_t zms_not_only_dnlc;
-} znode_move_stats;
-#endif /* ZNODE_STATS */
-
-static void
-zfs_znode_move_impl(znode_t *ozp, znode_t *nzp)
-{
- vnode_t *vp;
-
- /* Copy fields. */
- nzp->z_zfsvfs = ozp->z_zfsvfs;
-
- /* Swap vnodes. */
- vp = nzp->z_vnode;
- nzp->z_vnode = ozp->z_vnode;
- ozp->z_vnode = vp; /* let destructor free the overwritten vnode */
- ZTOV(ozp)->v_data = ozp;
- ZTOV(nzp)->v_data = nzp;
-
- nzp->z_id = ozp->z_id;
- ASSERT(ozp->z_dirlocks == NULL); /* znode not in use */
- ASSERT(avl_numnodes(&ozp->z_range_avl) == 0);
- nzp->z_unlinked = ozp->z_unlinked;
- nzp->z_atime_dirty = ozp->z_atime_dirty;
- nzp->z_zn_prefetch = ozp->z_zn_prefetch;
- nzp->z_blksz = ozp->z_blksz;
- nzp->z_seq = ozp->z_seq;
- nzp->z_mapcnt = ozp->z_mapcnt;
- nzp->z_last_itx = ozp->z_last_itx;
- nzp->z_gen = ozp->z_gen;
- nzp->z_sync_cnt = ozp->z_sync_cnt;
- nzp->z_phys = ozp->z_phys;
- nzp->z_dbuf = ozp->z_dbuf;
-
- /* Update back pointers. */
- (void) dmu_buf_update_user(nzp->z_dbuf, ozp, nzp, &nzp->z_phys,
- znode_evict_error);
-
- /*
- * Invalidate the original znode by clearing fields that provide a
- * pointer back to the znode. Set the low bit of the vfs pointer to
- * ensure that zfs_znode_move() recognizes the znode as invalid in any
- * subsequent callback.
- */
- ozp->z_dbuf = NULL;
- POINTER_INVALIDATE(&ozp->z_zfsvfs);
-}
-
-/*
- * Wrapper function for ZFS_ENTER that returns 0 if successful and otherwise
- * returns a non-zero error code.
- */
-static int
-zfs_enter(zfsvfs_t *zfsvfs)
-{
- ZFS_ENTER(zfsvfs);
- return (0);
-}
-
-/*ARGSUSED*/
-static kmem_cbrc_t
-zfs_znode_move(void *buf, void *newbuf, size_t size, void *arg)
-{
- znode_t *ozp = buf, *nzp = newbuf;
- zfsvfs_t *zfsvfs;
- vnode_t *vp;
-
- /*
- * The znode is on the file system's list of known znodes if the vfs
- * pointer is valid. We set the low bit of the vfs pointer when freeing
- * the znode to invalidate it, and the memory patterns written by kmem
- * (baddcafe and deadbeef) set at least one of the two low bits. A newly
- * created znode sets the vfs pointer last of all to indicate that the
- * znode is known and in a valid state to be moved by this function.
- */
- zfsvfs = ozp->z_zfsvfs;
- if (!POINTER_IS_VALID(zfsvfs)) {
- ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_invalid);
- return (KMEM_CBRC_DONT_KNOW);
- }
-
- /*
- * Ensure that the filesystem is not unmounted during the move.
- */
- if (zfs_enter(zfsvfs) != 0) { /* ZFS_ENTER */
- ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_unmounted);
- return (KMEM_CBRC_DONT_KNOW);
- }
-
- mutex_enter(&zfsvfs->z_znodes_lock);
- /*
- * Recheck the vfs pointer in case the znode was removed just before
- * acquiring the lock.
- */
- if (zfsvfs != ozp->z_zfsvfs) {
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
- ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck_invalid);
- return (KMEM_CBRC_DONT_KNOW);
- }
-
- /*
- * At this point we know that as long as we hold z_znodes_lock, the
- * znode cannot be freed and fields within the znode can be safely
- * accessed. Now, prevent a race with zfs_zget().
- */
- if (ZFS_OBJ_HOLD_TRYENTER(zfsvfs, ozp->z_id) == 0) {
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
- ZNODE_STAT_ADD(znode_move_stats.zms_obj_held);
- return (KMEM_CBRC_LATER);
- }
-
- vp = ZTOV(ozp);
- if (mutex_tryenter(&vp->v_lock) == 0) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
- ZNODE_STAT_ADD(znode_move_stats.zms_vnode_locked);
- return (KMEM_CBRC_LATER);
- }
-
- /* Only move znodes that are referenced _only_ by the DNLC. */
- if (vp->v_count != 1 || !vn_in_dnlc(vp)) {
- mutex_exit(&vp->v_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
- ZNODE_STAT_ADD(znode_move_stats.zms_not_only_dnlc);
- return (KMEM_CBRC_LATER);
- }
-
- /*
- * The znode is known and in a valid state to move. We're holding the
- * locks needed to execute the critical section.
- */
- zfs_znode_move_impl(ozp, nzp);
- mutex_exit(&vp->v_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
-
- list_link_replace(&ozp->z_link_node, &nzp->z_link_node);
- mutex_exit(&zfsvfs->z_znodes_lock);
- ZFS_EXIT(zfsvfs);
-
- return (KMEM_CBRC_YES);
+ ASSERT(zp->z_acl_cached == NULL);
}
void
zfs_znode_init(void)
{
ASSERT(znode_cache == NULL);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
- zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
- kmem_cache_set_move(znode_cache, zfs_znode_move);
+ zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_KMEM);
}
void
zfs_znode_fini(void)
{
/*
- * Cleanup vfs & vnode ops
- */
- zfs_remove_op_tables();
-
- /*
* Cleanup zcache
*/
if (znode_cache)
znode_cache = NULL;
}
-struct vnodeops *zfs_dvnodeops;
-struct vnodeops *zfs_fvnodeops;
-struct vnodeops *zfs_symvnodeops;
-struct vnodeops *zfs_xdvnodeops;
-struct vnodeops *zfs_evnodeops;
-
-void
-zfs_remove_op_tables()
-{
- /*
- * Remove vfs ops
- */
- ASSERT(zfsfstype);
- (void) vfs_freevfsops_by_type(zfsfstype);
- zfsfstype = 0;
-
- /*
- * Remove vnode ops
- */
- if (zfs_dvnodeops)
- vn_freevnodeops(zfs_dvnodeops);
- if (zfs_fvnodeops)
- vn_freevnodeops(zfs_fvnodeops);
- if (zfs_symvnodeops)
- vn_freevnodeops(zfs_symvnodeops);
- if (zfs_xdvnodeops)
- vn_freevnodeops(zfs_xdvnodeops);
- if (zfs_evnodeops)
- vn_freevnodeops(zfs_evnodeops);
-
- zfs_dvnodeops = NULL;
- zfs_fvnodeops = NULL;
- zfs_symvnodeops = NULL;
- zfs_xdvnodeops = NULL;
- zfs_evnodeops = NULL;
-}
-
-extern const fs_operation_def_t zfs_dvnodeops_template[];
-extern const fs_operation_def_t zfs_fvnodeops_template[];
-extern const fs_operation_def_t zfs_xdvnodeops_template[];
-extern const fs_operation_def_t zfs_symvnodeops_template[];
-extern const fs_operation_def_t zfs_evnodeops_template[];
-
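+/*
+ * Create the special shares directory used for SMB share-level ACLs.
+ * The body below is retained from the OpenSolaris implementation and is
+ * compiled only when HAVE_SHARE is defined; it still relies on some
+ * Solaris-era vnode interfaces.
+ */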
int
-zfs_create_op_tables()
+zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx)
{
+#ifdef HAVE_SHARE
+ zfs_acl_ids_t acl_ids;
+ vattr_t vattr;
+ znode_t *sharezp;
+ vnode_t *vp;
+ znode_t *zp;
int error;
- /*
- * zfs_dvnodeops can be set if mod_remove() calls mod_installfs()
- * due to a failure to remove the the 2nd modlinkage (zfs_modldrv).
- * In this case we just return as the ops vectors are already set up.
- */
- if (zfs_dvnodeops)
- return (0);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
- &zfs_dvnodeops);
- if (error)
- return (error);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_fvnodeops_template,
- &zfs_fvnodeops);
- if (error)
- return (error);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_symvnodeops_template,
- &zfs_symvnodeops);
- if (error)
- return (error);
-
- error = vn_make_ops(MNTTYPE_ZFS, zfs_xdvnodeops_template,
- &zfs_xdvnodeops);
- if (error)
- return (error);
+ vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
+ vattr.va_mode = S_IFDIR | 0555;
+ vattr.va_uid = crgetuid(kcred);
+ vattr.va_gid = crgetgid(kcred);
+
+ sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+ sharezp->z_moved = 0;
+ sharezp->z_unlinked = 0;
+ sharezp->z_atime_dirty = 0;
+	sharezp->z_zfsvfs = zsb;
+	sharezp->z_is_sa = zsb->z_use_sa;
+
+ vp = ZTOV(sharezp);
+ vn_reinit(vp);
+ vp->v_type = VDIR;
- error = vn_make_ops(MNTTYPE_ZFS, zfs_evnodeops_template,
- &zfs_evnodeops);
+ VERIFY(0 == zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
+ kcred, NULL, &acl_ids));
+ zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
+ ASSERT3P(zp, ==, sharezp);
+ ASSERT(!vn_in_dnlc(ZTOV(sharezp))); /* not valid to move */
+ POINTER_INVALIDATE(&sharezp->z_zfsvfs);
+	error = zap_add(zsb->z_os, MASTER_NODE_OBJ,
+	    ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
+	zsb->z_shares_dir = sharezp->z_id;
+
+ zfs_acl_ids_free(&acl_ids);
+	/* ZTOV(sharezp)->v_count = 0; */
+ sa_handle_destroy(sharezp->z_sa_hdl);
+ kmem_cache_free(znode_cache, sharezp);
return (error);
+#else
+ return (0);
+#endif /* HAVE_SHARE */
}
-/*
- * zfs_init_fs - Initialize the zfsvfs struct and the file system
- * incore "master" object. Verify version compatibility.
- */
-int
-zfs_init_fs(zfsvfs_t *zfsvfs, znode_t **zpp)
+static void
+zfs_znode_sa_init(zfs_sb_t *zsb, znode_t *zp,
+ dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
- extern int zfsfstype;
+ ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zsb, zp->z_id)));
- objset_t *os = zfsvfs->z_os;
- int i, error;
- uint64_t fsid_guid;
- uint64_t zval;
-
- *zpp = NULL;
+ mutex_enter(&zp->z_lock);
- error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
- if (error) {
- return (error);
- } else if (zfsvfs->z_version > ZPL_VERSION) {
- (void) printf("Mismatched versions: File system "
- "is version %llu on-disk format, which is "
- "incompatible with this software version %lld!",
- (u_longlong_t)zfsvfs->z_version, ZPL_VERSION);
- return (ENOTSUP);
+ ASSERT(zp->z_sa_hdl == NULL);
+ ASSERT(zp->z_acl_cached == NULL);
+ if (sa_hdl == NULL) {
+ VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, zp,
+ SA_HDL_SHARED, &zp->z_sa_hdl));
+ } else {
+ zp->z_sa_hdl = sa_hdl;
+ sa_set_userp(sa_hdl, zp);
}
- if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
- return (error);
- zfsvfs->z_norm = (int)zval;
- if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
- return (error);
- zfsvfs->z_utf8 = (zval != 0);
- if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
- return (error);
- zfsvfs->z_case = (uint_t)zval;
- /*
- * Fold case on file systems that are always or sometimes case
- * insensitive.
- */
- if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
- zfsvfs->z_case == ZFS_CASE_MIXED)
- zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
-
- /*
- * The fsid is 64 bits, composed of an 8-bit fs type, which
- * separates our fsid from any other filesystem types, and a
- * 56-bit objset unique ID. The objset unique ID is unique to
- * all objsets open on this system, provided by unique_create().
- * The 8-bit fs type must be put in the low bits of fsid[1]
- * because that's where other Solaris filesystems put it.
- */
- fsid_guid = dmu_objset_fsid_guid(os);
- ASSERT((fsid_guid & ~((1ULL<<56)-1)) == 0);
- zfsvfs->z_vfs->vfs_fsid.val[0] = fsid_guid;
- zfsvfs->z_vfs->vfs_fsid.val[1] = ((fsid_guid>>32) << 8) |
- zfsfstype & 0xFF;
-
- error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
- &zfsvfs->z_root);
- if (error)
- return (error);
- ASSERT(zfsvfs->z_root != 0);
-
- error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
- &zfsvfs->z_unlinkedobj);
- if (error)
- return (error);
+ zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
- /*
- * Initialize zget mutex's
- */
- for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
- mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
+ mutex_exit(&zp->z_lock);
+}
- error = zfs_zget(zfsvfs, zfsvfs->z_root, zpp);
- if (error) {
- /*
- * On error, we destroy the mutexes here since it's not
- * possible for the caller to determine if the mutexes were
- * initialized properly.
- */
- for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
- mutex_destroy(&zfsvfs->z_hold_mtx[i]);
- return (error);
- }
- ASSERT3U((*zpp)->z_id, ==, zfsvfs->z_root);
- error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
- &zfsvfs->z_fuid_obj);
- if (error == ENOENT)
- error = 0;
+void
+zfs_znode_dmu_fini(znode_t *zp)
+{
+ ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(ZTOZSB(zp), zp->z_id)) ||
+ zp->z_unlinked ||
+ RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
- return (0);
+ sa_handle_destroy(zp->z_sa_hdl);
+ zp->z_sa_hdl = NULL;
}
/*
- * define a couple of values we need available
- * for both 64 and 32 bit environments.
+ * Called by new_inode() to allocate a new inode.
*/
-#ifndef NBITSMINOR64
-#define NBITSMINOR64 32
-#endif
-#ifndef MAXMAJ64
-#define MAXMAJ64 0xffffffffUL
-#endif
-#ifndef MAXMIN64
-#define MAXMIN64 0xffffffffUL
-#endif
-
-/*
- * Create special expldev for ZFS private use.
- * Can't use standard expldev since it doesn't do
- * what we want. The standard expldev() takes a
- * dev32_t in LP64 and expands it to a long dev_t.
- * We need an interface that takes a dev32_t in ILP32
- * and expands it to a long dev_t.
- */
-static uint64_t
-zfs_expldev(dev_t dev)
+int
+zfs_inode_alloc(struct super_block *sb, struct inode **ip)
{
-#ifndef _LP64
- major_t major = (major_t)dev >> NBITSMINOR32 & MAXMAJ32;
- return (((uint64_t)major << NBITSMINOR64) |
- ((minor_t)dev & MAXMIN32));
-#else
- return (dev);
-#endif
+ znode_t *zp;
+
+ zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+ *ip = ZTOI(zp);
+
+ return (0);
}
/*
- * Special cmpldev for ZFS private use.
- * Can't use standard cmpldev since it takes
- * a long dev_t and compresses it to dev32_t in
- * LP64. We need to do a compaction of a long dev_t
- * to a dev32_t in ILP32.
+ * Called in multiple places when an inode should be destroyed.
*/
-dev_t
-zfs_cmpldev(uint64_t dev)
+void
+zfs_inode_destroy(struct inode *ip)
{
-#ifndef _LP64
- minor_t minor = (minor_t)dev & MAXMIN64;
- major_t major = (major_t)(dev >> NBITSMINOR64) & MAXMAJ64;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ZTOZSB(zp);
- if (major > MAXMAJ32 || minor > MAXMIN32)
- return (NODEV32);
+ mutex_enter(&zsb->z_znodes_lock);
+ list_remove(&zsb->z_all_znodes, zp);
+ mutex_exit(&zsb->z_znodes_lock);
- return (((dev32_t)major << NBITSMINOR32) | minor);
-#else
- return (dev);
-#endif
+ if (zp->z_acl_cached) {
+ zfs_acl_free(zp->z_acl_cached);
+ zp->z_acl_cached = NULL;
+ }
+
+ kmem_cache_free(znode_cache, zp);
}
static void
-zfs_znode_dmu_init(zfsvfs_t *zfsvfs, znode_t *zp, dmu_buf_t *db)
+zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip)
{
- znode_t *nzp;
+ uint64_t rdev;
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
- ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
-
- mutex_enter(&zp->z_lock);
-
- ASSERT(zp->z_dbuf == NULL);
- zp->z_dbuf = db;
- nzp = dmu_buf_set_user_ie(db, zp, &zp->z_phys, znode_evict_error);
+ switch (ip->i_mode & S_IFMT) {
+ case S_IFREG:
+ ip->i_op = &zpl_inode_operations;
+ ip->i_fop = &zpl_file_operations;
+ ip->i_mapping->a_ops = &zpl_address_space_operations;
+ break;
- /*
- * there should be no
- * concurrent zgets on this object.
- */
- if (nzp != NULL)
- panic("existing znode %p for dbuf %p", (void *)nzp, (void *)db);
+ case S_IFDIR:
+ ip->i_op = &zpl_dir_inode_operations;
+ ip->i_fop = &zpl_dir_file_operations;
+ ITOZ(ip)->z_zn_prefetch = B_TRUE;
+ break;
- /*
- * Slap on VROOT if we are the root znode
- */
- if (zp->z_id == zfsvfs->z_root)
- ZTOV(zp)->v_flag |= VROOT;
+ case S_IFLNK:
+ ip->i_op = &zpl_symlink_inode_operations;
+ break;
- mutex_exit(&zp->z_lock);
- vn_exists(ZTOV(zp));
-}
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
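+		/*
+		 * Fetch the device number from the SA layer and let
+		 * init_special_inode() wire up the correct operations.
+		 */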
+ VERIFY(sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zsb),
+ &rdev, sizeof (rdev)) == 0);
+ init_special_inode(ip, ip->i_mode, rdev);
+ ip->i_op = &zpl_special_inode_operations;
+ break;
-void
-zfs_znode_dmu_fini(znode_t *zp)
-{
- dmu_buf_t *db = zp->z_dbuf;
- ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
- zp->z_unlinked ||
- RW_WRITE_HELD(&zp->z_zfsvfs->z_teardown_inactive_lock));
- ASSERT(zp->z_dbuf != NULL);
- zp->z_dbuf = NULL;
- VERIFY(zp == dmu_buf_update_user(db, zp, NULL, NULL, NULL));
- dmu_buf_rele(db, NULL);
+ default:
+ printk("ZFS: Invalid mode: 0x%x\n", ip->i_mode);
+ VERIFY(0);
+ }
}
/*
- * Construct a new znode/vnode and intialize.
+ * Construct a znode+inode and initialize.
*
* This does not do a call to dmu_set_user() that is
* up to the caller to do, in case you don't want to
* return the znode
*/
static znode_t *
-zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz)
+zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
+ dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl)
{
znode_t *zp;
- vnode_t *vp;
+ struct inode *ip;
+ uint64_t parent;
+ sa_bulk_attr_t bulk[9];
+ int count = 0;
- zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+ ASSERT(zsb != NULL);
- ASSERT(zp->z_dirlocks == NULL);
- ASSERT(zp->z_dbuf == NULL);
- ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
+ ip = new_inode(zsb->z_sb);
+ if (ip == NULL)
+ return (NULL);
- /*
- * Defer setting z_zfsvfs until the znode is ready to be a candidate for
- * the zfs_znode_move() callback.
- */
- zp->z_phys = NULL;
+ zp = ITOZ(ip);
+ ASSERT(zp->z_dirlocks == NULL);
+ zp->z_moved = 0;
+ zp->z_sa_hdl = NULL;
zp->z_unlinked = 0;
zp->z_atime_dirty = 0;
zp->z_mapcnt = 0;
- zp->z_last_itx = 0;
zp->z_id = db->db_object;
zp->z_blksz = blksz;
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
+ zp->z_is_zvol = 0;
+
+ zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);
+
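+	/*
+	 * Register the attributes cached in the znode and fetch them all
+	 * with a single SA bulk lookup below.
+	 */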
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &zp->z_mode, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
+ &zp->z_pflags, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
+ &parent, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
+ &zp->z_atime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);
+
+ if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
+ if (hdl == NULL)
+ sa_handle_destroy(zp->z_sa_hdl);
+
+ goto error;
+ }
- vp = ZTOV(zp);
- vn_reinit(vp);
-
- zfs_znode_dmu_init(zfsvfs, zp, db);
-
- zp->z_gen = zp->z_phys->zp_gen;
-
- vp->v_vfsp = zfsvfs->z_parent->z_vfs;
- vp->v_type = IFTOVT((mode_t)zp->z_phys->zp_mode);
+ ip->i_ino = obj;
+ ip->i_mode = zp->z_mode;
+ ip->i_mtime = ip->i_atime = ip->i_ctime = CURRENT_TIME_SEC;
+ zfs_inode_set_ops(zsb, ip);
- switch (vp->v_type) {
- case VDIR:
- if (zp->z_phys->zp_flags & ZFS_XATTR) {
- vn_setops(vp, zfs_xdvnodeops);
- vp->v_flag |= V_XATTRDIR;
- } else {
- vn_setops(vp, zfs_dvnodeops);
- }
- zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
- break;
- case VBLK:
- case VCHR:
- vp->v_rdev = zfs_cmpldev(zp->z_phys->zp_rdev);
- /*FALLTHROUGH*/
- case VFIFO:
- case VSOCK:
- case VDOOR:
- vn_setops(vp, zfs_fvnodeops);
- break;
- case VREG:
- vp->v_flag |= VMODSORT;
- vn_setops(vp, zfs_fvnodeops);
- break;
- case VLNK:
- vn_setops(vp, zfs_symvnodeops);
- break;
- default:
- vn_setops(vp, zfs_evnodeops);
- break;
- }
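+	/*
+	 * Hash the new inode; failure means an inode with this object
+	 * number is already hashed, so back out through the error path.
+	 */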
+ if (insert_inode_locked(ip))
+ goto error;
- mutex_enter(&zfsvfs->z_znodes_lock);
- list_insert_tail(&zfsvfs->z_all_znodes, zp);
+ mutex_enter(&zsb->z_znodes_lock);
+ list_insert_tail(&zsb->z_all_znodes, zp);
membar_producer();
- /*
- * Everything else must be valid before assigning z_zfsvfs makes the
- * znode eligible for zfs_znode_move().
- */
- zp->z_zfsvfs = zfsvfs;
- mutex_exit(&zfsvfs->z_znodes_lock);
+ mutex_exit(&zsb->z_znodes_lock);
- VFS_HOLD(zfsvfs->z_vfs);
+ unlock_new_inode(ip);
return (zp);
+
+error:
+ unlock_new_inode(ip);
+ iput(ip);
+	return (NULL);
}
/*
+ * Update the embedded inode given the znode. We should work toward
+ * eliminating this function as soon as possible by removing values
+ * which are duplicated between the znode and inode. If the generic
+ * inode has the correct field it should be used, and the ZFS code
+ * updated to access the inode. This can be done incrementally.
+ */
+void
+zfs_inode_update(znode_t *zp)
+{
+ zfs_sb_t *zsb;
+ struct inode *ip;
+ uint32_t blksize;
+ uint64_t atime[2], mtime[2], ctime[2];
+
+ ASSERT(zp != NULL);
+ zsb = ZTOZSB(zp);
+ ip = ZTOI(zp);
+
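+	/* Read the current timestamps directly from the SA layer. */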
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zsb), &atime, 16);
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zsb), &mtime, 16);
+ sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zsb), &ctime, 16);
+
+ spin_lock(&ip->i_lock);
+ ip->i_generation = zp->z_gen;
+ ip->i_uid = zp->z_uid;
+ ip->i_gid = zp->z_gid;
+ ip->i_nlink = zp->z_links;
+ ip->i_mode = zp->z_mode;
+ ip->i_blkbits = SPA_MINBLOCKSHIFT;
+ dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize,
+ (u_longlong_t *)&ip->i_blocks);
+
+ ZFS_TIME_DECODE(&ip->i_atime, atime);
+ ZFS_TIME_DECODE(&ip->i_mtime, mtime);
+ ZFS_TIME_DECODE(&ip->i_ctime, ctime);
+
+ i_size_write(ip, zp->z_size);
+ spin_unlock(&ip->i_lock);
+}
+
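+/*
+ * Zero-filled template values used by zfs_mknode() below when laying
+ * out attributes in the legacy DMU_OT_ZNODE bonus format.
+ */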
+static uint64_t empty_xattr;
+static uint64_t pad[4];
+static zfs_acl_phys_t acl_phys;
+/*
* Create a new DMU object to hold a zfs znode.
*
* IN: dzp - parent directory for new znode
* flag - flags:
* IS_ROOT_NODE - new object will be root
* IS_XATTR - new object is an attribute
- * IS_REPLAY - intent log replay
* bonuslen - length of bonus buffer
* setaclp - File/Dir initial ACL
* fuidp - Tracks fuid allocation.
*/
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
- uint_t flag, znode_t **zpp, int bonuslen, zfs_acl_t *setaclp,
- zfs_fuid_info_t **fuidp)
+ uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
+ uint64_t crtime[2], atime[2], mtime[2], ctime[2];
+ uint64_t mode, size, links, parent, pflags;
+ uint64_t dzp_pflags = 0;
+ uint64_t rdev = 0;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
dmu_buf_t *db;
- znode_phys_t *pzp;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
timestruc_t now;
uint64_t gen, obj;
int err;
-
- ASSERT(vap && (vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
-
- if (zfsvfs->z_replay) {
+ int bonuslen;
+ sa_handle_t *sa_hdl;
+ dmu_object_type_t obj_type;
+ sa_bulk_attr_t *sa_attrs;
+ int cnt = 0;
+ zfs_acl_locator_cb_t locate = { 0 };
+
+ if (zsb->z_replay) {
obj = vap->va_nodeid;
- flag |= IS_REPLAY;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
} else {
gen = dmu_tx_get_txg(tx);
}
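+	/*
+	 * Pools which support system attributes (SA) store the znode in
+	 * a DMU_OT_SA bonus buffer; older pools use the fixed
+	 * znode_phys_t layout, so size the bonus buffer accordingly.
+	 */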
+ obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
+ bonuslen = (obj_type == DMU_OT_SA) ?
+ DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;
+
/*
* Create a new DMU object.
*/
/*
* There's currently no mechanism for pre-reading the blocks that will
- * be to needed allocate a new object, so we accept the small chance
+ * be needed to allocate a new object, so we accept the small chance
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
- if (vap->va_type == VDIR) {
- if (flag & IS_REPLAY) {
- err = zap_create_claim_norm(zfsvfs->z_os, obj,
- zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
- DMU_OT_ZNODE, sizeof (znode_phys_t) + bonuslen, tx);
+ if (S_ISDIR(vap->va_mode)) {
+ if (zsb->z_replay) {
+ err = zap_create_claim_norm(zsb->z_os, obj,
+ zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
+ obj_type, bonuslen, tx);
ASSERT3U(err, ==, 0);
} else {
- obj = zap_create_norm(zfsvfs->z_os,
- zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
- DMU_OT_ZNODE, sizeof (znode_phys_t) + bonuslen, tx);
+ obj = zap_create_norm(zsb->z_os,
+ zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
+ obj_type, bonuslen, tx);
}
} else {
- if (flag & IS_REPLAY) {
- err = dmu_object_claim(zfsvfs->z_os, obj,
+ if (zsb->z_replay) {
+ err = dmu_object_claim(zsb->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
- DMU_OT_ZNODE, sizeof (znode_phys_t) + bonuslen, tx);
+ obj_type, bonuslen, tx);
ASSERT3U(err, ==, 0);
} else {
- obj = dmu_object_alloc(zfsvfs->z_os,
+ obj = dmu_object_alloc(zsb->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
- DMU_OT_ZNODE, sizeof (znode_phys_t) + bonuslen, tx);
+ obj_type, bonuslen, tx);
}
}
- VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, obj, NULL, &db));
- dmu_buf_will_dirty(db, tx);
- /*
- * Initialize the znode physical data to zero.
- */
- ASSERT(db->db_size >= sizeof (znode_phys_t));
- bzero(db->db_data, db->db_size);
- pzp = db->db_data;
+ ZFS_OBJ_HOLD_ENTER(zsb, obj);
+ VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
* to reference the just-allocated physical data area.
*/
if (flag & IS_ROOT_NODE) {
- dzp->z_dbuf = db;
- dzp->z_phys = pzp;
dzp->z_id = obj;
+ } else {
+ dzp_pflags = dzp->z_pflags;
}
/*
* If parent is an xattr, so am I.
*/
- if (dzp->z_phys->zp_flags & ZFS_XATTR)
+ if (dzp_pflags & ZFS_XATTR) {
flag |= IS_XATTR;
-
- if (vap->va_type == VBLK || vap->va_type == VCHR) {
- pzp->zp_rdev = zfs_expldev(vap->va_rdev);
}
- if (zfsvfs->z_use_fuids)
- pzp->zp_flags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
+ if (zsb->z_use_fuids)
+ pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
+ else
+ pflags = 0;
- if (vap->va_type == VDIR) {
- pzp->zp_size = 2; /* contents ("." and "..") */
- pzp->zp_links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
+ if (S_ISDIR(vap->va_mode)) {
+ size = 2; /* contents ("." and "..") */
+ links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
+ } else {
+ size = links = 0;
}
- pzp->zp_parent = dzp->z_id;
+ if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode) ||
+ S_ISFIFO(vap->va_mode) || S_ISSOCK(vap->va_mode))
+ rdev = vap->va_rdev;
+
+ parent = dzp->z_id;
+ mode = acl_ids->z_mode;
if (flag & IS_XATTR)
- pzp->zp_flags |= ZFS_XATTR;
+ pflags |= ZFS_XATTR;
- pzp->zp_gen = gen;
+ /*
+	 * Whether any execs are denied will be determined when
+	 * zfs_mode_compute() is called.
+ */
+ pflags |= acl_ids->z_aclp->z_hints &
+ (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
+ ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
- ZFS_TIME_ENCODE(&now, pzp->zp_crtime);
- ZFS_TIME_ENCODE(&now, pzp->zp_ctime);
+ ZFS_TIME_ENCODE(&now, crtime);
+ ZFS_TIME_ENCODE(&now, ctime);
+
+ if (vap->va_mask & ATTR_ATIME) {
+ ZFS_TIME_ENCODE(&vap->va_atime, atime);
+ } else {
+ ZFS_TIME_ENCODE(&now, atime);
+ }
- if (vap->va_mask & AT_ATIME) {
- ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime);
+ if (vap->va_mask & ATTR_MTIME) {
+ ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
- ZFS_TIME_ENCODE(&now, pzp->zp_atime);
+ ZFS_TIME_ENCODE(&now, mtime);
}
- if (vap->va_mask & AT_MTIME) {
- ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime);
+ /* Now add in all of the "SA" attributes */
+ VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED,
+ &sa_hdl));
+
+	/*
+	 * Set up the array of attributes to be replaced/set on the new file.
+	 *
+	 * The order for DMU_OT_ZNODE is critical since it needs to be
+	 * constructed in the old znode_phys_t format. Don't change this
+	 * ordering.
+	 */
+	sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
+
+ if (obj_type == DMU_OT_ZNODE) {
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
+ NULL, &atime, 16);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
+ NULL, &mtime, 16);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
+ NULL, &ctime, 16);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
+ NULL, &crtime, 16);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
+ NULL, &gen, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
+ NULL, &mode, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
+ NULL, &size, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
+ NULL, &parent, 8);
} else {
- ZFS_TIME_ENCODE(&now, pzp->zp_mtime);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
+ NULL, &mode, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
+ NULL, &size, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
+ NULL, &gen, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb),
+ NULL, &acl_ids->z_fuid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb),
+ NULL, &acl_ids->z_fgid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
+ NULL, &parent, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
+ NULL, &pflags, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
+ NULL, &atime, 16);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
+ NULL, &mtime, 16);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
+ NULL, &ctime, 16);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
+ NULL, &crtime, 16);
}
- pzp->zp_mode = MAKEIMODE(vap->va_type, vap->va_mode);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8);
+
+ if (obj_type == DMU_OT_ZNODE) {
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL,
+ &empty_xattr, 8);
+ }
+ if (obj_type == DMU_OT_ZNODE ||
+ (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode) ||
+ S_ISFIFO(vap->va_mode) || S_ISSOCK(vap->va_mode))) {
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb),
+ NULL, &rdev, 8);
+ }
+ if (obj_type == DMU_OT_ZNODE) {
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
+ NULL, &pflags, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL,
+ &acl_ids->z_fuid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL,
+ &acl_ids->z_fgid, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad,
+ sizeof (uint64_t) * 4);
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL,
+ &acl_phys, sizeof (zfs_acl_phys_t));
+ } else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL,
+ &acl_ids->z_aclp->z_acl_count, 8);
+ locate.cb_aclp = acl_ids->z_aclp;
+ SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb),
+ zfs_acl_data_locator, &locate,
+ acl_ids->z_aclp->z_acl_bytes);
+ mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
+ acl_ids->z_fuid, acl_ids->z_fgid);
+ }
+
+ VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
+
if (!(flag & IS_ROOT_NODE)) {
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
- *zpp = zfs_znode_alloc(zfsvfs, db, 0);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
+ *zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl);
+ ASSERT(*zpp != NULL);
+ ASSERT(dzp != NULL);
+ err = zpl_xattr_security_init(ZTOI(*zpp), ZTOI(dzp));
+ ASSERT3S(err, ==, 0);
} else {
/*
* If we are creating the root node, the "parent" we
* passed in is the znode for the root.
*/
*zpp = dzp;
+
+ (*zpp)->z_sa_hdl = sa_hdl;
}
- zfs_perm_init(*zpp, dzp, flag, vap, tx, cr, setaclp, fuidp);
+
+ (*zpp)->z_pflags = pflags;
+ (*zpp)->z_mode = mode;
+
+ if (obj_type == DMU_OT_ZNODE ||
+ acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
+ err = zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx);
+ ASSERT3S(err, ==, 0);
+ }
+	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
+/*
+ * zfs_xvattr_set only updates the in-core attributes; it is assumed
+ * the caller will perform an sa_bulk_update to push the changes out.
+ */
void
-zfs_xvattr_set(znode_t *zp, xvattr_t *xvap)
+zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
xoptattr_t *xoap;
	xoap = xva_getxoptattr(xvap);
	ASSERT(xoap);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
- ZFS_TIME_ENCODE(&xoap->xoa_createtime, zp->z_phys->zp_crtime);
+ uint64_t times[2];
+ ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
+ (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
+		    &times, sizeof (times), tx);
XVA_SET_RTN(xvap, XAT_CREATETIME);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
- ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly);
+ ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
- ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden);
+ ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
- ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system);
+ ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
- ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive);
+ ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
- ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable);
+ ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
- ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink);
+ ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
- ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly);
+ ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
- ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump);
+ ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
- ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque);
+ ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
- xoap->xoa_av_quarantined);
+ xoap->xoa_av_quarantined, zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
- ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified);
+ ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
+ zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
- (void) memcpy(zp->z_phys + 1, xoap->xoa_av_scanstamp,
- sizeof (xoap->xoa_av_scanstamp));
- zp->z_phys->zp_flags |= ZFS_BONUS_SCANSTAMP;
+ zfs_sa_set_scanstamp(zp, xvap, tx);
XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
}
+ if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
+ ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_REPARSE);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
+ ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_OFFLINE);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
+ ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_SPARSE);
+ }
}
int
-zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
+zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
znode_t *zp;
int err;
+ sa_handle_t *hdl;
*zpp = NULL;
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_ENTER(zsb, obj_num);
- err = dmu_bonus_hold(zfsvfs->z_os, obj_num, NULL, &db);
+ err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
if (err) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
dmu_object_info_from_db(db, &doi);
- if (doi.doi_bonus_type != DMU_OT_ZNODE ||
- doi.doi_bonus_size < sizeof (znode_phys_t)) {
- dmu_buf_rele(db, NULL);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ if (doi.doi_bonus_type != DMU_OT_SA &&
+ (doi.doi_bonus_type != DMU_OT_ZNODE ||
+ (doi.doi_bonus_type == DMU_OT_ZNODE &&
+ doi.doi_bonus_size < sizeof (znode_phys_t)))) {
+ sa_buf_rele(db, NULL);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EINVAL);
}
- zp = dmu_buf_get_user(db);
- if (zp != NULL) {
- mutex_enter(&zp->z_lock);
+ hdl = dmu_buf_get_user(db);
+ if (hdl != NULL) {
+ zp = sa_get_userdata(hdl);
+
/*
- * Since we do immediate eviction of the z_dbuf, we
- * should never find a dbuf with a znode that doesn't
- * know about the dbuf.
+ * Since "SA" does immediate eviction we
+ * should never find a sa handle that doesn't
+ * know about the znode.
*/
- ASSERT3P(zp->z_dbuf, ==, db);
+
+ ASSERT3P(zp, !=, NULL);
+
+ mutex_enter(&zp->z_lock);
ASSERT3U(zp->z_id, ==, obj_num);
if (zp->z_unlinked) {
err = ENOENT;
} else {
- VN_HOLD(ZTOV(zp));
+ igrab(ZTOI(zp));
*zpp = zp;
err = 0;
}
- dmu_buf_rele(db, NULL);
+ sa_buf_rele(db, NULL);
mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
/*
- * Not found create new znode/vnode
+	 * Not found; create a new znode/inode, but only if the file exists.
+	 *
+	 * There is a small window where zfs_vget() could
+	 * find this object while a file create is still in
+	 * progress. This is checked for in zfs_znode_alloc().
+	 *
+	 * If zfs_znode_alloc() fails it will drop the hold on the
+	 * bonus buffer.
*/
- zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
- *zpp = zp;
- return (0);
+ zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
+ doi.doi_bonus_type, obj_num, NULL);
+ if (zp == NULL) {
+ err = ENOENT;
+ } else {
+ *zpp = zp;
+ }
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
+ return (err);
}
int
zfs_rezget(znode_t *zp)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
+ uint64_t mode;
+ sa_bulk_attr_t bulk[8];
int err;
+ int count = 0;
+ uint64_t gen;
+
+ ZFS_OBJ_HOLD_ENTER(zsb, obj_num);
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
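+	/* Discard any cached ACL; it will be re-read from the new data. */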
+ mutex_enter(&zp->z_acl_lock);
+ if (zp->z_acl_cached) {
+ zfs_acl_free(zp->z_acl_cached);
+ zp->z_acl_cached = NULL;
+ }
- err = dmu_bonus_hold(zfsvfs->z_os, obj_num, NULL, &db);
+ mutex_exit(&zp->z_acl_lock);
+ ASSERT(zp->z_sa_hdl == NULL);
+ err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
if (err) {
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (err);
}
dmu_object_info_from_db(db, &doi);
- if (doi.doi_bonus_type != DMU_OT_ZNODE ||
- doi.doi_bonus_size < sizeof (znode_phys_t)) {
- dmu_buf_rele(db, NULL);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ if (doi.doi_bonus_type != DMU_OT_SA &&
+ (doi.doi_bonus_type != DMU_OT_ZNODE ||
+ (doi.doi_bonus_type == DMU_OT_ZNODE &&
+ doi.doi_bonus_size < sizeof (znode_phys_t)))) {
+ sa_buf_rele(db, NULL);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EINVAL);
}
- if (((znode_phys_t *)db->db_data)->zp_gen != zp->z_gen) {
- dmu_buf_rele(db, NULL);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);
+
+ /* reload cached values */
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
+ &gen, sizeof (gen));
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
+ &zp->z_size, sizeof (zp->z_size));
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
+ &zp->z_links, sizeof (zp->z_links));
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
+ &zp->z_pflags, sizeof (zp->z_pflags));
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
+ &zp->z_atime, sizeof (zp->z_atime));
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
+ &zp->z_uid, sizeof (zp->z_uid));
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
+ &zp->z_gid, sizeof (zp->z_gid));
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
+ &mode, sizeof (mode));
+
+ if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
+ zfs_znode_dmu_fini(zp);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
+ return (EIO);
+ }
+
+ zp->z_mode = mode;
+
+ if (gen != zp->z_gen) {
+ zfs_znode_dmu_fini(zp);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (EIO);
}
- zfs_znode_dmu_init(zfsvfs, zp, db);
- zp->z_unlinked = (zp->z_phys->zp_links == 0);
+ zp->z_unlinked = (zp->z_links == 0);
zp->z_blksz = doi.doi_data_block_size;
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- objset_t *os = zfsvfs->z_os;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ objset_t *os = zsb->z_os;
uint64_t obj = zp->z_id;
- uint64_t acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj;
+ uint64_t acl_obj = zfs_external_acl(zp);
- ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
- if (acl_obj)
+ ZFS_OBJ_HOLD_ENTER(zsb, obj);
+ if (acl_obj) {
+ VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
+ }
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
- zfs_znode_free(zp);
+ ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
void
zfs_zinactive(znode_t *zp)
{
- vnode_t *vp = ZTOV(zp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
- ASSERT(zp->z_dbuf && zp->z_phys);
+ ASSERT(zp->z_sa_hdl);
/*
	 * Don't allow a zfs_zget() while we're trying to release this znode.
*/
- ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
-
+ ZFS_OBJ_HOLD_ENTER(zsb, z_id);
mutex_enter(&zp->z_lock);
- mutex_enter(&vp->v_lock);
- vp->v_count--;
- if (vp->v_count > 0 || vn_has_cached_data(vp)) {
- /*
- * If the hold count is greater than zero, somebody has
- * obtained a new reference on this znode while we were
- * processing it here, so we are done. If we still have
- * mapped pages then we are also done, since we don't
- * want to inactivate the znode until the pages get pushed.
- *
- * XXX - if vn_has_cached_data(vp) is true, but count == 0,
- * this seems like it would leave the znode hanging with
- * no chance to go inactive...
- */
- mutex_exit(&vp->v_lock);
- mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
- return;
- }
- mutex_exit(&vp->v_lock);
/*
	 * If this was the last reference to a file with no links,
	 * remove the file from the file system.
*/
if (zp->z_unlinked) {
mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
zfs_rmnode(zp);
return;
}
+
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
- zfs_znode_free(zp);
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
}
void
-zfs_znode_free(znode_t *zp)
-{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- vn_invalid(ZTOV(zp));
-
- ASSERT(ZTOV(zp)->v_count == 0);
-
- mutex_enter(&zfsvfs->z_znodes_lock);
- POINTER_INVALIDATE(&zp->z_zfsvfs);
- list_remove(&zfsvfs->z_all_znodes, zp);
- mutex_exit(&zfsvfs->z_znodes_lock);
-
- kmem_cache_free(znode_cache, zp);
-
- VFS_RELE(zfsvfs->z_vfs);
-}
-
-void
-zfs_time_stamper_locked(znode_t *zp, uint_t flag, dmu_tx_t *tx)
+zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
+ uint64_t ctime[2], boolean_t have_tx)
{
timestruc_t now;
- ASSERT(MUTEX_HELD(&zp->z_lock));
-
gethrestime(&now);
- if (tx) {
- dmu_buf_will_dirty(zp->z_dbuf, tx);
+ if (have_tx) { /* will sa_bulk_update happen really soon? */
zp->z_atime_dirty = 0;
zp->z_seq++;
} else {
zp->z_atime_dirty = 1;
}
- if (flag & AT_ATIME)
- ZFS_TIME_ENCODE(&now, zp->z_phys->zp_atime);
-
- if (flag & AT_MTIME) {
- ZFS_TIME_ENCODE(&now, zp->z_phys->zp_mtime);
- if (zp->z_zfsvfs->z_use_fuids)
- zp->z_phys->zp_flags |= (ZFS_ARCHIVE | ZFS_AV_MODIFIED);
+ if (flag & ATTR_ATIME) {
+ ZFS_TIME_ENCODE(&now, zp->z_atime);
}
- if (flag & AT_CTIME) {
- ZFS_TIME_ENCODE(&now, zp->z_phys->zp_ctime);
- if (zp->z_zfsvfs->z_use_fuids)
- zp->z_phys->zp_flags |= ZFS_ARCHIVE;
+ if (flag & ATTR_MTIME) {
+ ZFS_TIME_ENCODE(&now, mtime);
+ if (ZTOZSB(zp)->z_use_fuids) {
+ zp->z_pflags |= (ZFS_ARCHIVE |
+ ZFS_AV_MODIFIED);
+ }
}
-}
-/*
- * Update the requested znode timestamps with the current time.
- * If we are in a transaction, then go ahead and mark the znode
- * dirty in the transaction so the timestamps will go to disk.
- * Otherwise, we will get pushed next time the znode is updated
- * in a transaction, or when this znode eventually goes inactive.
- *
- * Why is this OK?
- * 1 - Only the ACCESS time is ever updated outside of a transaction.
- * 2 - Multiple consecutive updates will be collapsed into a single
- * znode update by the transaction grouping semantics of the DMU.
- */
-void
-zfs_time_stamper(znode_t *zp, uint_t flag, dmu_tx_t *tx)
-{
- mutex_enter(&zp->z_lock);
- zfs_time_stamper_locked(zp, flag, tx);
- mutex_exit(&zp->z_lock);
+ if (flag & ATTR_CTIME) {
+ ZFS_TIME_ENCODE(&now, ctime);
+ if (ZTOZSB(zp)->z_use_fuids)
+ zp->z_pflags |= ZFS_ARCHIVE;
+ }
}
/*
	 * If the file size is already greater than the current blocksize,
	 * we will not grow. If there is more than one block in a file,
* the blocksize cannot change.
*/
- if (zp->z_blksz && zp->z_phys->zp_size > zp->z_blksz)
+ if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
- error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
+ error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
size, 0, tx);
+
if (error == ENOTSUP)
return;
ASSERT3U(error, ==, 0);
/* What blocksize did we actually get? */
- dmu_object_size_from_db(zp->z_dbuf, &zp->z_blksz, &dummy);
+ dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
+#ifdef HAVE_MMAP
/*
* This is a dummy interface used when pvn_vplist_dirty() should *not*
* be calling back into the fs for a putpage(). E.g.: when truncating
ASSERT(0);
return (0);
}
+#endif /* HAVE_MMAP */
/*
 * Increase the file length.
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
uint64_t newblksz;
/*
* Nothing to do if file already at desired length.
*/
- if (end <= zp->z_phys->zp_size) {
+ if (end <= zp->z_size) {
zfs_range_unlock(rl);
return (0);
}
top:
- tx = dmu_tx_create(zfsvfs->z_os);
- dmu_tx_hold_bonus(tx, zp->z_id);
+ tx = dmu_tx_create(zsb->z_os);
+ dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
+ zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
- (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
+ (!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
- if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
+ if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, SPA_MAXBLOCKSIZE);
} else {
- newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
+ newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
zfs_range_unlock(rl);
return (error);
}
- dmu_buf_will_dirty(zp->z_dbuf, tx);
if (newblksz)
zfs_grow_blocksize(zp, newblksz, tx);
- zp->z_phys->zp_size = end;
+ zp->z_size = end;
+
+ VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
+ &zp->z_size, sizeof (zp->z_size), tx));
zfs_range_unlock(rl);
* off - start of section to free.
* len - length of section to free.
*
- * RETURN: 0 if success
+ * RETURN: 0 if success
* error code if failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
rl_t *rl;
int error;
/*
* Nothing to do if file already at desired length.
*/
- if (off >= zp->z_phys->zp_size) {
+ if (off >= zp->z_size) {
zfs_range_unlock(rl);
return (0);
}
- if (off + len > zp->z_phys->zp_size)
- len = zp->z_phys->zp_size - off;
+ if (off + len > zp->z_size)
+ len = zp->z_size - off;
- error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
+ error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);
zfs_range_unlock(rl);
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
- * RETURN: 0 if success
+ * RETURN: 0 if success
* error code if failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- vnode_t *vp = ZTOV(zp);
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_tx_t *tx;
rl_t *rl;
int error;
+ sa_bulk_attr_t bulk[2];
+ int count = 0;
/*
	 * We will change zp_size, lock the whole file.
	 */
/*
* Nothing to do if file already at desired length.
*/
- if (end >= zp->z_phys->zp_size) {
+ if (end >= zp->z_size) {
zfs_range_unlock(rl);
return (0);
}
- error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end, -1);
+ error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
if (error) {
zfs_range_unlock(rl);
return (error);
}
top:
- tx = dmu_tx_create(zfsvfs->z_os);
- dmu_tx_hold_bonus(tx, zp->z_id);
+ tx = dmu_tx_create(zsb->z_os);
+ dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
+ zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
if (error == ERESTART) {
zfs_range_unlock(rl);
return (error);
}
- dmu_buf_will_dirty(zp->z_dbuf, tx);
-
- zp->z_phys->zp_size = end;
- dmu_tx_commit(tx);
+ zp->z_size = end;
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
+ NULL, &zp->z_size, sizeof (zp->z_size));
- /*
- * Clear any mapped pages in the truncated region. This has to
- * happen outside of the transaction to avoid the possibility of
- * a deadlock with someone trying to push a page that we are
- * about to invalidate.
- */
- if (vn_has_cached_data(vp)) {
- page_t *pp;
- uint64_t start = end & PAGEMASK;
- int poff = end & PAGEOFFSET;
-
- if (poff != 0 && (pp = page_lookup(vp, start, SE_SHARED))) {
- /*
- * We need to zero a partial page.
- */
- pagezero(pp, poff, PAGESIZE - poff);
- start += PAGESIZE;
- page_unlock(pp);
- }
- error = pvn_vplist_dirty(vp, start, zfs_no_putpage,
- B_INVAL | B_TRUNC, NULL);
- ASSERT(error == 0);
+ if (end == 0) {
+ zp->z_pflags &= ~ZFS_SPARSE;
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
+ NULL, &zp->z_pflags, 8);
}
+ VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
+
+ dmu_tx_commit(tx);
zfs_range_unlock(rl);
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
- * RETURN: 0 if success
+ * RETURN: 0 if success
* error code if failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
- vnode_t *vp = ZTOV(zp);
+#ifdef HAVE_MANDLOCKS
+ struct inode *ip = ZTOI(zp);
+#endif /* HAVE_MANDLOCKS */
dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- zilog_t *zilog = zfsvfs->z_log;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ zilog_t *zilog = zsb->z_log;
+ uint64_t mode;
+ uint64_t mtime[2], ctime[2];
+ sa_bulk_attr_t bulk[3];
+ int count = 0;
int error;
- if (off > zp->z_phys->zp_size) {
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zsb), &mode,
+ sizeof (mode))) != 0)
+ return (error);
+
+ if (off > zp->z_size) {
error = zfs_extend(zp, off+len);
if (error == 0 && log)
goto log;
return (error);
}
+#ifdef HAVE_MANDLOCKS
/*
* Check for any locks in the region to be freed.
*/
- if (MANDLOCK(vp, (mode_t)zp->z_phys->zp_mode)) {
- uint64_t length = (len ? len : zp->z_phys->zp_size - off);
- if (error = chklock(vp, FWRITE, off, length, flag, NULL))
+
+ if (MANDLOCK(ip, (mode_t)mode)) {
+ uint64_t length = (len ? len : zp->z_size - off);
+		if ((error = chklock(ip, FWRITE, off, length, flag, NULL)) != 0)
return (error);
}
+#endif /* HAVE_MANDLOCKS */
if (len == 0) {
error = zfs_trunc(zp, off);
} else {
if ((error = zfs_free_range(zp, off, len)) == 0 &&
- off + len > zp->z_phys->zp_size)
+ off + len > zp->z_size)
error = zfs_extend(zp, off+len);
}
if (error || !log)
return (error);
log:
- tx = dmu_tx_create(zfsvfs->z_os);
- dmu_tx_hold_bonus(tx, zp->z_id);
+ tx = dmu_tx_create(zsb->z_os);
+ dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
+ zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
if (error == ERESTART) {
return (error);
}
- zfs_time_stamper(zp, CONTENT_MODIFIED, tx);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
+ NULL, &zp->z_pflags, 8);
+ zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
+ error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
+ ASSERT(error == 0);
+
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
dmu_tx_commit(tx);
+ zfs_inode_update(zp);
return (0);
}
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
- zfsvfs_t zfsvfs;
- uint64_t moid, doid, version;
- uint64_t sense = ZFS_CASE_SENSITIVE;
+ uint64_t moid, obj, sa_obj, version;
uint64_t norm = 0;
nvpair_t *elem;
int error;
- znode_t *rootzp = NULL;
- vnode_t *vp;
- vattr_t vattr;
- znode_t *zp;
+ timestruc_t now;
+ dmu_buf_t *db;
+ znode_phys_t *pzp;
/*
	 * First attempt to create master node.
	 */
/*
* Set starting attributes.
*/
- if (spa_version(dmu_objset_spa(os)) >= SPA_VERSION_FUID)
- version = ZPL_VERSION;
- else
- version = ZPL_VERSION_FUID - 1;
- error = zap_update(os, moid, ZPL_VERSION_STR,
- 8, 1, &version, tx);
+ version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
elem = NULL;
while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
/* For the moment we expect all zpl props to be uint64_ts */
VERIFY(nvpair_value_uint64(elem, &val) == 0);
name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
- version = val;
- error = zap_update(os, moid, ZPL_VERSION_STR,
- 8, 1, &version, tx);
+ if (val < version)
+ version = val;
} else {
error = zap_update(os, moid, name, 8, 1, &val, tx);
}
ASSERT(error == 0);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val;
- else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
- sense = val;
}
ASSERT(version != 0);
+ error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
/*
+ * Create zap object used for SA attribute registration
+ */
+
+ if (version >= ZPL_VERSION_SA) {
+ sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
+ DMU_OT_NONE, 0, tx);
+ error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
+ ASSERT(error == 0);
+ } else {
+ sa_obj = 0;
+ }
+ /*
* Create a delete queue.
*/
- doid = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
+ obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
- error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &doid, tx);
+ error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
ASSERT(error == 0);
/*
- * Create root znode. Create minimal znode/vnode/zfsvfs
- * to allow zfs_mknode to work.
+ * Create the root znode with code free of VFS dependencies. This
+ * is important because without a registered filesystem and superblock
+ * all of the required VFS hooks would be missing. The critical thing
+ * is simply to create the required root znode.
*/
- vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
- vattr.va_type = VDIR;
- vattr.va_mode = S_IFDIR|0755;
- vattr.va_uid = crgetuid(cr);
- vattr.va_gid = crgetgid(cr);
-
- rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
- rootzp->z_unlinked = 0;
- rootzp->z_atime_dirty = 0;
-
- vp = ZTOV(rootzp);
- vn_reinit(vp);
- vp->v_type = VDIR;
+ obj = zap_create_norm(os, norm, DMU_OT_DIRECTORY_CONTENTS,
+ DMU_OT_ZNODE, sizeof (znode_phys_t), tx);
- bzero(&zfsvfs, sizeof (zfsvfs_t));
+ VERIFY(0 == dmu_bonus_hold(os, obj, FTAG, &db));
+ dmu_buf_will_dirty(db, tx);
- zfsvfs.z_os = os;
- zfsvfs.z_parent = &zfsvfs;
- zfsvfs.z_version = version;
- zfsvfs.z_use_fuids = USE_FUIDS(version, os);
- zfsvfs.z_norm = norm;
/*
- * Fold case on file systems that are always or sometimes case
- * insensitive.
+ * Initialize the znode physical data to zero.
*/
- if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
- zfsvfs.z_norm |= U8_TEXTPREP_TOUPPER;
-
- mutex_init(&zfsvfs.z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
- list_create(&zfsvfs.z_all_znodes, sizeof (znode_t),
- offsetof(znode_t, z_link_node));
-
- ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
- rootzp->z_zfsvfs = &zfsvfs;
- zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, 0, NULL, NULL);
- ASSERT3P(zp, ==, rootzp);
- ASSERT(!vn_in_dnlc(ZTOV(rootzp))); /* not valid to move */
- error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
+ ASSERT(db->db_size >= sizeof (znode_phys_t));
+ bzero(db->db_data, db->db_size);
+ pzp = db->db_data;
+
+ if (USE_FUIDS(version, os))
+ pzp->zp_flags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
+
+ pzp->zp_size = 2; /* "." and ".." */
+ pzp->zp_links = 2;
+ pzp->zp_parent = obj;
+ pzp->zp_gen = dmu_tx_get_txg(tx);
+ pzp->zp_mode = S_IFDIR | 0755;
+	pzp->zp_flags |= ZFS_ACL_TRIVIAL;
+
+ gethrestime(&now);
+
+ ZFS_TIME_ENCODE(&now, pzp->zp_crtime);
+ ZFS_TIME_ENCODE(&now, pzp->zp_ctime);
+ ZFS_TIME_ENCODE(&now, pzp->zp_atime);
+ ZFS_TIME_ENCODE(&now, pzp->zp_mtime);
+
+ error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &obj, tx);
ASSERT(error == 0);
- POINTER_INVALIDATE(&rootzp->z_zfsvfs);
- ZTOV(rootzp)->v_count = 0;
- dmu_buf_rele(rootzp->z_dbuf, NULL);
- rootzp->z_dbuf = NULL;
- kmem_cache_free(znode_cache, rootzp);
+ dmu_buf_rele(db, FTAG);
}
#endif /* _KERNEL */
+
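+/*
+ * Look up the SA attribute registration object (absent on pre-SA pools)
+ * and build the attribute table used by the object-to-path helpers below.
+ */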
+static int
+zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
+{
+ uint64_t sa_obj = 0;
+ int error;
+
+ error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
+ if (error != 0 && error != ENOENT)
+ return (error);
+
+ error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
+ return (error);
+}
+
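+/*
+ * Hold the object's bonus buffer and obtain a private SA handle for it.
+ * On success the caller owns both and must release them with
+ * zfs_release_sa_handle().
+ */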
+static int
+zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
+ dmu_buf_t **db)
+{
+ dmu_object_info_t doi;
+ int error;
+
+ if ((error = sa_buf_hold(osp, obj, FTAG, db)) != 0)
+ return (error);
+
+ dmu_object_info_from_db(*db, &doi);
+ if ((doi.doi_bonus_type != DMU_OT_SA &&
+ doi.doi_bonus_type != DMU_OT_ZNODE) ||
+ (doi.doi_bonus_type == DMU_OT_ZNODE &&
+ doi.doi_bonus_size < sizeof (znode_phys_t))) {
+ sa_buf_rele(*db, FTAG);
+ return (ENOTSUP);
+ }
+
+ error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
+ if (error != 0) {
+ sa_buf_rele(*db, FTAG);
+ return (error);
+ }
+
+ return (0);
+}
+
+void
+zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db)
+{
+ sa_handle_destroy(hdl);
+ sa_buf_rele(db, FTAG);
+}
+
/*
* Given an object number, return its parent object number and whether
* or not the object is an extended attribute directory.
*/
static int
-zfs_obj_to_pobj(objset_t *osp, uint64_t obj, uint64_t *pobjp, int *is_xattrdir)
+zfs_obj_to_pobj(sa_handle_t *hdl, sa_attr_type_t *sa_table, uint64_t *pobjp,
+ int *is_xattrdir)
{
- dmu_buf_t *db;
- dmu_object_info_t doi;
- znode_phys_t *zp;
+ uint64_t parent;
+ uint64_t pflags;
+ uint64_t mode;
+ sa_bulk_attr_t bulk[3];
+ int count = 0;
int error;
- if ((error = dmu_bonus_hold(osp, obj, FTAG, &db)) != 0)
- return (error);
+ SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
+ &parent, sizeof (parent));
+ SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
+ &pflags, sizeof (pflags));
+ SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
+ &mode, sizeof (mode));
- dmu_object_info_from_db(db, &doi);
- if (doi.doi_bonus_type != DMU_OT_ZNODE ||
- doi.doi_bonus_size < sizeof (znode_phys_t)) {
- dmu_buf_rele(db, FTAG);
- return (EINVAL);
- }
+ if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
+ return (error);
- zp = db->db_data;
- *pobjp = zp->zp_parent;
- *is_xattrdir = ((zp->zp_flags & ZFS_XATTR) != 0) &&
- S_ISDIR(zp->zp_mode);
- dmu_buf_rele(db, FTAG);
+ *pobjp = parent;
+ *is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
return (0);
}
-int
-zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
+/*
+ * Given an object number, return some ZPL-level statistics.
+ */
+static int
+zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
+ zfs_stat_t *sb)
{
+ sa_bulk_attr_t bulk[4];
+ int count = 0;
+
+ SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
+ &sb->zs_mode, sizeof (sb->zs_mode));
+ SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
+ &sb->zs_gen, sizeof (sb->zs_gen));
+ SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
+ &sb->zs_links, sizeof (sb->zs_links));
+ SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
+ &sb->zs_ctime, sizeof (sb->zs_ctime));
+
+ return (sa_bulk_lookup(hdl, bulk, count));
+}
+
+static int
+zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
+ sa_attr_type_t *sa_table, char *buf, int len)
+{
+ sa_handle_t *sa_hdl;
+ sa_handle_t *prevhdl = NULL;
+ dmu_buf_t *prevdb = NULL;
+ dmu_buf_t *sa_db = NULL;
char *path = buf + len - 1;
int error;
*path = '\0';
+ sa_hdl = hdl;
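+	/*
+	 * Walk up the hierarchy one parent at a time, prepending each
+	 * component to the path. The previous iteration's SA handle and
+	 * bonus buffer are held until the next lookup succeeds; the
+	 * caller-supplied handle (hdl) is never released here.
+	 */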
for (;;) {
uint64_t pobj;
size_t complen;
int is_xattrdir;
- if ((error = zfs_obj_to_pobj(osp, obj, &pobj,
+ if (prevdb)
+ zfs_release_sa_handle(prevhdl, prevdb);
+
+ if ((error = zfs_obj_to_pobj(sa_hdl, sa_table, &pobj,
&is_xattrdir)) != 0)
break;
ASSERT(path >= buf);
bcopy(component, path, complen);
obj = pobj;
+
+ if (sa_hdl != hdl) {
+ prevhdl = sa_hdl;
+ prevdb = sa_db;
+ }
+ error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db);
+ if (error != 0) {
+ sa_hdl = prevhdl;
+ sa_db = prevdb;
+ break;
+ }
+ }
+
+ if (sa_hdl != NULL && sa_hdl != hdl) {
+ ASSERT(sa_db != NULL);
+ zfs_release_sa_handle(sa_hdl, sa_db);
}
if (error == 0)
(void) memmove(buf, path, buf + len - path);
+
+ return (error);
+}
+
+int
+zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
+{
+ sa_attr_type_t *sa_table;
+ sa_handle_t *hdl;
+ dmu_buf_t *db;
+ int error;
+
+ error = zfs_sa_setup(osp, &sa_table);
+ if (error != 0)
+ return (error);
+
+ error = zfs_grab_sa_handle(osp, obj, &hdl, &db);
+ if (error != 0)
+ return (error);
+
+ error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
+
+ zfs_release_sa_handle(hdl, db);
+ return (error);
+}
+
+int
+zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
+ char *buf, int len)
+{
+ char *path = buf + len - 1;
+ sa_attr_type_t *sa_table;
+ sa_handle_t *hdl;
+ dmu_buf_t *db;
+ int error;
+
+ *path = '\0';
+
+ error = zfs_sa_setup(osp, &sa_table);
+ if (error != 0)
+ return (error);
+
+ error = zfs_grab_sa_handle(osp, obj, &hdl, &db);
+ if (error != 0)
+ return (error);
+
+ error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
+ if (error != 0) {
+ zfs_release_sa_handle(hdl, db);
+ return (error);
+ }
+
+ error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
+
+ zfs_release_sa_handle(hdl, db);
return (error);
}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(zfs_create_fs);
+EXPORT_SYMBOL(zfs_obj_to_path);
+#endif