/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs_opreg.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include "fs/fs_subr.h"
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/fs/zfs.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/varargs.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/mkdev.h>
#include <sys/modctl.h>
#include <sys/refstr.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_fuid.h>
#include <sys/bootconf.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/spa_boot.h>
#include "zfs_comutil.h"
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	/*
	 * Data integrity is job one.  We don't want a compromised kernel
	 * writing to the storage pool, so we never sync during panic.
	 */
	if (unlikely(oops_in_progress))
		return (0);

	/*
	 * Semantically, the only requirement is that the sync be initiated.
	 * The DMU syncs out txgs frequently, so there's nothing to do.
	 */
	if (!wait)
		return (0);

	if (zsb != NULL) {
		/*
		 * Sync a specific filesystem.
		 */
		dsl_pool_t *dp = dmu_objset_pool(zsb->z_os);

		/*
		 * If the system is shutting down, then skip any
		 * filesystems which may exist on a suspended pool.
		 */
		if (spa_suspended(dp->dp_spa))
			return (0);
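		/*
		 * Committing the ZIL with an object id of 0 flushes all
		 * outstanding intent-log records for this filesystem, not
		 * just those belonging to a single object.
		 */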
		if (zsb->z_log != NULL)
			zil_commit(zsb->z_log, 0);
	} else {
		/*
		 * Sync all ZFS filesystems.  This is what happens when you
		 * run sync(1M).  Unlike other filesystems, ZFS honors the
		 * request by waiting for all pools to commit all dirty data.
		 */
		spa_sync_allpools();
	}

	return (0);
}
EXPORT_SYMBOL(zfs_sync);
boolean_t
zfs_is_readonly(zfs_sb_t *zsb)
{
	return (!!(zsb->z_sb->s_flags & MS_RDONLY));
}
EXPORT_SYMBOL(zfs_is_readonly);
static void
atime_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_atime = newval;
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	if (newval == TRUE)
		zsb->z_flags |= ZSB_XATTR;
	else
		zsb->z_flags &= ~ZSB_XATTR;
}
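/*
 * Property callback for "recordsize" changes.  A new value is honored
 * only if it is a power of two within the supported SPA block size
 * range; anything else falls back to SPA_MAXBLOCKSIZE.
 */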
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;

	if (newval < SPA_MINBLOCKSIZE ||
	    newval > SPA_MAXBLOCKSIZE || !ISP2(newval))
		newval = SPA_MAXBLOCKSIZE;

	zsb->z_max_blksz = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;
	struct super_block *sb = zsb->z_sb;

	if (sb == NULL)
		return;

	if (newval)
		sb->s_flags |= MS_RDONLY;
	else
		sb->s_flags &= ~MS_RDONLY;
}
static void
devices_changed_cb(void *arg, uint64_t newval)
{
}

static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}

static void
exec_changed_cb(void *arg, uint64_t newval)
{
}
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
	zfs_sb_t *zsb = arg;
	struct super_block *sb = zsb->z_sb;

	if (sb == NULL)
		return;

	if (newval == TRUE)
		sb->s_flags |= MS_MANDLOCK;
	else
		sb->s_flags &= ~MS_MANDLOCK;
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_show_ctldir = newval;
}

static void
vscan_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_vscan = newval;
}

static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
	((zfs_sb_t *)arg)->z_acl_inherit = newval;
}
int
zfs_register_callbacks(zfs_sb_t *zsb)
{
	struct dsl_dataset *ds = NULL;
	objset_t *os = zsb->z_os;
	int error = 0;

	if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os)))
		readonly_changed_cb(zsb, B_TRUE);

	/*
	 * Register property callbacks.
	 *
	 * It would probably be fine to just check for i/o error from
	 * the first prop_register(), but I guess I like to go both
	 * ways.
	 */
	ds = dmu_objset_ds(os);
	error = dsl_prop_register(ds,
	    "atime", atime_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "xattr", xattr_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "recordsize", blksz_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "readonly", readonly_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "devices", devices_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "setuid", setuid_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "exec", exec_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "snapdir", snapdir_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "aclinherit", acl_inherit_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "vscan", vscan_changed_cb, zsb);
	error = error ? error : dsl_prop_register(ds,
	    "nbmand", nbmand_changed_cb, zsb);
	if (error)
		goto unregister;

	return (0);

unregister:
	/*
	 * We may attempt to unregister some callbacks that are not
	 * registered, but this is OK; it will simply return ENOMSG,
	 * which we will ignore.
	 */
	(void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "devices", devices_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
	    zsb);
	(void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zsb);
	(void) dsl_prop_unregister(ds, "nbmand", nbmand_changed_cb, zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_register_callbacks);
static int
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
    uint64_t *userp, uint64_t *groupp)
{
	znode_phys_t *znp = data;
	int error = 0;

	/*
	 * Is it a valid type of object to track?
	 */
	if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
		return (ENOENT);

	/*
	 * If we have a NULL data pointer
	 * then assume the id's aren't changing and
	 * return EEXIST to the dmu to let it know to
	 * use the same ids
	 */
	if (data == NULL)
		return (EEXIST);

	if (bonustype == DMU_OT_ZNODE) {
		*userp = znp->zp_uid;
		*groupp = znp->zp_gid;
	} else {
		int hdrsize;

		ASSERT(bonustype == DMU_OT_SA);
		hdrsize = sa_hdrsize(data);

		if (hdrsize != 0) {
			*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
			    SA_UID_OFFSET));
			*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
			    SA_GID_OFFSET));
		} else {
			/*
			 * This should only happen for newly created
			 * files that haven't had the znode data filled
			 * in yet.
			 */
			*userp = 0;
			*groupp = 0;
		}
	}
	return (error);
}
static void
fuidstr_to_sid(zfs_sb_t *zsb, const char *fuidstr,
    char *domainbuf, int buflen, uid_t *ridp)
{
	uint64_t fuid;
	const char *domain;

	fuid = strtonum(fuidstr, NULL);

	domain = zfs_fuid_find_by_idx(zsb, FUID_INDEX(fuid));
	if (domain)
		(void) strlcpy(domainbuf, domain, buflen);
	else
		domainbuf[0] = '\0';

	*ridp = FUID_RID(fuid);
}
static uint64_t
zfs_userquota_prop_to_obj(zfs_sb_t *zsb, zfs_userquota_prop_t type)
{
	switch (type) {
	case ZFS_PROP_USERUSED:
		return (DMU_USERUSED_OBJECT);
	case ZFS_PROP_GROUPUSED:
		return (DMU_GROUPUSED_OBJECT);
	case ZFS_PROP_USERQUOTA:
		return (zsb->z_userquota_obj);
	case ZFS_PROP_GROUPQUOTA:
		return (zsb->z_groupquota_obj);
	}
	return (0);
}
int
zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
	int error;
	zap_cursor_t zc;
	zap_attribute_t za;
	zfs_useracct_t *buf = vbuf;
	uint64_t obj;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (ENOTSUP);

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0) {
		*bufsizep = 0;
		return (0);
	}

	for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((uintptr_t)buf - (uintptr_t)vbuf +
		    sizeof (zfs_useracct_t) > *bufsizep)
			break;

		fuidstr_to_sid(zsb, za.za_name,
		    buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

		buf->zu_space = za.za_first_integer;
		buf++;
	}
	if (error == ENOENT)
		error = 0;

	ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
	*bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
	*cookiep = zap_cursor_serialize(&zc);
	zap_cursor_fini(&zc);
	return (error);
}
EXPORT_SYMBOL(zfs_userspace_many);
/*
 * buf must be big enough (e.g., 32 bytes)
 */
static int
id_to_fuidstr(zfs_sb_t *zsb, const char *domain, uid_t rid,
    char *buf, boolean_t addok)
{
	uint64_t fuid;
	int domainid = 0;

	if (domain && domain[0]) {
		domainid = zfs_fuid_find_by_domain(zsb, domain, NULL, addok);
		if (domainid == -1)
			return (ENOENT);
	}
	fuid = FUID_ENCODE(domainid, rid);
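	/*
	 * The ZAP key written below is the FUID rendered in hex: the
	 * domain table index in the upper bits combined with the 32-bit
	 * rid in the low bits (see FUID_ENCODE above).
	 */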
	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	return (0);
}
int
zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t *valp)
{
	char buf[32];
	uint64_t obj;
	int err;

	*valp = 0;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (ENOTSUP);

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0)
		return (0);

	err = id_to_fuidstr(zsb, domain, rid, buf, B_FALSE);
	if (err)
		return (err);

	err = zap_lookup(zsb->z_os, obj, buf, 8, 1, valp);
	if (err == ENOENT)
		err = 0;
	return (err);
}
EXPORT_SYMBOL(zfs_userspace_one);
int
zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
	char buf[32];
	int err;
	dmu_tx_t *tx;
	uint64_t *objp;
	boolean_t fuid_dirtied;

	if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
		return (EINVAL);

	if (zsb->z_version < ZPL_VERSION_USERSPACE)
		return (ENOTSUP);

	objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj :
	    &zsb->z_groupquota_obj;

	err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE);
	if (err)
		return (err);
	fuid_dirtied = zsb->z_fuid_dirty;

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
	if (*objp == 0) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    zfs_userquota_prop_prefixes[type]);
	}
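	/*
	 * Note on the holds above: the transaction holds the quota ZAP
	 * itself (or DMU_NEW_OBJECT when it has yet to be created) and,
	 * in the creation case, the MASTER_NODE entry that will name it.
	 */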
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	mutex_enter(&zsb->z_lock);
	if (*objp == 0) {
		*objp = zap_create(zsb->z_os, DMU_OT_USERGROUP_QUOTA,
		    DMU_OT_NONE, 0, tx);
		VERIFY(0 == zap_add(zsb->z_os, MASTER_NODE_OBJ,
		    zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
	}
	mutex_exit(&zsb->z_lock);

	if (quota == 0) {
		err = zap_remove(zsb->z_os, *objp, buf, tx);
		if (err == ENOENT)
			err = 0;
	} else {
		err = zap_update(zsb->z_os, *objp, buf, 8, 1, &quota, tx);
	}
	ASSERT(err == 0);
	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);
	dmu_tx_commit(tx);
	return (err);
}
EXPORT_SYMBOL(zfs_set_userquota);
boolean_t
zfs_fuid_overquota(zfs_sb_t *zsb, boolean_t isgroup, uint64_t fuid)
{
	char buf[32];
	uint64_t used, quota, usedobj, quotaobj;
	int err;

	usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
	quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

	if (quotaobj == 0 || zsb->z_replay)
		return (B_FALSE);

	(void) sprintf(buf, "%llx", (longlong_t)fuid);
	err = zap_lookup(zsb->z_os, quotaobj, buf, 8, 1, &quota);
	if (err != 0)
		return (B_FALSE);

	err = zap_lookup(zsb->z_os, usedobj, buf, 8, 1, &used);
	if (err != 0)
		return (B_FALSE);
	return (used >= quota);
}
EXPORT_SYMBOL(zfs_fuid_overquota);
boolean_t
zfs_owner_overquota(zfs_sb_t *zsb, znode_t *zp, boolean_t isgroup)
{
	uint64_t fuid;
	uint64_t quotaobj;

	quotaobj = isgroup ? zsb->z_groupquota_obj : zsb->z_userquota_obj;

	fuid = isgroup ? zp->z_gid : zp->z_uid;

	if (quotaobj == 0 || zsb->z_replay)
		return (B_FALSE);

	return (zfs_fuid_overquota(zsb, isgroup, fuid));
}
EXPORT_SYMBOL(zfs_owner_overquota);
int
zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = -bdi_init(&zsb->z_bdi);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version >
	    zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
		(void) printk("Can't mount a version %lld file system "
		    "on a version %lld pool. Pool must be upgraded to mount "
		    "this file system.\n", (u_longlong_t)zsb->z_version,
		    (u_longlong_t)spa_version(dmu_objset_spa(os)));
		error = ENOTSUP;
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;
	} else {
		/*
		 * Pre-SA version file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrw_init(&zsb->z_teardown_lock);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
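	/*
	 * z_hold_mtx[] is an array of ZFS_OBJ_MTX_SZ mutexes used to
	 * serialize object holds; spreading holds across many buckets
	 * keeps unrelated objects from contending on a single lock.
	 */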
	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
EXPORT_SYMBOL(zfs_sb_create);
int
zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
{
	int error;

	error = zfs_register_callbacks(zsb);
	if (error)
		return (error);

	/*
	 * Set the objset user_ptr to track its zsb.
	 */
	mutex_enter(&zsb->z_os->os_user_ptr_lock);
	dmu_objset_set_user(zsb->z_os, zsb);
	mutex_exit(&zsb->z_os->os_user_ptr_lock);

	zsb->z_log = zil_open(zsb->z_os, zfs_get_data);

	/*
	 * If we are not mounting (ie: online recv), then we don't
	 * have to worry about replaying the log as we blocked all
	 * operations out since we closed the ZIL.
	 */
	if (mounting) {
		boolean_t readonly;

		/*
		 * During replay we remove the read only flag to
		 * allow replays to succeed.
		 */
		readonly = zfs_is_readonly(zsb);
		if (readonly != 0)
			readonly_changed_cb(zsb, B_FALSE);
		else
			zfs_unlinked_drain(zsb);

		/*
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain().  (Further note: ziltest
		 * doesn't use readonly mounts, where
		 * zfs_unlinked_drain() isn't called.)  This is because
		 * ziltest causes spa_sync() to think it's committed,
		 * but actually it is not, so the intent log contains
		 * many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated
		 * in a yet later txg.  This would write a "create
		 * object N" record to the intent log.  Normally, this
		 * would be fine because the spa_sync() would have
		 * written out the fact that object N is free, before
		 * we could write the "create object N" intent log
		 * record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk.  So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		 */
		if (spa_writeable(dmu_objset_spa(zsb->z_os))) {
			if (zil_replay_disable) {
				zil_destroy(zsb->z_log, B_FALSE);
			} else {
				zsb->z_replay = B_TRUE;
				zil_replay(zsb->z_os, zsb,
				    zfs_replay_vector);
				zsb->z_replay = B_FALSE;
			}
		}

		/* restore readonly bit */
		if (readonly != 0)
			readonly_changed_cb(zsb, B_TRUE);
	}

	return (0);
}
EXPORT_SYMBOL(zfs_sb_setup);
void
zfs_sb_free(zfs_sb_t *zsb)
{
	int i;

	zfs_fuid_destroy(zsb);

	bdi_destroy(&zsb->z_bdi);
	mutex_destroy(&zsb->z_znodes_lock);
	mutex_destroy(&zsb->z_lock);
	list_destroy(&zsb->z_all_znodes);
	rrw_destroy(&zsb->z_teardown_lock);
	rw_destroy(&zsb->z_teardown_inactive_lock);
	rw_destroy(&zsb->z_fuid_lock);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zsb->z_hold_mtx[i]);
	kmem_free(zsb, sizeof (zfs_sb_t));
}
EXPORT_SYMBOL(zfs_sb_free);
void
zfs_set_fuid_feature(zfs_sb_t *zsb)
{
	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);
}
void
zfs_unregister_callbacks(zfs_sb_t *zsb)
{
	objset_t *os = zsb->z_os;
	struct dsl_dataset *ds;

	/*
	 * Unregister properties.
	 */
	if (!dmu_objset_is_snapshot(os)) {
		ds = dmu_objset_ds(os);
		VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "devices", devices_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
		    zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "aclinherit",
		    acl_inherit_changed_cb, zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "vscan",
		    vscan_changed_cb, zsb) == 0);

		VERIFY(dsl_prop_unregister(ds, "nbmand",
		    nbmand_changed_cb, zsb) == 0);
	}
}
EXPORT_SYMBOL(zfs_unregister_callbacks);
#ifdef HAVE_MLSLABEL
/*
 * zfs_check_global_label:
 *	Check that the hex label string is appropriate for the dataset
 *	being mounted into the global_zone proper.
 *
 *	Return an error if the hex label string is not default or
 *	admin_low/admin_high.  For admin_low labels, the corresponding
 *	dataset must be readonly.
 */
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
	if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
		/* must be readonly */
		uint64_t rdonly;

		if (dsl_prop_get_integer(dsname,
		    zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
			return (EACCES);
		return (rdonly ? 0 : EACCES);
	}
	return (EACCES);
}
EXPORT_SYMBOL(zfs_check_global_label);
#endif /* HAVE_MLSLABEL */
int
zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
{
	zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
	uint64_t refdbytes, availbytes, usedobjs, availobjs;
	uint32_t bshift;

	ZFS_ENTER(zsb);

	dmu_objset_space(zsb->z_os,
	    &refdbytes, &availbytes, &usedobjs, &availobjs);

	/*
	 * The underlying storage pool actually uses multiple block
	 * sizes.  Under Solaris frsize (fragment size) is reported as
	 * the smallest block size we support, and bsize (block size)
	 * as the filesystem's maximum block size.  Unfortunately,
	 * under Linux the fragment size and block size are often used
	 * interchangeably.  Thus we are forced to report both of them
	 * as the filesystem's maximum block size.
	 */
	statp->f_frsize = zsb->z_max_blksz;
	statp->f_bsize = zsb->z_max_blksz;
	bshift = fls(statp->f_bsize) - 1;
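	/*
	 * f_bsize is a power of two here, so fls() (find last set bit)
	 * minus one yields its log2; the byte counts below are converted
	 * to block counts by shifting right by bshift.
	 */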
	/*
	 * The following report "total" blocks of various kinds in
	 * the file system, but reported in terms of f_bsize - the
	 * "fragment" size.
	 */
	statp->f_blocks = (refdbytes + availbytes) >> bshift;
	statp->f_bfree = availbytes >> bshift;
	statp->f_bavail = statp->f_bfree; /* no root reservation */

	/*
	 * statvfs() should really be called statufs(), because it assumes
	 * static metadata.  ZFS doesn't preallocate files, so the best
	 * we can do is report the max that could possibly fit in f_files,
	 * and that minus the number actually used in f_ffree.
	 * For f_ffree, report the smaller of the number of objects
	 * available and the number of blocks (each object will take at
	 * least a block).
	 */
	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
	statp->f_files = statp->f_ffree + usedobjs;
	statp->f_fsid.val[0] = dentry->d_sb->s_dev;
	statp->f_fsid.val[1] = 0;
	statp->f_type = ZFS_SUPER_MAGIC;
	statp->f_namelen = ZFS_MAXNAMELEN;

	/*
	 * We have all of 40 characters to stuff a string here.
	 * Is there anything useful we could/should provide?
	 */
	bzero(statp->f_spare, sizeof (statp->f_spare));

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_statvfs);
int
zfs_root(zfs_sb_t *zsb, struct inode **ipp)
{
	znode_t *rootzp;
	int error;

	ZFS_ENTER(zsb);

	error = zfs_zget(zsb, zsb->z_root, &rootzp);
	if (error == 0)
		*ipp = ZTOI(rootzp);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_root);
/*
 * Teardown the zfs_sb_t::z_os.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
int
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
	znode_t *zp;
	rrw_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

	if (!unmounting) {
		/*
		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
		 * super block.  Note, 'z_parent' is self referential
		 * for non-snapshots.
		 */
		shrink_dcache_sb(zsb->z_parent->z_sb);
		(void) spl_invalidate_inodes(zsb->z_parent->z_sb, 0);
	}

	/*
	 * Drain the iput_taskq to ensure all active references to the
	 * zfs_sb_t have been handled; only then can it be safely destroyed.
	 */
	taskq_wait(dsl_pool_iput_taskq(dmu_objset_pool(zsb->z_os)));

	/*
	 * Close the zil.  NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zsb->z_log) {
		zil_close(zsb->z_log);
		zsb->z_log = NULL;
	}
	rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

	/*
	 * If we are not unmounting (ie: online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	 */
	if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
		rw_exit(&zsb->z_teardown_inactive_lock);
		rrw_exit(&zsb->z_teardown_lock, FTAG);
		return (EIO);
	}

	/*
	 * At this point there are no vops active, and any new vops will
	 * fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		if (zp->z_sa_hdl) {
			ASSERT(atomic_read(&ZTOI(zp)->i_count) > 0);
			zfs_znode_dmu_fini(zp);
		}
	}
	mutex_exit(&zsb->z_znodes_lock);

	/*
	 * If we are unmounting, set the unmounted flag and let new vops
	 * unblock.  zfs_inactive will have the unmounted behavior, and all
	 * other vops will fail with EIO.
	 */
	if (unmounting) {
		zsb->z_unmounted = B_TRUE;
		rrw_exit(&zsb->z_teardown_lock, FTAG);
		rw_exit(&zsb->z_teardown_inactive_lock);
	}

	/*
	 * z_os will be NULL if there was an error in attempting to reopen
	 * zsb, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	 */
	if (zsb->z_os == NULL)
		return (0);

	/*
	 * Unregister properties.
	 */
	zfs_unregister_callbacks(zsb);

	/*
	 * Evict cached data.
	 */
	if (dmu_objset_is_dirty_anywhere(zsb->z_os))
		if (!zfs_is_readonly(zsb))
			txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
	(void) dmu_objset_evict_dbufs(zsb->z_os);

	return (0);
}
EXPORT_SYMBOL(zfs_sb_teardown);
#ifdef HAVE_BDI
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
#endif /* HAVE_BDI */
int
zfs_domount(struct super_block *sb, void *data, int silent)
{
	zpl_mount_data_t *zmd = data;
	const char *osname = zmd->z_osname;
	zfs_sb_t *zsb;
	struct inode *root_inode;
	uint64_t recordsize;
	int error;

	error = zfs_sb_create(osname, &zsb);
	if (error)
		return (error);

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL)))
		goto out;

	zsb->z_sb = sb;
	sb->s_fs_info = zsb;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);
	bdi_put_sb(sb, NULL);

	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;

	/* Set features for file system. */
	zfs_set_fuid_feature(zsb);

	if (dmu_objset_is_snapshot(zsb->z_os)) {
		uint64_t pval;

		atime_changed_cb(zsb, B_FALSE);
		readonly_changed_cb(zsb, B_TRUE);
		if ((error = dsl_prop_get_integer(osname, "xattr", &pval,
		    NULL)))
			goto out;
		xattr_changed_cb(zsb, pval);
		zsb->z_issnap = B_TRUE;
		zsb->z_os->os_sync = ZFS_SYNC_DISABLED;

		mutex_enter(&zsb->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zsb->z_os, zsb);
		mutex_exit(&zsb->z_os->os_user_ptr_lock);
	}

	/* Disable Linux read-ahead handled by lower layers */
	zsb->z_bdi.ra_pages = 0;
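	/*
	 * Register this filesystem's backing_dev_info.  Each mount gets
	 * a unique "zfs-<n>" name from the bdi_seq counter so Linux
	 * writeback can track every superblock separately.
	 */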
	error = -bdi_register(&zsb->z_bdi, NULL, "zfs-%d",
	    atomic_long_inc_return(&bdi_seq));
	if (error)
		goto out;
	bdi_put_sb(sb, &zsb->z_bdi);

	error = zfs_sb_setup(zsb, B_TRUE);
	if (error)
		goto out;
#ifdef HAVE_SNAPSHOT
	(void) zfs_snap_create(zsb);
#endif /* HAVE_SNAPSHOT */

	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zsb, &root_inode);
	if (error) {
		(void) zfs_umount(sb);
		return (error);
	}

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_alloc_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		return (ENOMEM);
	}

	return (0);
out:
	dmu_objset_disown(zsb->z_os, zsb);
	zfs_sb_free(zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_domount);
int
zfs_umount(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	objset_t *os;

	VERIFY(zfs_sb_teardown(zsb, B_TRUE) == 0);
	os = zsb->z_os;

	if (bdi_get_sb(sb)) {
		bdi_unregister(bdi_get_sb(sb));
		bdi_put_sb(sb, NULL);
	}

	/*
	 * z_os will be NULL if there was an error in
	 * attempting to reopen zsb.
	 */
	if (os != NULL) {
		/*
		 * Unset the objset user_ptr.
		 */
		mutex_enter(&os->os_user_ptr_lock);
		dmu_objset_set_user(os, NULL);
		mutex_exit(&os->os_user_ptr_lock);

		/*
		 * Finally release the objset
		 */
		dmu_objset_disown(os, zsb);
	}

	zfs_sb_free(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_umount);
int
zfs_remount(struct super_block *sb, int *flags, char *data)
{
	/*
	 * All namespace flags (MNT_*) and super block flags (MS_*) will
	 * be handled by the Linux VFS.  Only handle custom options here.
	 */
	return (0);
}
EXPORT_SYMBOL(zfs_remount);
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	znode_t *zp;
	uint64_t object = 0;
	uint64_t fid_gen = 0;
	uint64_t gen_mask;
	uint64_t zp_gen;
	int i, err;

	*ipp = NULL;

	ZFS_ENTER(zsb);

	if (fidp->fid_len == LONG_FID_LEN) {
		zfid_long_t *zlfid = (zfid_long_t *)fidp;
		uint64_t objsetid = 0;
		uint64_t setgen = 0;
		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
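		/*
		 * The fid stores the objset id and its generation as
		 * little-endian byte arrays; they are reassembled a byte
		 * at a time above because the fields are packed and not
		 * naturally aligned.
		 */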
		ZFS_EXIT(zsb);

#ifdef HAVE_SNAPSHOT
		err = zfsctl_lookup_objset(vfsp, objsetid, &zsb);
		if (err)
			return (EINVAL);
#endif /* HAVE_SNAPSHOT */
		ZFS_ENTER(zsb);
	}
	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
		zfid_short_t *zfid = (zfid_short_t *)fidp;

		for (i = 0; i < sizeof (zfid->zf_object); i++)
			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

		for (i = 0; i < sizeof (zfid->zf_gen); i++)
			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
	} else {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}
#ifdef HAVE_SNAPSHOT
	/* A zero fid_gen means we are in the .zfs control directories */
	if (fid_gen == 0 &&
	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
		*ipp = zsb->z_ctldir;
		ASSERT(*ipp != NULL);
		if (object == ZFSCTL_INO_SNAPDIR) {
			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp, NULL,
			    0, NULL, NULL, NULL, NULL, NULL) == 0);
		}
		ZFS_EXIT(zsb);
		return (0);
	}
#endif /* HAVE_SNAPSHOT */
	gen_mask = -1ULL >> (64 - 8 * i);
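	/*
	 * After the loops above, 'i' is the number of generation bytes
	 * in the fid, so this mask covers exactly the generation bits
	 * that were actually stored.
	 */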
	dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
	if ((err = zfs_zget(zsb, object, &zp))) {
		ZFS_EXIT(zsb);
		return (err);
	}

	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb), &zp_gen,
	    sizeof (uint64_t));
	zp_gen = zp_gen & gen_mask;
	if (zp_gen == 0)
		zp_gen = 1;
	if (zp->z_unlinked || zp_gen != fid_gen) {
		dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
		iput(ZTOI(zp));
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	*ipp = ZTOI(zp);
	if (*ipp)
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_vget);
/*
 * Block out VOPs and close zfs_sb_t::z_os
 *
 * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held.
 */
int
zfs_suspend_fs(zfs_sb_t *zsb)
{
	int error;

	if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0)
		return (error);
	dmu_objset_disown(zsb->z_os, zsb);

	return (0);
}
EXPORT_SYMBOL(zfs_suspend_fs);
/*
 * Reopen zfs_sb_t::z_os and release VOPs.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;

	ASSERT(RRW_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	err = dmu_objset_own(osname, DMU_OST_ZFS, B_FALSE, zsb, &zsb->z_os);
	if (err) {
		zsb->z_os = NULL;
	} else {
		znode_t *zp;
		uint64_t sa_obj = 0;

		err2 = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj);

		if ((err || err2) && zsb->z_version >= ZPL_VERSION_SA)
			goto bail;

		if ((err = sa_setup(zsb->z_os, sa_obj,
		    zfs_attr_table, ZPL_END, &zsb->z_attr_table)) != 0)
			goto bail;

		VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);

		/*
		 * Attempt to re-establish all the active znodes with
		 * their dbufs.  If a zfs_rezget() fails, then we'll let
		 * any potential callers discover that via ZFS_ENTER_VERIFY_VP
		 * when they try to use their znode.
		 */
		mutex_enter(&zsb->z_znodes_lock);
		for (zp = list_head(&zsb->z_all_znodes); zp;
		    zp = list_next(&zsb->z_all_znodes, zp)) {
			(void) zfs_rezget(zp);
		}
		mutex_exit(&zsb->z_znodes_lock);
	}

bail:
	/* release the VOPs */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrw_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't reopen zfs_sb_t::z_os, force
		 * unmount this file system.
		 */
		(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}
EXPORT_SYMBOL(zfs_resume_fs);
int
zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
{
	int error;
	objset_t *os = zsb->z_os;
	dmu_tx_t *tx;

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (EINVAL);

	if (newvers < zsb->z_version)
		return (EINVAL);

	if (zfs_spa_version_map(newvers) >
	    spa_version(dmu_objset_spa(zsb->z_os)))
		return (ENOTSUP);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    ZFS_SA_ATTRS);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	}
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &newvers, tx);
	if (error) {
		dmu_tx_commit(tx);
		return (error);
	}

	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		uint64_t sa_obj;

		ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=,
		    SPA_VERSION_SA);
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);

		error = zap_add(os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT3U(error, ==, 0);

		VERIFY(0 == sa_set_sa_object(os, sa_obj));
		sa_register_update_callback(os, zfs_sa_upgrade);
	}

	spa_history_log_internal(LOG_DS_UPGRADE,
	    dmu_objset_spa(os), tx, "oldver=%llu newver=%llu dataset = %llu",
	    zsb->z_version, newvers, dmu_objset_id(os));

	dmu_tx_commit(tx);

	zsb->z_version = newvers;

	if (zsb->z_version >= ZPL_VERSION_FUID)
		zfs_set_fuid_feature(zsb);

	return (0);
}
EXPORT_SYMBOL(zfs_set_version);
/*
 * Read a property stored within the master node.
 */
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
	const char *pname;
	int error = ENOENT;

	/*
	 * Look up the file system's value for the property.  For the
	 * version property, we look up a slightly different string.
	 */
	if (prop == ZFS_PROP_VERSION)
		pname = ZPL_VERSION_STR;
	else
		pname = zfs_prop_to_name(prop);

	if (os != NULL)
		error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);

	if (error == ENOENT) {
		/* No value set, use the default value */
		switch (prop) {
		case ZFS_PROP_VERSION:
			*value = ZPL_VERSION;
			break;
		case ZFS_PROP_NORMALIZE:
		case ZFS_PROP_UTF8ONLY:
			*value = 0;
			break;
		case ZFS_PROP_CASE:
			*value = ZFS_CASE_SENSITIVE;
			break;
		default:
			return (error);
		}
		error = 0;
	}
	return (error);
}
EXPORT_SYMBOL(zfs_get_zplprop);
void
zfs_init(void)
{
	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
	register_filesystem(&zpl_fs_type);
}

void
zfs_fini(void)
{
	unregister_filesystem(&zpl_fs_type);
}