4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 /* Portions Copyright 2010 Robert Milkowski */
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/sysmacros.h>
32 #include <sys/pathname.h>
33 #include <sys/vnode.h>
35 #include <sys/vfs_opreg.h>
36 #include <sys/mntent.h>
37 #include <sys/mount.h>
38 #include <sys/cmn_err.h>
39 #include "fs/fs_subr.h"
40 #include <sys/zfs_znode.h>
41 #include <sys/zfs_dir.h>
43 #include <sys/fs/zfs.h>
45 #include <sys/dsl_prop.h>
46 #include <sys/dsl_dataset.h>
47 #include <sys/dsl_deleg.h>
51 #include <sys/varargs.h>
52 #include <sys/policy.h>
53 #include <sys/atomic.h>
54 #include <sys/mkdev.h>
55 #include <sys/modctl.h>
56 #include <sys/refstr.h>
57 #include <sys/zfs_ioctl.h>
58 #include <sys/zfs_ctldir.h>
59 #include <sys/zfs_fuid.h>
60 #include <sys/bootconf.h>
61 #include <sys/sunddi.h>
63 #include <sys/dmu_objset.h>
64 #include <sys/spa_boot.h>
66 #include "zfs_comutil.h"
/*
 * Module-scope state and the ZFS mount-option table.
 *
 * NOTE(review): this paste is missing lines (closing braces of the
 * mntopts[] array and zfs_mntopts struct, and the mntopts field
 * reference inside zfs_mntopts) — the original tokens below are kept
 * byte-identical; only comments were added.
 */
69 extern int sys_shutdown;
/* Forward declarations for the vfs operations registered by this module. */
71 static int zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr);
72 static int zfs_mountroot(vfs_t *vfsp, enum whymountroot);
73 static void zfs_freevfs(vfs_t *vfsp);
76 * We need to keep a count of active fs's.
77 * This is necessary to prevent our module
78 * from being unloaded after a umount -f
80 static uint32_t zfs_active_fs_count = 0;
/* Each option lists the option(s) it cancels, e.g. "noatime" cancels "atime". */
82 static char *noatime_cancel[] = { MNTOPT_ATIME, NULL };
83 static char *atime_cancel[] = { MNTOPT_NOATIME, NULL };
84 static char *noxattr_cancel[] = { MNTOPT_XATTR, NULL };
85 static char *xattr_cancel[] = { MNTOPT_NOXATTR, NULL };
88 * MO_DEFAULT is not used since the default value is determined
89 * by the equivalent property.
91 static mntopt_t mntopts[] = {
92 { MNTOPT_NOXATTR, noxattr_cancel, NULL, 0, NULL },
93 { MNTOPT_XATTR, xattr_cancel, NULL, 0, NULL },
94 { MNTOPT_NOATIME, noatime_cancel, NULL, 0, NULL },
95 { MNTOPT_ATIME, atime_cancel, NULL, 0, NULL }
98 static mntopts_t zfs_mntopts = {
99 sizeof (mntopts) / sizeof (mntopt_t),
/*
 * VFS_SYNC entry point.  With a non-NULL vfsp, commit that filesystem's
 * ZIL; with a NULL vfsp (sync(1M)), the (elided) tail syncs all pools.
 * Never writes during panic — a compromised kernel must not touch the pool.
 *
 * NOTE(review): extraction dropped the return type, braces, and several
 * statements (e.g. the early returns); original tokens kept byte-identical.
 */
105 zfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
108 * Data integrity is job one. We don't want a compromised kernel
109 * writing to the storage pool, so we never sync during panic.
115 * SYNC_ATTR is used by fsflush() to force old filesystems like UFS
116 * to sync metadata, which they would otherwise cache indefinitely.
117 * Semantically, the only requirement is that the sync be initiated.
118 * The DMU syncs out txgs frequently, so there's nothing to do.
120 if (flag & SYNC_ATTR)
125 * Sync a specific filesystem.
127 zfsvfs_t *zfsvfs = vfsp->vfs_data;
131 dp = dmu_objset_pool(zfsvfs->z_os);
134 * If the system is shutting down, then skip any
135 * filesystems which may exist on a suspended pool.
137 if (sys_shutdown && spa_suspended(dp->dp_spa)) {
/* zil_commit with a 0 foid commits all pending ZIL records for this fs. */
142 if (zfsvfs->z_log != NULL)
143 zil_commit(zfsvfs->z_log, 0);
148 * Sync all ZFS filesystems. This is what happens when you
149 * run sync(1M). Unlike other filesystems, ZFS honors the
150 * request by waiting for all pools to commit all dirty data.
157 EXPORT_SYMBOL(zfs_sync);
160 atime_changed_cb(void *arg, uint64_t newval)
162 zfsvfs_t *zfsvfs = arg;
164 if (newval == TRUE) {
165 zfsvfs->z_atime = TRUE;
166 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME);
167 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0);
169 zfsvfs->z_atime = FALSE;
170 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME);
171 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0);
176 xattr_changed_cb(void *arg, uint64_t newval)
178 zfsvfs_t *zfsvfs = arg;
180 if (newval == TRUE) {
181 /* XXX locking on vfs_flag? */
182 zfsvfs->z_vfs->vfs_flag |= VFS_XATTR;
183 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR);
184 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_XATTR, NULL, 0);
186 /* XXX locking on vfs_flag? */
187 zfsvfs->z_vfs->vfs_flag &= ~VFS_XATTR;
188 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_XATTR);
189 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR, NULL, 0);
194 blksz_changed_cb(void *arg, uint64_t newval)
196 zfsvfs_t *zfsvfs = arg;
198 if (newval < SPA_MINBLOCKSIZE ||
199 newval > SPA_MAXBLOCKSIZE || !ISP2(newval))
200 newval = SPA_MAXBLOCKSIZE;
202 zfsvfs->z_max_blksz = newval;
203 zfsvfs->z_vfs->vfs_bsize = newval;
207 readonly_changed_cb(void *arg, uint64_t newval)
209 zfsvfs_t *zfsvfs = arg;
212 /* XXX locking on vfs_flag? */
213 zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
214 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW);
215 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0);
217 /* XXX locking on vfs_flag? */
218 zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
219 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO);
220 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0);
225 devices_changed_cb(void *arg, uint64_t newval)
227 zfsvfs_t *zfsvfs = arg;
229 if (newval == FALSE) {
230 zfsvfs->z_vfs->vfs_flag |= VFS_NODEVICES;
231 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES);
232 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES, NULL, 0);
234 zfsvfs->z_vfs->vfs_flag &= ~VFS_NODEVICES;
235 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES);
236 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES, NULL, 0);
241 setuid_changed_cb(void *arg, uint64_t newval)
243 zfsvfs_t *zfsvfs = arg;
245 if (newval == FALSE) {
246 zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID;
247 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID);
248 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0);
250 zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID;
251 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID);
252 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0);
257 exec_changed_cb(void *arg, uint64_t newval)
259 zfsvfs_t *zfsvfs = arg;
261 if (newval == FALSE) {
262 zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC;
263 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC);
264 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0);
266 zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC;
267 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC);
268 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0);
273 * The nbmand mount option can be changed at mount time.
274 * We can't allow it to be toggled on live file systems or incorrect
275 * behavior may be seen from cifs clients
277 * This property isn't registered via dsl_prop_register(), but this callback
278 * will be called when a file system is first mounted
281 nbmand_changed_cb(void *arg, uint64_t newval)
283 zfsvfs_t *zfsvfs = arg;
284 if (newval == FALSE) {
285 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND);
286 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND, NULL, 0);
288 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND);
289 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND, NULL, 0);
294 snapdir_changed_cb(void *arg, uint64_t newval)
296 zfsvfs_t *zfsvfs = arg;
298 zfsvfs->z_show_ctldir = newval;
302 vscan_changed_cb(void *arg, uint64_t newval)
304 zfsvfs_t *zfsvfs = arg;
306 zfsvfs->z_vscan = newval;
310 acl_inherit_changed_cb(void *arg, uint64_t newval)
312 zfsvfs_t *zfsvfs = arg;
314 zfsvfs->z_acl_inherit = newval;
/*
 * Register dsl property callbacks for this mounted filesystem, then
 * replay any temporary mount-option overrides the admin supplied
 * (registering clobbers the options, so they are stashed first and
 * reapplied afterward via the *_changed_cb functions).  On error, the
 * tail unregisters whatever was registered (ENOMSG is expected for
 * callbacks never registered and ignored).
 *
 * NOTE(review): extraction dropped many lines here (return type,
 * braces, the stash assignments such as "readonly = B_FALSE", the
 * "do_*" guards around the restore calls, and the error-path label);
 * original tokens kept byte-identical, comments only added.
 */
318 zfs_register_callbacks(vfs_t *vfsp)
320 struct dsl_dataset *ds = NULL;
322 zfsvfs_t *zfsvfs = NULL;
/* Stashed option values and flags saying whether each was explicitly set. */
324 int readonly, do_readonly = B_FALSE;
325 int setuid, do_setuid = B_FALSE;
326 int exec, do_exec = B_FALSE;
327 int devices, do_devices = B_FALSE;
328 int xattr, do_xattr = B_FALSE;
329 int atime, do_atime = B_FALSE;
333 zfsvfs = vfsp->vfs_data;
338 * The act of registering our callbacks will destroy any mount
339 * options we may have. In order to enable temporary overrides
340 * of mount options, we stash away the current values and
341 * restore them after we register the callbacks.
/* A pool that is not writeable forces the mount read-only. */
343 if (vfs_optionisset(vfsp, MNTOPT_RO, NULL) ||
344 !spa_writeable(dmu_objset_spa(os))) {
346 do_readonly = B_TRUE;
347 } else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
349 do_readonly = B_TRUE;
/* "nosuid" implies both "nodevices" and "nosetuid". */
351 if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
357 if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL)) {
360 } else if (vfs_optionisset(vfsp, MNTOPT_DEVICES, NULL)) {
365 if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
368 } else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) {
373 if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) {
376 } else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) {
380 if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
383 } else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) {
387 if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL)) {
390 } else if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL)) {
396 * nbmand is a special property. It can only be changed at
399 * This is weird, but it is documented to only be changeable
/* nbmand has no registered callback; read its current value directly. */
402 if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) {
404 } else if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL)) {
407 char osname[MAXNAMELEN];
409 dmu_objset_name(os, osname);
410 if ((error = dsl_prop_get_integer(osname, "nbmand", &nbmand,
417 * Register property callbacks.
419 * It would probably be fine to just check for i/o error from
420 * the first prop_register(), but I guess I like to go
/* Chain registrations; first failure short-circuits the rest. */
423 ds = dmu_objset_ds(os);
424 error = dsl_prop_register(ds, "atime", atime_changed_cb, zfsvfs);
425 error = error ? error : dsl_prop_register(ds,
426 "xattr", xattr_changed_cb, zfsvfs);
427 error = error ? error : dsl_prop_register(ds,
428 "recordsize", blksz_changed_cb, zfsvfs);
429 error = error ? error : dsl_prop_register(ds,
430 "readonly", readonly_changed_cb, zfsvfs);
431 error = error ? error : dsl_prop_register(ds,
432 "devices", devices_changed_cb, zfsvfs);
433 error = error ? error : dsl_prop_register(ds,
434 "setuid", setuid_changed_cb, zfsvfs);
435 error = error ? error : dsl_prop_register(ds,
436 "exec", exec_changed_cb, zfsvfs);
437 error = error ? error : dsl_prop_register(ds,
438 "snapdir", snapdir_changed_cb, zfsvfs);
439 error = error ? error : dsl_prop_register(ds,
440 "aclinherit", acl_inherit_changed_cb, zfsvfs);
441 error = error ? error : dsl_prop_register(ds,
442 "vscan", vscan_changed_cb, zfsvfs);
447 * Invoke our callbacks to restore temporary mount options.
450 readonly_changed_cb(zfsvfs, readonly);
452 setuid_changed_cb(zfsvfs, setuid);
454 exec_changed_cb(zfsvfs, exec);
456 devices_changed_cb(zfsvfs, devices);
458 xattr_changed_cb(zfsvfs, xattr);
460 atime_changed_cb(zfsvfs, atime);
462 nbmand_changed_cb(zfsvfs, nbmand);
/* Error path: tear down any registrations that did succeed. */
468 * We may attempt to unregister some callbacks that are not
469 * registered, but this is OK; it will simply return ENOMSG,
470 * which we will ignore.
472 (void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zfsvfs);
473 (void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zfsvfs);
474 (void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zfsvfs);
475 (void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zfsvfs);
476 (void) dsl_prop_unregister(ds, "devices", devices_changed_cb, zfsvfs);
477 (void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zfsvfs);
478 (void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zfsvfs);
479 (void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zfsvfs);
480 (void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
482 (void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zfsvfs);
486 EXPORT_SYMBOL(zfs_register_callbacks);
487 #endif /* HAVE_ZPL */
/*
 * DMU space-accounting callback: extract the owning uid/gid from an
 * object's bonus buffer (legacy znode_phys_t, or SA-packed layout) so
 * the DMU can charge user/group space.  Objects of other bonus types
 * are not tracked.
 *
 * NOTE(review): extraction dropped the return statements (the visible
 * comments imply EEXIST/ENOENT returns) and braces; original tokens
 * kept byte-identical, comments only added.
 */
490 zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
491 uint64_t *userp, uint64_t *groupp)
493 znode_phys_t *znp = data;
497 * Is it a valid type of object to track?
499 if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
503 * If we have a NULL data pointer
504 * then assume the id's aren't changing and
505 * return EEXIST to the dmu to let it know to
/* Legacy bonus: uid/gid live at fixed offsets in znode_phys_t. */
511 if (bonustype == DMU_OT_ZNODE) {
512 *userp = znp->zp_uid;
513 *groupp = znp->zp_gid;
/* SA bonus: skip the variable-size SA header, then read packed ids. */
517 ASSERT(bonustype == DMU_OT_SA);
518 hdrsize = sa_hdrsize(data);
521 *userp = *((uint64_t *)((uintptr_t)data + hdrsize +
523 *groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
527 * This should only happen for newly created
528 * files that haven't had the znode data filled
540 fuidstr_to_sid(zfsvfs_t *zfsvfs, const char *fuidstr,
541 char *domainbuf, int buflen, uid_t *ridp)
546 fuid = strtonum(fuidstr, NULL);
548 domain = zfs_fuid_find_by_idx(zfsvfs, FUID_INDEX(fuid));
550 (void) strlcpy(domainbuf, domain, buflen);
553 *ridp = FUID_RID(fuid);
557 zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
560 case ZFS_PROP_USERUSED:
561 return (DMU_USERUSED_OBJECT);
562 case ZFS_PROP_GROUPUSED:
563 return (DMU_GROUPUSED_OBJECT);
564 case ZFS_PROP_USERQUOTA:
565 return (zfsvfs->z_userquota_obj);
566 case ZFS_PROP_GROUPQUOTA:
567 return (zfsvfs->z_groupquota_obj);
/*
 * Iterate the per-user/group space ZAP starting at *cookiep, filling
 * vbuf with zfs_useracct_t entries until the buffer is full or the
 * ZAP is exhausted.  On return *bufsizep is the bytes actually used
 * and *cookiep is the serialized cursor for the next call.
 *
 * NOTE(review): extraction dropped the declarations, braces, the
 * buf++ advance and the early-return/error lines; original tokens
 * kept byte-identical, comments only added.
 */
575 zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
576 uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
581 zfs_useracct_t *buf = vbuf;
584 if (!dmu_objset_userspace_present(zfsvfs->z_os))
587 obj = zfs_userquota_prop_to_obj(zfsvfs, type);
/* Resume the cursor from the caller-supplied serialized position. */
593 for (zap_cursor_init_serialized(&zc, zfsvfs->z_os, obj, *cookiep);
594 (error = zap_cursor_retrieve(&zc, &za)) == 0;
595 zap_cursor_advance(&zc)) {
/* Stop before overflowing the caller's buffer. */
596 if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
600 fuidstr_to_sid(zfsvfs, za.za_name,
601 buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);
603 buf->zu_space = za.za_first_integer;
609 ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
610 *bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
611 *cookiep = zap_cursor_serialize(&zc);
612 zap_cursor_fini(&zc);
615 EXPORT_SYMBOL(zfs_userspace_many);
618 * buf must be big enough (eg, 32 bytes)
621 id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
622 char *buf, boolean_t addok)
627 if (domain && domain[0]) {
628 domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
632 fuid = FUID_ENCODE(domainid, rid);
633 (void) sprintf(buf, "%llx", (longlong_t)fuid);
/*
 * Look up one user/group space or quota value, identified by
 * (type, domain, rid), storing the result in *valp.
 *
 * NOTE(review): extraction dropped the declarations, early returns,
 * and the ENOENT-means-zero handling lines; original tokens kept
 * byte-identical, comments only added.
 */
638 zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
639 const char *domain, uint64_t rid, uint64_t *valp)
647 if (!dmu_objset_userspace_present(zfsvfs->z_os))
650 obj = zfs_userquota_prop_to_obj(zfsvfs, type)
/* B_FALSE: do not add unknown domains on a read-only lookup. */;
654 err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_FALSE);
658 err = zap_lookup(zfsvfs->z_os, obj, buf, 8, 1, valp);
663 EXPORT_SYMBOL(zfs_userspace_one);
/*
 * Set (or, with quota == 0, remove) a user/group quota.  Creates the
 * quota ZAP object on first use (registering it under MASTER_NODE_OBJ),
 * all inside a single assigned transaction; also syncs the FUID table
 * if the domain lookup dirtied it.
 *
 * NOTE(review): extraction dropped declarations, braces, the
 * quota==0/removal branch condition and tx commit/abort lines;
 * original tokens kept byte-identical, comments only added.
 */
666 zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
667 const char *domain, uint64_t rid, uint64_t quota)
673 boolean_t fuid_dirtied;
675 if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
678 if (zfsvfs->z_version < ZPL_VERSION_USERSPACE)
681 objp = (type == ZFS_PROP_USERQUOTA) ? &zfsvfs->z_userquota_obj :
682 &zfsvfs->z_groupquota_obj;
/* addok=B_TRUE: a new domain may be added, which dirties the FUID table. */
684 err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_TRUE);
687 fuid_dirtied = zfsvfs->z_fuid_dirty;
689 tx = dmu_tx_create(zfsvfs->z_os);
690 dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
692 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
693 zfs_userquota_prop_prefixes[type]);
696 zfs_fuid_txhold(zfsvfs, tx);
697 err = dmu_tx_assign(tx, TXG_WAIT);
/* First quota on this fs: create the quota object under z_lock. */
703 mutex_enter(&zfsvfs->z_lock);
705 *objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA,
707 VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
708 zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
710 mutex_exit(&zfsvfs->z_lock);
/* quota of 0 removes the entry; otherwise update it in place. */
713 err = zap_remove(zfsvfs->z_os, *objp, buf, tx);
717 err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, &quota, tx);
721 zfs_fuid_sync(zfsvfs, tx);
725 EXPORT_SYMBOL(zfs_set_userquota);
728 zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
731 uint64_t used, quota, usedobj, quotaobj;
734 usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
735 quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
737 if (quotaobj == 0 || zfsvfs->z_replay)
740 (void) sprintf(buf, "%llx", (longlong_t)fuid);
741 err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, "a);
745 err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used);
748 return (used >= quota);
750 EXPORT_SYMBOL(zfs_fuid_overquota);
753 zfs_owner_overquota(zfsvfs_t *zfsvfs, znode_t *zp, boolean_t isgroup)
758 quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
760 fuid = isgroup ? zp->z_gid : zp->z_uid;
762 if (quotaobj == 0 || zfsvfs->z_replay)
765 return (zfs_fuid_overquota(zfsvfs, isgroup, fuid));
767 EXPORT_SYMBOL(zfs_owner_overquota);
/*
 * Allocate and initialize a zfsvfs_t for the named objset: own the
 * objset (always claiming read-only so snapshots can be opened), read
 * the ZPL properties (version, normalization, utf8only, casesensitivity),
 * look up the master-node objects (root, unlinked set, quota/FUID/shares
 * objects — ENOENT tolerated for optional ones), and set up locks/lists.
 * On success *zfvp receives the new zfsvfs; on failure the objset is
 * disowned and the struct freed.
 *
 * NOTE(review): extraction dropped declarations, braces, "goto out"
 * error-path lines and the out: label; original tokens kept
 * byte-identical, comments only added.
 */
770 zfsvfs_create(const char *osname, zfsvfs_t **zfvp)
778 zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
781 * We claim to always be readonly so we can open snapshots;
782 * other ZPL code will prevent us from writing to snapshots.
784 error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zfsvfs, &os);
786 kmem_free(zfsvfs, sizeof (zfsvfs_t));
791 * Initialize the zfs-specific filesystem structure.
792 * Should probably make this a kmem cache, shuffle fields,
793 * and just bzero up to z_hold_mtx[].
795 zfsvfs->z_vfs = NULL;
796 zfsvfs->z_parent = zfsvfs;
797 zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
798 zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
/* Refuse to mount a filesystem newer than the pool supports. */
801 error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
804 } else if (zfsvfs->z_version >
805 zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
806 (void) printk("Can't mount a version %lld file system "
807 "on a version %lld pool\n. Pool must be upgraded to mount "
808 "this file system.", (u_longlong_t)zfsvfs->z_version,
809 (u_longlong_t)spa_version(dmu_objset_spa(os)));
813 if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
815 zfsvfs->z_norm = (int)zval;
817 if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
819 zfsvfs->z_utf8 = (zval != 0);
821 if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
823 zfsvfs->z_case = (uint_t)zval;
826 * Fold case on file systems that are always or sometimes case
829 if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
830 zfsvfs->z_case == ZFS_CASE_MIXED)
831 zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
833 zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
834 zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
/* System-attribute setup: SA filesystems register their attr tables. */
836 if (zfsvfs->z_use_sa) {
837 /* should either have both of these objects or none */
838 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
844 * Pre SA versions file systems should never touch
845 * either the attribute registration or layout objects.
850 error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
851 &zfsvfs->z_attr_table);
855 if (zfsvfs->z_version >= ZPL_VERSION_SA)
856 sa_register_update_callback(os, zfs_sa_upgrade);
858 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
862 ASSERT(zfsvfs->z_root != 0);
864 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
865 &zfsvfs->z_unlinkedobj);
/* The following master-node entries are optional: ENOENT is not fatal. */
869 error = zap_lookup(os, MASTER_NODE_OBJ,
870 zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
871 8, 1, &zfsvfs->z_userquota_obj);
872 if (error && error != ENOENT)
875 error = zap_lookup(os, MASTER_NODE_OBJ,
876 zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
877 8, 1, &zfsvfs->z_groupquota_obj);
878 if (error && error != ENOENT)
881 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
882 &zfsvfs->z_fuid_obj);
883 if (error && error != ENOENT)
886 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
887 &zfsvfs->z_shares_dir);
888 if (error && error != ENOENT)
891 mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
892 mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
893 list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
894 offsetof(znode_t, z_link_node));
895 rrw_init(&zfsvfs->z_teardown_lock);
896 rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
897 rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
898 for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
899 mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
/* Error path: release the objset and free the partially built struct. */
905 dmu_objset_disown(os, zfsvfs);
907 kmem_free(zfsvfs, sizeof (zfsvfs_t));
/*
 * Finish bringing a zfsvfs online: register property callbacks, attach
 * the zfsvfs as the objset user pointer, open the ZIL, and — when this
 * is a real mount (not an online recv) on a writeable pool — drain the
 * unlinked set and replay (or destroy, if replay is disabled) the
 * intent log, temporarily lifting VFS_RDONLY so replay can write.
 *
 * NOTE(review): extraction dropped braces, the "if (mounting)" guard,
 * and return/error lines; original tokens kept byte-identical,
 * comments only added.
 */
912 zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
916 error = zfs_register_callbacks(zfsvfs->z_vfs);
921 * Set the objset user_ptr to track its zfsvfs.
923 mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
924 dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
925 mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
927 zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
930 * If we are not mounting (ie: online recv), then we don't
931 * have to worry about replaying the log as we blocked all
932 * operations out since we closed the ZIL.
938 * During replay we remove the read only flag to
939 * allow replays to succeed.
941 readonly = zfsvfs->z_vfs->vfs_flag & VFS_RDONLY;
943 zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
945 zfs_unlinked_drain(zfsvfs);
948 * Parse and replay the intent log.
950 * Because of ziltest, this must be done after
951 * zfs_unlinked_drain(). (Further note: ziltest
952 * doesn't use readonly mounts, where
953 * zfs_unlinked_drain() isn't called.) This is because
954 * ziltest causes spa_sync() to think it's committed,
955 * but actually it is not, so the intent log contains
956 * many txg's worth of changes.
958 * In particular, if object N is in the unlinked set in
959 * the last txg to actually sync, then it could be
960 * actually freed in a later txg and then reallocated
961 * in a yet later txg. This would write a "create
962 * object N" record to the intent log. Normally, this
963 * would be fine because the spa_sync() would have
964 * written out the fact that object N is free, before
965 * we could write the "create object N" intent log
968 * But when we are in ziltest mode, we advance the "open
969 * txg" without actually spa_sync()-ing the changes to
970 * disk. So we would see that object N is still
971 * allocated and in the unlinked set, and there is an
972 * intent log record saying to allocate it.
974 if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
975 if (zil_replay_disable) {
976 zil_destroy(zfsvfs->z_log, B_FALSE);
/* z_replay flags in-flight operations as log replay, not user I/O. */
978 zfsvfs->z_replay = B_TRUE;
979 zil_replay(zfsvfs->z_os, zfsvfs,
981 zfsvfs->z_replay = B_FALSE;
984 zfsvfs->z_vfs->vfs_flag |= readonly; /* restore readonly bit */
991 zfsvfs_free(zfsvfs_t *zfsvfs)
994 extern krwlock_t zfsvfs_lock; /* in zfs_znode.c */
997 * This is a barrier to prevent the filesystem from going away in
998 * zfs_znode_move() until we can safely ensure that the filesystem is
999 * not unmounted. We consider the filesystem valid before the barrier
1000 * and invalid after the barrier.
1002 rw_enter(&zfsvfs_lock, RW_READER);
1003 rw_exit(&zfsvfs_lock);
1005 zfs_fuid_destroy(zfsvfs);
1007 mutex_destroy(&zfsvfs->z_znodes_lock);
1008 mutex_destroy(&zfsvfs->z_lock);
1009 list_destroy(&zfsvfs->z_all_znodes);
1010 rrw_destroy(&zfsvfs->z_teardown_lock);
1011 rw_destroy(&zfsvfs->z_teardown_inactive_lock);
1012 rw_destroy(&zfsvfs->z_fuid_lock);
1013 for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
1014 mutex_destroy(&zfsvfs->z_hold_mtx[i]);
1015 kmem_free(zfsvfs, sizeof (zfsvfs_t));
1019 zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
1021 zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
1022 if (zfsvfs->z_use_fuids && zfsvfs->z_vfs) {
1023 vfs_set_feature(zfsvfs->z_vfs, VFSFT_XVATTR);
1024 vfs_set_feature(zfsvfs->z_vfs, VFSFT_SYSATTR_VIEWS);
1025 vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACEMASKONACCESS);
1026 vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACLONCREATE);
1027 vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACCESS_FILTER);
1028 vfs_set_feature(zfsvfs->z_vfs, VFSFT_REPARSE);
1030 zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
/*
 * Core of the mount path: build the zfsvfs for osname, wire it to the
 * generic vfs (block size from "recordsize", 64-bit fsid from the
 * objset guid), advertise case/xattr/zerocopy features, special-case
 * snapshot mounts (forced noatime+readonly, sync disabled), and run
 * zfsvfs_setup() for regular filesystems.
 *
 * NOTE(review): extraction dropped declarations, braces, the snapshot
 * else-branch boundary, error labels and the final return; original
 * tokens kept byte-identical, comments only added.
 */
1034 zfs_domount(vfs_t *vfsp, char *osname)
1036 uint64_t recordsize, fsid_guid;
1043 error = zfsvfs_create(osname, &zfsvfs);
1046 zfsvfs->z_vfs = vfsp;
1048 /* Initialize the generic filesystem structure. */
1049 vfsp->vfs_bcount = 0;
1050 vfsp->vfs_data = NULL;
1052 if ((error = dsl_prop_get_integer(osname, "recordsize",
1053 &recordsize, NULL)))
1056 vfsp->vfs_bsize = recordsize;
1057 vfsp->vfs_flag |= VFS_NOTRUNC;
1058 vfsp->vfs_data = zfsvfs;
1061 * The fsid is 64 bits, composed of an 8-bit fs type, which
1062 * separates our fsid from any other filesystem types, and a
1063 * 56-bit objset unique ID. The objset unique ID is unique to
1064 * all objsets open on this system, provided by unique_create().
1065 * The 8-bit fs type must be put in the low bits of fsid[1]
1066 * because that's where other Solaris filesystems put it.
1068 fsid_guid = dmu_objset_fsid_guid(zfsvfs->z_os);
1069 ASSERT((fsid_guid & ~((1ULL<<56)-1)) == 0);
1070 vfsp->vfs_fsid.val[0] = fsid_guid;
1071 vfsp->vfs_fsid.val[1] = ((fsid_guid>>32) << 8);
1074 * Set features for file system.
1076 zfs_set_fuid_feature(zfsvfs);
1077 if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
1078 vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
1079 vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
1080 vfs_set_feature(vfsp, VFSFT_NOCASESENSITIVE);
1081 } else if (zfsvfs->z_case == ZFS_CASE_MIXED) {
1082 vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
1083 vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
1085 vfs_set_feature(vfsp, VFSFT_ZEROCOPY_SUPPORTED);
/* Snapshots mount read-only with atime off and the ZIL/sync disabled. */
1087 if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
1090 atime_changed_cb(zfsvfs, B_FALSE);
1091 readonly_changed_cb(zfsvfs, B_TRUE);
1092 if ((error = dsl_prop_get_integer(osname,"xattr",&pval,NULL)))
1094 xattr_changed_cb(zfsvfs, pval);
1095 zfsvfs->z_issnap = B_TRUE;
1096 zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
1098 mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
1099 dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
1100 mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
1102 error = zfsvfs_setup(zfsvfs, B_TRUE);
/* Create the .zfs control directory for non-snapshot mounts. */
1105 if (!zfsvfs->z_issnap)
1106 zfsctl_create(zfsvfs);
/* Error path: disown the objset and free the zfsvfs. */
1109 dmu_objset_disown(zfsvfs->z_os, zfsvfs);
1110 zfsvfs_free(zfsvfs);
1112 atomic_add_32(&zfs_active_fs_count, 1);
1117 EXPORT_SYMBOL(zfs_domount);
/*
 * Undo zfs_register_callbacks(): unregister every dsl property callback
 * for this filesystem.  Snapshots never registered any, so they are
 * skipped.  Each unregister is VERIFY'd because at this point the
 * callbacks are known to be registered.
 *
 * NOTE(review): extraction dropped the "zfsvfs) == 0);" continuation
 * lines of several VERIFY calls and the braces; original tokens kept
 * byte-identical, comments only added.
 */
1120 zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
1122 objset_t *os = zfsvfs->z_os;
1123 struct dsl_dataset *ds;
1126 * Unregister properties.
1128 if (!dmu_objset_is_snapshot(os)) {
1129 ds = dmu_objset_ds(os);
1130 VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb,
1133 VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb,
1136 VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb,
1139 VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb,
1142 VERIFY(dsl_prop_unregister(ds, "devices", devices_changed_cb,
1145 VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb,
1148 VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb,
1151 VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
1154 VERIFY(dsl_prop_unregister(ds, "aclinherit",
1155 acl_inherit_changed_cb, zfsvfs) == 0);
1157 VERIFY(dsl_prop_unregister(ds, "vscan",
1158 vscan_changed_cb, zfsvfs) == 0);
1161 EXPORT_SYMBOL(zfs_unregister_callbacks);
#ifdef HAVE_MLSLABEL
/*
 * zfs_check_global_label:
 *	Check that the hex label string is appropriate for the dataset
 *	being mounted into the global_zone proper.
 *
 *	Return an error if the hex label string is not default or
 *	admin_low/admin_high.  For admin_low labels, the corresponding
 *	dataset must be readonly.
 */
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
	if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
		return (0);
	if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
		/* must be readonly */
		uint64_t rdonly;

		if (dsl_prop_get_integer(dsname,
		    zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
			return (EACCES);
		return (rdonly ? 0 : EACCES);
	}
	return (EACCES);
}
#endif /* HAVE_MLSLABEL */
/*
 * NOTE(review): extraction dropped declarations, braces, several
 * retv assignments and the final return; original tokens kept
 * byte-identical, comments only added.
 */
1194 * zfs_mount_label_policy:
1195 * Determine whether the mount is allowed according to MAC check.
1196 * by comparing (where appropriate) label of the dataset against
1197 * the label of the zone being mounted into. If the dataset has
1198 * no label, create one.
1201 * 0 : access allowed
1202 * >0 : error code, such as EACCES
1205 zfs_mount_label_policy(vfs_t *vfsp, char *osname)
1208 zone_t *mntzone = NULL;
1209 ts_label_t *mnt_tsl;
1212 char ds_hexsl[MAXNAMELEN];
1214 retv = EACCES; /* assume the worst */
1217 * Start by getting the dataset label if it exists.
1219 error = dsl_prop_get(osname, zfs_prop_to_name(ZFS_PROP_MLSLABEL),
1220 1, sizeof (ds_hexsl), &ds_hexsl, NULL);
1225 * If labeling is NOT enabled, then disallow the mount of datasets
1226 * which have a non-default label already. No other label checks
1229 if (!is_system_labeled()) {
1230 if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
1236 * Get the label of the mountpoint. If mounting into the global
1237 * zone (i.e. mountpoint is not within an active zone and the
1238 * zoned property is off), the label must be default or
1239 * admin_low/admin_high only; no other checks are needed.
1241 mntzone = zone_find_by_any_path(refstr_value(vfsp->vfs_mntpt), B_FALSE);
1242 if (mntzone->zone_id == GLOBAL_ZONEID) {
1247 if (dsl_prop_get_integer(osname,
1248 zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, NULL))
1251 return (zfs_check_global_label(osname, ds_hexsl));
1254 * This is the case of a zone dataset being mounted
1255 * initially, before the zone has been fully created;
1256 * allow this mount into global zone.
/* Mounting into a labeled (non-global) zone: compare zone vs dataset label. */
1261 mnt_tsl = mntzone->zone_slabel;
1262 ASSERT(mnt_tsl != NULL);
1263 label_hold(mnt_tsl);
1264 mnt_sl = label2bslabel(mnt_tsl);
1266 if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) == 0) {
1268 * The dataset doesn't have a real label, so fabricate one.
1272 if (l_to_str_internal(mnt_sl, &str) == 0 &&
1273 dsl_prop_set(osname, zfs_prop_to_name(ZFS_PROP_MLSLABEL),
1274 ZPROP_SRC_LOCAL, 1, strlen(str) + 1, str) == 0)
1277 kmem_free(str, strlen(str) + 1);
1278 } else if (hexstr_to_label(ds_hexsl, &ds_sl) == 0) {
1280 * Now compare labels to complete the MAC check. If the
1281 * labels are equal then allow access. If the mountpoint
1282 * label dominates the dataset label, allow readonly access.
1283 * Otherwise, access is denied.
1285 if (blequal(mnt_sl, &ds_sl))
1287 else if (bldominates(mnt_sl, &ds_sl)) {
1288 vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
1293 label_rele(mnt_tsl);
/*
 * Mount/remount/unmount the root filesystem.  ROOT_INIT imports the
 * boot pool named by the "zfs-bootfs" boot property, mounts the bootfs
 * dataset, flags its root vnode VROOT and adds the vfs; ROOT_REMOUNT
 * refreshes mount options; ROOT_UNMOUNT syncs (the root fs is never
 * truly unmounted).  Any other "why" is unsupported.
 *
 * NOTE(review): extraction dropped declarations, braces, early-return
 * and unlock lines; original tokens kept byte-identical, comments
 * only added.
 */
1299 zfs_mountroot(vfs_t *vfsp, enum whymountroot why)
1302 static int zfsrootdone = 0;
1303 zfsvfs_t *zfsvfs = NULL;
1312 * The filesystem that we mount as root is defined in the
1313 * boot property "zfs-bootfs" with a format of
1314 * "poolname/root-dataset-objnum".
1316 if (why == ROOT_INIT) {
1320 * the process of doing a spa_load will require the
1321 * clock to be set before we could (for example) do
1322 * something better by looking at the timestamp on
1323 * an uberblock, so just set it to -1.
1327 if ((zfs_bootfs = spa_get_bootprop("zfs-bootfs")) == NULL) {
1328 cmn_err(CE_NOTE, "spa_get_bootfs: can not get "
1332 zfs_devid = spa_get_bootprop("diskdevid");
1333 error = spa_import_rootpool(rootfs.bo_name, zfs_devid);
1335 spa_free_bootprop(zfs_devid);
1337 spa_free_bootprop(zfs_bootfs);
1338 cmn_err(CE_NOTE, "spa_import_rootpool: error %d",
/* Old-style "if (error = f())" assignments-in-conditions follow. */
1342 if (error = zfs_parse_bootfs(zfs_bootfs, rootfs.bo_name)) {
1343 spa_free_bootprop(zfs_bootfs);
1344 cmn_err(CE_NOTE, "zfs_parse_bootfs: error %d",
1349 spa_free_bootprop(zfs_bootfs);
1351 if (error = vfs_lock(vfsp))
1354 if (error = zfs_domount(vfsp, rootfs.bo_name)) {
1355 cmn_err(CE_NOTE, "zfs_domount: error %d", error);
1359 zfsvfs = (zfsvfs_t *)vfsp->vfs_data;
1361 if (error = zfs_zget(zfsvfs, zfsvfs->z_root, &zp)) {
1362 cmn_err(CE_NOTE, "zfs_zget: error %d", error);
1367 mutex_enter(&vp->v_lock);
1368 vp->v_flag |= VROOT;
1369 mutex_exit(&vp->v_lock);
1373 * Leave rootvp held. The root file system is never unmounted.
1376 vfs_add((struct vnode *)0, vfsp,
1377 (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
1381 } else if (why == ROOT_REMOUNT) {
1382 readonly_changed_cb(vfsp->vfs_data, B_FALSE);
1383 vfsp->vfs_flag |= VFS_REMOUNT;
1385 /* refresh mount options */
1386 zfs_unregister_callbacks(vfsp->vfs_data);
1387 return (zfs_register_callbacks(vfsp));
1389 } else if (why == ROOT_UNMOUNT) {
1390 zfs_unregister_callbacks((zfsvfs_t *)vfsp->vfs_data);
1391 (void) zfs_sync(vfsp, 0, 0);
1396 * if "why" is equal to anything else other than ROOT_INIT,
1397 * ROOT_REMOUNT, or ROOT_UNMOUNT, we do not support it.
/*
 * VFS mount entry point: validate the mount point and caller privilege,
 * then mount the dataset named by uap->spec onto mvp.
 * NOTE(review): several lines of this function are elided in this view;
 * the comments below describe only the code that is visible.
 */
1404 zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
/* The "special" argument may live in kernel or user space. */
1409 uio_seg_t fromspace = (uap->flags & MS_SYSSPACE) ?
1410 UIO_SYSSPACE : UIO_USERSPACE;
/* Mount point must be a directory. */
1413 if (mvp->v_type != VDIR)
/*
 * Unless remounting or overlay-mounting, refuse a busy mount point
 * (extra vnode holds) or the root vnode itself.
 */
1416 mutex_enter(&mvp->v_lock);
1417 if ((uap->flags & MS_REMOUNT) == 0 &&
1418 (uap->flags & MS_OVERLAY) == 0 &&
1419 (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
1420 mutex_exit(&mvp->v_lock);
1423 mutex_exit(&mvp->v_lock);
1426 * ZFS does not support passing unparsed data in via MS_DATA.
1427 * Users should use the MS_OPTIONSTR interface; this means
1428 * that all option parsing is already done and the options struct
1429 * can be interrogated.
1431 if ((uap->flags & MS_DATA) && uap->datalen > 0)
1435 * Get the objset name (the "special" mount argument).
1437 if ((error = pn_get(uap->spec, fromspace, &spn)))
1440 osname = spn.pn_path;
1443 * Check for mount privilege?
1445 * If we don't have privilege then see if
1446 * we have local permission to allow it
1448 error = secpolicy_fs_mount(cr, mvp, vfsp);
/* Fall back to ZFS delegated administration ("mount" permission). */
1450 if (dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) == 0) {
1454 * Make sure user is the owner of the mount point
1455 * or has sufficient privileges.
1458 vattr.va_mask = AT_UID;
1460 if (VOP_GETATTR(mvp, &vattr, 0, cr, NULL)) {
1464 if (secpolicy_vnode_owner(cr, vattr.va_uid) != 0 &&
1465 VOP_ACCESS(mvp, VWRITE, 0, cr, NULL) != 0) {
/* Delegated mount succeeds with restricted options. */
1468 secpolicy_fs_mount_clearopts(cr, vfsp);
1475 * Refuse to mount a filesystem if we are in a local zone and the
1476 * dataset is not visible.
1478 if (!INGLOBALZONE(curproc) &&
1479 (!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
/* Enforce multilevel-security label policy for the dataset. */
1484 error = zfs_mount_label_policy(vfsp, osname);
1489 * When doing a remount, we simply refresh our temporary properties
1490 * according to those options set in the current VFS options.
1492 if (uap->flags & MS_REMOUNT) {
1493 /* refresh mount options */
1494 zfs_unregister_callbacks(vfsp->vfs_data);
1495 error = zfs_register_callbacks(vfsp);
/* Fresh mount: do the real work. */
1499 error = zfs_domount(vfsp, osname);
1502 * Add an extra VFS_HOLD on our parent vfs so that it can't
1503 * disappear due to a forced unmount.
1505 if (error == 0 && ((zfsvfs_t *)vfsp->vfs_data)->z_issnap)
1506 VFS_HOLD(mvp->v_vfsp);
/*
 * Fill in *statp with file system statistics derived from the objset's
 * space accounting (dmu_objset_space).
 * NOTE(review): a few lines of this function are elided in this view.
 */
1514 zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp)
1516 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1518 uint64_t refdbytes, availbytes, usedobjs, availobjs;
1522 dmu_objset_space(zfsvfs->z_os,
1523 &refdbytes, &availbytes, &usedobjs, &availobjs);
1526 * The underlying storage pool actually uses multiple block sizes.
1527 * We report the fragsize as the smallest block size we support,
1528 * and we report our blocksize as the filesystem's maximum blocksize.
1530 statp->f_frsize = 1UL << SPA_MINBLOCKSHIFT;
1531 statp->f_bsize = zfsvfs->z_max_blksz;
1534 * The following report "total" blocks of various kinds in the
1535 * file system, but reported in terms of f_frsize - the
/* Block counts are expressed in SPA_MINBLOCKSHIFT-sized units (f_frsize). */
1539 statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
1540 statp->f_bfree = availbytes >> SPA_MINBLOCKSHIFT;
1541 statp->f_bavail = statp->f_bfree; /* no root reservation */
1544 * statvfs() should really be called statufs(), because it assumes
1545 * static metadata. ZFS doesn't preallocate files, so the best
1546 * we can do is report the max that could possibly fit in f_files,
1547 * and that minus the number actually used in f_ffree.
1548 * For f_ffree, report the smaller of the number of objects available
1549 * and the number of blocks (each object will take at least a block).
1551 statp->f_ffree = MIN(availobjs, statp->f_bfree);
1552 statp->f_favail = statp->f_ffree; /* no "root reservation" */
1553 statp->f_files = statp->f_ffree + usedobjs;
/* Compress the 64-bit device number into the 32-bit f_fsid. */
1555 (void) cmpldev(&d32, vfsp->vfs_dev);
1556 statp->f_fsid = d32;
1559 * We're a zfs filesystem.
1561 (void) strcpy(statp->f_basetype, MNTTYPE_ZFS);
1563 statp->f_flag = vf_to_stf(vfsp->vfs_flag);
1565 statp->f_namemax = ZFS_MAXNAMELEN;
1568 * We have all of 32 characters to stuff a string here.
1569 * Is there anything useful we could/should provide?
1571 bzero(statp->f_fstr, sizeof (statp->f_fstr));
1576 EXPORT_SYMBOL(zfs_statvfs);
/*
 * Return the root vnode of the file system by looking up the znode
 * stored in zfsvfs->z_root.
 * NOTE(review): error handling lines are elided in this view.
 */
1579 zfs_root(vfs_t *vfsp, vnode_t **vpp)
1581 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1587 error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
1589 *vpp = ZTOV(rootzp);
1594 EXPORT_SYMBOL(zfs_root);
1597 * Teardown the zfsvfs::z_os.
1599 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
1600 * and 'z_teardown_inactive_lock' held.
1603 zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
/* Block all new VOPs by taking the teardown lock as writer. */
1607 rrw_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
1611 * We purge the parent filesystem's vfsp as the parent
1612 * filesystem and all of its snapshots have their vnode's
1613 * v_vfsp set to the parent's filesystem's vfsp. Note,
1614 * 'z_parent' is self referential for non-snapshots.
1616 (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
1620 * Close the zil. NB: Can't close the zil while zfs_inactive
1621 * threads are blocked as zil_close can call zfs_inactive.
1623 if (zfsvfs->z_log) {
1624 zil_close(zfsvfs->z_log);
1625 zfsvfs->z_log = NULL;
1628 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);
1631 * If we are not unmounting (ie: online recv) and someone already
1632 * unmounted this file system while we were doing the switcheroo,
1633 * or a reopen of z_os failed then just bail out now.
1635 if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
1636 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1637 rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
1642 * At this point there are no vops active, and any new vops will
1643 * fail with EIO since we have z_teardown_lock for writer (only
1644 * relevant for forced unmount).
1646 * Release all holds on dbufs.
1648 mutex_enter(&zfsvfs->z_znodes_lock);
1649 for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
1650 zp = list_next(&zfsvfs->z_all_znodes, zp))
1652 ASSERT(ZTOV(zp)->v_count > 0);
1653 zfs_znode_dmu_fini(zp);
1655 mutex_exit(&zfsvfs->z_znodes_lock);
1658 * If we are unmounting, set the unmounted flag and let new vops
1659 * unblock. zfs_inactive will have the unmounted behavior, and all
1660 * other vops will fail with EIO.
1663 zfsvfs->z_unmounted = B_TRUE;
1664 rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
1665 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1669 * z_os will be NULL if there was an error in attempting to reopen
1670 * zfsvfs, so just return as the properties had already been
1671 * unregistered and cached data had been evicted before.
1673 if (zfsvfs->z_os == NULL)
1677 * Unregister properties.
1679 zfs_unregister_callbacks(zfsvfs);
/*
 * For a writable objset with dirty data anywhere, wait for the current
 * txg to sync before evicting remaining dbufs.
 */
1684 if (dmu_objset_is_dirty_anywhere(zfsvfs->z_os))
1685 if (!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY))
1686 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
1687 (void) dmu_objset_evict_dbufs(zfsvfs->z_os);
/*
 * VFS unmount entry point: check privilege (or delegated "mount"
 * permission), unmount child snapshots, verify the fs is not busy
 * (unless MS_FORCE), then tear down the zfsvfs and release the objset.
 * NOTE(review): several lines of this function are elided in this view.
 */
1694 zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
1696 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1700 ret = secpolicy_fs_unmount(cr, vfsp);
/* No privilege: fall back to ZFS delegated "mount" permission. */
1702 if (dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
1703 ZFS_DELEG_PERM_MOUNT, cr))
1708 * We purge the parent filesystem's vfsp as the parent filesystem
1709 * and all of its snapshots have their vnode's v_vfsp set to the
1710 * parent's filesystem's vfsp. Note, 'z_parent' is self
1711 * referential for non-snapshots.
1713 (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
1716 * Unmount any snapshots mounted under .zfs before unmounting the
1719 if (zfsvfs->z_ctldir != NULL &&
1720 (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0) {
1724 if (!(fflag & MS_FORCE)) {
1726 * Check the number of active vnodes in the file system.
1727 * Our count is maintained in the vfs structure, but the
1728 * number is off by 1 to indicate a hold on the vfs
1731 * The '.zfs' directory maintains a reference of its
1732 * own, and any active references underneath are
1733 * reflected in the vnode count.
/* Busy check: allow only the implicit holds (vfs itself, and .zfs if present). */
1735 if (zfsvfs->z_ctldir == NULL) {
1736 if (vfsp->vfs_count > 1)
1739 if (vfsp->vfs_count > 2 ||
1740 zfsvfs->z_ctldir->v_count > 1)
1745 vfsp->vfs_flag |= VFS_UNMOUNTED;
/* With 'unmounting' B_TRUE the teardown cannot fail. */
1747 VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
1751 * z_os will be NULL if there was an error in
1752 * attempting to reopen zfsvfs.
1756 * Unset the objset user_ptr.
1758 mutex_enter(&os->os_user_ptr_lock);
1759 dmu_objset_set_user(os, NULL);
1760 mutex_exit(&os->os_user_ptr_lock);
1763 * Finally release the objset
1765 dmu_objset_disown(os, zfsvfs);
1769 * We can now safely destroy the '.zfs' directory node.
1771 if (zfsvfs->z_ctldir != NULL)
1772 zfsctl_destroy(zfsvfs);
1776 EXPORT_SYMBOL(zfs_umount);
/*
 * Translate an NFS-style file identifier (fid) into a vnode.  Handles
 * both short fids (object + generation) and long fids, which additionally
 * carry an objset id + generation for snapshot lookups via .zfs.
 * NOTE(review): several lines of this function are elided in this view.
 */
1779 zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
1781 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1783 uint64_t object = 0;
1784 uint64_t fid_gen = 0;
/* Long fid: decode the little-endian objset id and generation bytes. */
1793 if (fidp->fid_len == LONG_FID_LEN) {
1794 zfid_long_t *zlfid = (zfid_long_t *)fidp;
1795 uint64_t objsetid = 0;
1796 uint64_t setgen = 0;
1798 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
1799 objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
1801 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
1802 setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
/* Resolve the objset id to the (possibly snapshot) zfsvfs. */
1806 err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
/* Both fid flavors share the leading object/generation encoding. */
1812 if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
1813 zfid_short_t *zfid = (zfid_short_t *)fidp;
1815 for (i = 0; i < sizeof (zfid->zf_object); i++)
1816 object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
1818 for (i = 0; i < sizeof (zfid->zf_gen); i++)
1819 fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
1825 /* A zero fid_gen means we are in the .zfs control directories */
1827 (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
1828 *vpp = zfsvfs->z_ctldir;
1829 ASSERT(*vpp != NULL);
1830 if (object == ZFSCTL_INO_SNAPDIR) {
1831 VERIFY(zfsctl_root_lookup(*vpp, "snapshot", vpp, NULL,
1832 0, NULL, NULL, NULL, NULL, NULL) == 0);
/* Mask off generation bits beyond what the fid can encode. */
1840 gen_mask = -1ULL >> (64 - 8 * i);
1842 dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
1843 if ((err = zfs_zget(zfsvfs, object, &zp))) {
/* Compare the znode's stored generation against the fid's. */
1847 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
1849 zp_gen = zp_gen & gen_mask;
/* Stale fid: object was deleted or reused since the fid was issued. */
1852 if (zp->z_unlinked || zp_gen != fid_gen) {
1853 dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
1861 zfs_inode_update(VTOZ(*vpp));
1866 EXPORT_SYMBOL(zfs_vget);
1869 * Block out VOPs and close zfsvfs_t::z_os
1871 * Note, if successful, then we return with the 'z_teardown_lock' and
1872 * 'z_teardown_inactive_lock' write held.
1875 zfs_suspend_fs(zfsvfs_t *zfsvfs)
/* Teardown without unmounting; on success the locks stay write-held. */
1879 if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
/* Release our ownership hold on the objset so it can be reopened later. */
1881 dmu_objset_disown(zfsvfs->z_os, zfsvfs);
1885 EXPORT_SYMBOL(zfs_suspend_fs);
1888 * Reopen zfsvfs_t::z_os and release VOPs.
1891 zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname)
/* Caller must still hold both teardown locks from zfs_suspend_fs(). */
1895 ASSERT(RRW_WRITE_HELD(&zfsvfs->z_teardown_lock));
1896 ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/* Re-own the objset under the same name. */
1898 err = dmu_objset_own(osname, DMU_OST_ZFS, B_FALSE, zfsvfs,
1901 zfsvfs->z_os = NULL;
/* Re-discover the system-attribute (SA) master object, if any. */
1904 uint64_t sa_obj = 0;
1906 err2 = zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
1907 ZFS_SA_ATTRS, 8, 1, &sa_obj);
/* A missing SA object is only fatal on SA-capable ZPL versions. */
1909 if ((err || err2) && zfsvfs->z_version >= ZPL_VERSION_SA)
1913 if ((err = sa_setup(zfsvfs->z_os, sa_obj,
1914 zfs_attr_table, ZPL_END, &zfsvfs->z_attr_table)) != 0)
1917 VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
1920 * Attempt to re-establish all the active znodes with
1921 * their dbufs. If a zfs_rezget() fails, then we'll let
1922 * any potential callers discover that via ZFS_ENTER_VERIFY_VP
1923 * when they try to use their znode.
1925 mutex_enter(&zfsvfs->z_znodes_lock);
1926 for (zp = list_head(&zfsvfs->z_all_znodes); zp;
1927 zp = list_next(&zfsvfs->z_all_znodes, zp)) {
1928 (void) zfs_rezget(zp);
1930 mutex_exit(&zfsvfs->z_znodes_lock);
1935 /* release the VOPs */
1936 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1937 rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
1941 * Since we couldn't reopen zfsvfs::z_os, force
1942 * unmount this file system.
1944 if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0)
1945 (void) dounmount(zfsvfs->z_vfs, MS_FORCE, CRED());
1949 EXPORT_SYMBOL(zfs_resume_fs);
/*
 * VFS free entry point: drop the snapshot's extra parent hold (if any),
 * free the zfsvfs, and decrement the active-filesystem count.
 */
1952 zfs_freevfs(vfs_t *vfsp)
1954 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1957 * If this is a snapshot, we have an extra VFS_HOLD on our parent
1958 * from zfs_mount(). Release it here. If we came through
1959 * zfs_mountroot() instead, we didn't grab an extra hold, so
1960 * skip the VFS_RELE for rootvfs.
1962 if (zfsvfs->z_issnap && (vfsp != rootvfs))
1963 VFS_RELE(zfsvfs->z_parent->z_vfs);
1965 zfsvfs_free(zfsvfs);
1967 atomic_add_32(&zfs_active_fs_count, -1);
1969 #endif /* HAVE_ZPL */
1977 dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
/*
 * Upgrade the ZPL version of the file system to 'newvers'.  Downgrades
 * and versions unsupported by the pool's SPA version are rejected.  When
 * crossing into ZPL_VERSION_SA, also create and register the SA master
 * node.  All on-disk changes happen in a single dmu tx.
 * NOTE(review): several lines of this function are elided in this view.
 */
1989 zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
1992 objset_t *os = zfsvfs->z_os;
/* Reject out-of-range versions. */
1995 if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
/* Reject downgrades. */
1998 if (newvers < zfsvfs->z_version)
/* The pool (SPA) version must support the requested ZPL version. */
2001 if (zfs_spa_version_map(newvers) >
2002 spa_version(dmu_objset_spa(zfsvfs->z_os)))
2005 tx = dmu_tx_create(os);
2006 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
/* Crossing into SA support: also hold the new SA master-node objects. */
2007 if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
2008 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
2010 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
2012 error = dmu_tx_assign(tx, TXG_WAIT);
/* Persist the new version in the master node. */
2018 error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
2019 8, 1, &newvers, tx);
2026 if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
2029 ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
/* Create the SA master node and link it from the master node. */
2031 sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
2032 DMU_OT_NONE, 0, tx);
2034 error = zap_add(os, MASTER_NODE_OBJ,
2035 ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
2036 ASSERT3U(error, ==, 0);
2038 VERIFY(0 == sa_set_sa_object(os, sa_obj));
2039 sa_register_update_callback(os, zfs_sa_upgrade);
/* Record the upgrade in the pool history. */
2042 spa_history_log_internal(LOG_DS_UPGRADE,
2043 dmu_objset_spa(os), tx, "oldver=%llu newver=%llu dataset = %llu",
2044 zfsvfs->z_version, newvers, dmu_objset_id(os));
2048 zfsvfs->z_version = newvers;
/* FUID support becomes available at ZPL_VERSION_FUID. */
2050 if (zfsvfs->z_version >= ZPL_VERSION_FUID)
2051 zfs_set_fuid_feature(zfsvfs);
2055 EXPORT_SYMBOL(zfs_set_version);
2056 #endif /* HAVE_ZPL */
2059 * Read a property stored within the master node.
2062 zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
2068 * Look up the file system's value for the property. For the
2069 * version property, we look up a slightly different string.
2071 if (prop == ZFS_PROP_VERSION)
2072 pname = ZPL_VERSION_STR;
2074 pname = zfs_prop_to_name(prop);
2077 error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
2079 if (error == ENOENT) {
2080 /* No value set, use the default value */
2082 case ZFS_PROP_VERSION:
2083 *value = ZPL_VERSION;
2085 case ZFS_PROP_NORMALIZE:
2086 case ZFS_PROP_UTF8ONLY:
2090 *value = ZFS_CASE_SENSITIVE;