/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * ZFS control directory (a.k.a. ".zfs")
 *
 * This directory provides a common location for all ZFS meta-objects.
 * Currently, this is only the 'snapshot' directory, but this may expand in
 * the future.  The elements are built using the GFS primitives, as the
 * hierarchy does not actually exist on disk.
 *
 * For 'snapshot', we don't want to have all snapshots always mounted, because
 * this would take up a huge amount of space in /etc/mnttab.  We have three
 * types of objects:
 *
 * 	ctldir ------> snapshotdir -------> snapshot
 *                                             |
 *                                             |
 *                                             V
 *                                         mounted fs
 *
 * The 'snapshot' node contains just enough information to lookup '..' and
 * act as a mountpoint for the snapshot.  Whenever we lookup a specific
 * snapshot, we perform an automount of the underlying filesystem and return
 * the corresponding vnode.
 *
 * All mounts are handled automatically by the kernel, but unmounts are
 * (currently) handled from user land.  The main reason is that there is no
 * reliable way to auto-unmount the filesystem when it's "no longer in use".
 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
 * unmounts any snapshots within the snapshot directory.
 *
 * The '.zfs', '.zfs/snapshot', and all directories created under
 * '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
 *
 * File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
 * (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
 * However, vnodes within these mounted-on file systems have their v_vfsp
 * fields set to the head filesystem to make NFS happy (see
 * zfsctl_snapdir_lookup()).  We VFS_HOLD the head filesystem's vfs_t
 * so that it cannot be freed until all snapshots have been unmounted.
 */
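
/*
 * For illustration (not part of the original source): from user land the
 * automount is invisible.  A simple traversal such as
 *
 *	$ ls /tank/home/.zfs/snapshot/monday
 *
 * triggers zfsctl_snapdir_lookup(), which mounts the dataset
 * 'tank/home@monday' (a hypothetical pool/snapshot name) on the GFS node
 * '.zfs/snapshot/monday' and returns the mounted root's vnode.
 */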

#include <fs/fs_subr.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/vfs_opreg.h>
#include <sys/gfs.h>
#include <sys/stat.h>
#include <sys/dmu.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>
#include <sys/sunddi.h>

#include "zfs_namecheck.h"

typedef struct zfsctl_node {
	gfs_dir_t	zc_gfs_private;
	uint64_t	zc_id;
	timestruc_t	zc_cmtime;	/* ctime and mtime, always the same */
} zfsctl_node_t;

typedef struct zfsctl_snapdir {
	zfsctl_node_t	sd_node;
	kmutex_t	sd_lock;
	avl_tree_t	sd_snaps;
} zfsctl_snapdir_t;

typedef struct {
	char		*se_name;
	vnode_t		*se_root;
	avl_node_t	se_node;
} zfs_snapentry_t;

static int
snapentry_compare(const void *a, const void *b)
{
	const zfs_snapentry_t *sa = a;
	const zfs_snapentry_t *sb = b;
	int ret = strcmp(sa->se_name, sb->se_name);

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

vnodeops_t *zfsctl_ops_root;
vnodeops_t *zfsctl_ops_snapdir;
vnodeops_t *zfsctl_ops_snapshot;
vnodeops_t *zfsctl_ops_shares;
vnodeops_t *zfsctl_ops_shares_dir;

static const fs_operation_def_t zfsctl_tops_root[];
static const fs_operation_def_t zfsctl_tops_snapdir[];
static const fs_operation_def_t zfsctl_tops_snapshot[];
static const fs_operation_def_t zfsctl_tops_shares[];

static vnode_t *zfsctl_mknode_snapdir(vnode_t *);
static vnode_t *zfsctl_mknode_shares(vnode_t *);
static vnode_t *zfsctl_snapshot_mknode(vnode_t *, uint64_t objset);
static int zfsctl_unmount_snap(zfs_snapentry_t *, int, cred_t *);

static gfs_opsvec_t zfsctl_opsvec[] = {
	{ ".zfs", zfsctl_tops_root, &zfsctl_ops_root },
	{ ".zfs/snapshot", zfsctl_tops_snapdir, &zfsctl_ops_snapdir },
	{ ".zfs/snapshot/vnode", zfsctl_tops_snapshot, &zfsctl_ops_snapshot },
	{ ".zfs/shares", zfsctl_tops_shares, &zfsctl_ops_shares_dir },
	{ ".zfs/shares/vnode", zfsctl_tops_shares, &zfsctl_ops_shares },
	{ NULL }
};

/*
 * Root directory elements.  We only have two entries:
 * snapshot and shares.
 */
static gfs_dirent_t zfsctl_root_entries[] = {
	{ "snapshot", zfsctl_mknode_snapdir, GFS_CACHE_VNODE },
	{ "shares", zfsctl_mknode_shares, GFS_CACHE_VNODE },
	{ NULL }
};

/* include . and .. in the calculation */
#define	NROOT_ENTRIES	((sizeof (zfsctl_root_entries) / \
    sizeof (gfs_dirent_t)) + 1)
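
/*
 * Worked example (added for clarity): zfsctl_root_entries has three slots
 * (snapshot, shares, and the NULL terminator), so the division yields 3;
 * the terminator slot stands in for one dot entry and the +1 supplies the
 * other, giving 4: 'snapshot', 'shares', '.', and '..'.
 */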

/*
 * Initialize the various GFS pieces we'll need to create and manipulate .zfs
 * directories.  This is called from the ZFS init routine, and initializes the
 * vnode ops vectors that we'll be using.
 */
void
zfsctl_init(void)
{
	VERIFY(gfs_make_opsvec(zfsctl_opsvec) == 0);
}

void
zfsctl_fini(void)
{
	/*
	 * Remove zfsctl vnode ops
	 */
	if (zfsctl_ops_root)
		vn_freevnodeops(zfsctl_ops_root);
	if (zfsctl_ops_snapdir)
		vn_freevnodeops(zfsctl_ops_snapdir);
	if (zfsctl_ops_snapshot)
		vn_freevnodeops(zfsctl_ops_snapshot);
	if (zfsctl_ops_shares)
		vn_freevnodeops(zfsctl_ops_shares);
	if (zfsctl_ops_shares_dir)
		vn_freevnodeops(zfsctl_ops_shares_dir);

	zfsctl_ops_root = NULL;
	zfsctl_ops_snapdir = NULL;
	zfsctl_ops_snapshot = NULL;
	zfsctl_ops_shares = NULL;
	zfsctl_ops_shares_dir = NULL;
}

boolean_t
zfsctl_is_node(vnode_t *vp)
{
	return (vn_matchops(vp, zfsctl_ops_root) ||
	    vn_matchops(vp, zfsctl_ops_snapdir) ||
	    vn_matchops(vp, zfsctl_ops_snapshot) ||
	    vn_matchops(vp, zfsctl_ops_shares) ||
	    vn_matchops(vp, zfsctl_ops_shares_dir));
}

/*
 * Return the inode number associated with the 'snapshot' or
 * 'shares' directory.
 */
/* ARGSUSED */
static ino64_t
zfsctl_root_inode_cb(vnode_t *vp, int index)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;

	ASSERT(index <= 2);

	if (index == 0)
		return (ZFSCTL_INO_SNAPDIR);

	return (zfsvfs->z_shares_dir);
}

/*
 * Create the '.zfs' directory.  This directory is cached as part of the VFS
 * structure.  This results in a hold on the vfs_t.  The code in zfs_umount()
 * therefore checks against a vfs_count of 2 instead of 1.  This reference
 * is removed when the ctldir is destroyed in the unmount.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	vnode_t *vp, *rvp;
	zfsctl_node_t *zcp;
	uint64_t crtime[2];

	ASSERT(zfsvfs->z_ctldir == NULL);

	vp = gfs_root_create(sizeof (zfsctl_node_t), zfsvfs->z_vfs,
	    zfsctl_ops_root, ZFSCTL_INO_ROOT, zfsctl_root_entries,
	    zfsctl_root_inode_cb, MAXNAMELEN, NULL, NULL);
	zcp = vp->v_data;
	zcp->zc_id = ZFSCTL_INO_ROOT;

	VERIFY(VFS_ROOT(zfsvfs->z_vfs, &rvp) == 0);
	VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    &crtime, sizeof (crtime)));
	ZFS_TIME_DECODE(&zcp->zc_cmtime, crtime);
	VN_RELE(rvp);

	/*
	 * We're only faking the fact that we have a root of a filesystem for
	 * the sake of the GFS interfaces.  Undo the flag manipulation it did
	 * for us.
	 */
	vp->v_flag &= ~(VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT);

	zfsvfs->z_ctldir = vp;
}

/*
 * Destroy the '.zfs' directory.  Only called when the filesystem is
 * unmounted.  There might still be more references if we were force
 * unmounted, but only new zfs_inactive() calls can occur and they don't
 * reference .zfs
 */
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
	VN_RELE(zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
}

/*
 * Given a root znode, retrieve the associated .zfs directory.
 * Add a hold to the vnode and return it.
 */
vnode_t *
zfsctl_root(znode_t *zp)
{
	ASSERT(zfs_has_ctldir(zp));
	VN_HOLD(zp->z_zfsvfs->z_ctldir);
	return (zp->z_zfsvfs->z_ctldir);
}

/*
 * Common open routine.  Disallow any write access.
 */
/* ARGSUSED */
static int
zfsctl_common_open(vnode_t **vpp, int flags, cred_t *cr, caller_context_t *ct)
{
	if (flags & FWRITE)
		return (EACCES);

	return (0);
}

/*
 * Common close routine.  Nothing to do here.
 */
/* ARGSUSED */
static int
zfsctl_common_close(vnode_t *vpp, int flags, int count, offset_t off,
    cred_t *cr, caller_context_t *ct)
{
	return (0);
}

/*
 * Common access routine.  Disallow writes.
 */
/* ARGSUSED */
static int
zfsctl_common_access(vnode_t *vp, int mode, int flags, cred_t *cr,
    caller_context_t *ct)
{
	if (flags & V_ACE_MASK) {
		if (mode & ACE_ALL_WRITE_PERMS)
			return (EACCES);
	} else {
		if (mode & VWRITE)
			return (EACCES);
	}

	return (0);
}

/*
 * Common getattr function.  Fill in basic information.
 */
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
	timestruc_t	now;

	vap->va_uid = 0;
	vap->va_gid = 0;
	vap->va_rdev = 0;
	/*
	 * We are a purely virtual object, so we have no
	 * blocksize or allocated blocks.
	 */
	vap->va_blksize = 0;
	vap->va_nblocks = 0;
	vap->va_seq = 0;
	vap->va_fsid = vp->v_vfsp->vfs_dev;
	vap->va_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
	    S_IROTH | S_IXOTH;
	vap->va_type = VDIR;
	/*
	 * We live in the now (for atime).
	 */
	gethrestime(&now);
	vap->va_atime = now;
}

/*ARGSUSED*/
static int
zfsctl_common_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	zfsvfs_t	*zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t	*zcp = vp->v_data;
	uint64_t	object = zcp->zc_id;
	zfid_short_t	*zfid;
	int		i;

	ZFS_ENTER(zfsvfs);

	if (fidp->fid_len < SHORT_FID_LEN) {
		fidp->fid_len = SHORT_FID_LEN;
		ZFS_EXIT(zfsvfs);
		return (ENOSPC);
	}

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = SHORT_FID_LEN;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* .zfs znodes always have a generation number of 0 */
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = 0;

	ZFS_EXIT(zfsvfs);
	return (0);
}
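
/*
 * Worked example (added for clarity): for object id 0x1234 the loop above
 * stores the id little-endian into zf_object, i.e. bytes
 * { 0x34, 0x12, 0x00, ... }, while zf_gen is zero-filled because .zfs
 * znodes always report generation 0.
 */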

/*ARGSUSED*/
static int
zfsctl_shares_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	zfsvfs_t	*zfsvfs = vp->v_vfsp->vfs_data;
	znode_t		*dzp;
	int		error;

	ZFS_ENTER(zfsvfs);

	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (ENOTSUP);
	}

	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		error = VOP_FID(ZTOV(dzp), fidp, ct);
		VN_RELE(ZTOV(dzp));
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * .zfs inode namespace
 *
 * We need to generate unique inode numbers for all files and directories
 * within the .zfs pseudo-filesystem.  We use the following scheme:
 *
 * 	ENTRY			ZFSCTL_INODE
 * 	.zfs			1
 * 	.zfs/snapshot		2
 * 	.zfs/snapshot/<snap>	objectid(snap)
 */

#define	ZFSCTL_INO_SNAP(id)	(id)
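
/*
 * Illustration (added; values follow from the scheme above): for a
 * hypothetical dataset 'tank/fs', stat(2) on the control directories
 * would report
 *
 *	$ ls -di /tank/fs/.zfs /tank/fs/.zfs/snapshot
 *	1 /tank/fs/.zfs
 *	2 /tank/fs/.zfs/snapshot
 *
 * while each entry under .zfs/snapshot reports the object id of its
 * snapshot's objset, via ZFSCTL_INO_SNAP().
 */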

/*
 * Get root directory attributes.
 */
/* ARGSUSED */
static int
zfsctl_root_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t *zcp = vp->v_data;

	ZFS_ENTER(zfsvfs);
	vap->va_nodeid = ZFSCTL_INO_ROOT;
	vap->va_nlink = vap->va_size = NROOT_ENTRIES;
	vap->va_mtime = vap->va_ctime = zcp->zc_cmtime;

	zfsctl_common_getattr(vp, vap);
	ZFS_EXIT(zfsvfs);

	return (0);
}

/*
 * Special case the handling of "..".
 */
/* ARGSUSED */
int
zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ZFS_ENTER(zfsvfs);

	if (strcmp(nm, "..") == 0) {
		err = VFS_ROOT(dvp->v_vfsp, vpp);
	} else {
		err = gfs_vop_lookup(dvp, nm, vpp, pnp, flags, rdir,
		    cr, ct, direntflags, realpnp);
	}

	ZFS_EXIT(zfsvfs);

	return (err);
}

static int
zfsctl_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
	/*
	 * We only care about ACL_ENABLED so that libsec can
	 * display ACLs correctly and not default to POSIX draft.
	 */
	if (cmd == _PC_ACL_ENABLED) {
		*valp = _ACL_ACE_ENABLED;
		return (0);
	}

	return (fs_pathconf(vp, cmd, valp, cr, ct));
}

static const fs_operation_def_t zfsctl_tops_root[] = {
	{ VOPNAME_OPEN, { .vop_open = zfsctl_common_open } },
	{ VOPNAME_CLOSE, { .vop_close = zfsctl_common_close } },
	{ VOPNAME_IOCTL, { .error = fs_inval } },
	{ VOPNAME_GETATTR, { .vop_getattr = zfsctl_root_getattr } },
	{ VOPNAME_ACCESS, { .vop_access = zfsctl_common_access } },
	{ VOPNAME_READDIR, { .vop_readdir = gfs_vop_readdir } },
	{ VOPNAME_LOOKUP, { .vop_lookup = zfsctl_root_lookup } },
	{ VOPNAME_SEEK, { .vop_seek = fs_seek } },
	{ VOPNAME_INACTIVE, { .vop_inactive = gfs_vop_inactive } },
	{ VOPNAME_PATHCONF, { .vop_pathconf = zfsctl_pathconf } },
	{ VOPNAME_FID, { .vop_fid = zfsctl_common_fid } },
	{ NULL }
};

static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;

	if (snapshot_namecheck(name, NULL, NULL) != 0)
		return (EILSEQ);
	dmu_objset_name(os, zname);
	if (strlen(zname) + 1 + strlen(name) >= len)
		return (ENAMETOOLONG);
	(void) strcat(zname, "@");
	(void) strcat(zname, name);
	return (0);
}
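
/*
 * Worked example (added for clarity): if the objset belongs to the
 * hypothetical dataset 'tank/home' and name is 'monday', zname becomes
 * 'tank/home@monday'; the +1 in the length check accounts for the '@'.
 */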

static int
zfsctl_unmount_snap(zfs_snapentry_t *sep, int fflags, cred_t *cr)
{
	vnode_t *svp = sep->se_root;
	int error;

	ASSERT(vn_ismntpt(svp));

	/* this will be dropped by dounmount() */
	if ((error = vn_vfswlock(svp)) != 0)
		return (error);

	VN_HOLD(svp);
	error = dounmount(vn_mountedvfs(svp), fflags, cr);
	if (error) {
		VN_RELE(svp);
		return (error);
	}

	/*
	 * We can't use VN_RELE(), as that will try to invoke
	 * zfsctl_snapdir_inactive(), which would cause us to destroy
	 * the sd_lock mutex held by our caller.
	 */
	ASSERT(svp->v_count == 1);
	gfs_vop_inactive(svp, cr, NULL);

	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	kmem_free(sep, sizeof (zfs_snapentry_t));

	return (0);
}

static void
zfsctl_rename_snap(zfsctl_snapdir_t *sdp, zfs_snapentry_t *sep, const char *nm)
{
	avl_index_t where;
	vfs_t *vfsp;
	refstr_t *pathref;
	char newpath[MAXNAMELEN];
	char *tail;

	ASSERT(MUTEX_HELD(&sdp->sd_lock));
	ASSERT(sep != NULL);

	vfsp = vn_mountedvfs(sep->se_root);
	ASSERT(vfsp != NULL);

	vfs_lock_wait(vfsp);

	/*
	 * Change the name in the AVL tree.
	 */
	avl_remove(&sdp->sd_snaps, sep);
	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	VERIFY(avl_find(&sdp->sd_snaps, sep, &where) == NULL);
	avl_insert(&sdp->sd_snaps, sep, where);

	/*
	 * Change the current mountpoint info:
	 *   - update the tail of the mntpoint path
	 *   - update the tail of the resource path
	 */
	pathref = vfs_getmntpoint(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '/')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setmntpoint(vfsp, newpath);

	pathref = vfs_getresource(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '@')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setresource(vfsp, newpath);

	vfs_unlock(vfsp);
}
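
/*
 * Worked example (added for clarity): renaming snapshot 'old' to 'new'
 * on a hypothetical mount rewrites only the path tails, e.g.
 *
 *	mntpoint: /tank/home/.zfs/snapshot/old -> /tank/home/.zfs/snapshot/new
 *	resource: tank/home@old                -> tank/home@new
 *
 * using the final '/' and '@' located by strrchr() above.
 */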

/*ARGSUSED*/
static int
zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
    cred_t *cr, caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = sdvp->v_data;
	zfs_snapentry_t search, *sep;
	zfsvfs_t *zfsvfs;
	avl_index_t where;
	char from[MAXNAMELEN], to[MAXNAMELEN];
	char real[MAXNAMELEN];
	int err;

	zfsvfs = sdvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			snm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	err = zfsctl_snapshot_zname(sdvp, snm, MAXNAMELEN, from);
	if (err == 0)
		err = zfsctl_snapshot_zname(tdvp, tnm, MAXNAMELEN, to);
	if (err == 0)
		err = zfs_secpolicy_rename_perms(from, to, cr);
	if (err != 0)
		return (err);

	/*
	 * Cannot move snapshots out of the snapdir.
	 */
	if (sdvp != tdvp)
		return (EINVAL);

	if (strcmp(snm, tnm) == 0)
		return (0);

	mutex_enter(&sdp->sd_lock);

	search.se_name = (char *)snm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) == NULL) {
		mutex_exit(&sdp->sd_lock);
		return (ENOENT);
	}

	err = dmu_objset_rename(from, to, B_FALSE);
	if (err == 0)
		zfsctl_rename_snap(sdp, sep, tnm);

	mutex_exit(&sdp->sd_lock);

	return (err);
}

/* ARGSUSED */
static int
zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	zfs_snapentry_t *sep;
	zfs_snapentry_t search;
	zfsvfs_t *zfsvfs;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	int err;

	zfsvfs = dvp->v_vfsp->vfs_data;
	ZFS_ENTER(zfsvfs);

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, name, real,
		    MAXNAMELEN, NULL);
		if (err == 0) {
			name = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
	}

	ZFS_EXIT(zfsvfs);

	err = zfsctl_snapshot_zname(dvp, name, MAXNAMELEN, snapname);
	if (err == 0)
		err = zfs_secpolicy_destroy_perms(snapname, cr);
	if (err != 0)
		return (err);

	mutex_enter(&sdp->sd_lock);

	search.se_name = name;
	sep = avl_find(&sdp->sd_snaps, &search, NULL);
	if (sep) {
		avl_remove(&sdp->sd_snaps, sep);
		err = zfsctl_unmount_snap(sep, MS_FORCE, cr);
		if (err)
			avl_add(&sdp->sd_snaps, sep);
		else
			err = dmu_objset_destroy(snapname, B_FALSE);
	} else {
		err = ENOENT;
	}

	mutex_exit(&sdp->sd_lock);

	return (err);
}

/*
 * This creates a snapshot under '.zfs/snapshot'.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp,
    cred_t *cr, caller_context_t *cc, int flags, vsecattr_t *vsecp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	char name[MAXNAMELEN];
	int err;
	static enum symfollow follow = NO_FOLLOW;
	static enum uio_seg seg = UIO_SYSSPACE;

	if (snapshot_namecheck(dirname, NULL, NULL) != 0)
		return (EILSEQ);

	dmu_objset_name(zfsvfs->z_os, name);

	*vpp = NULL;

	err = zfs_secpolicy_snapshot_perms(name, cr);
	if (err)
		return (err);

	err = dmu_objset_snapshot(name, dirname, NULL, B_FALSE);
	if (err)
		return (err);
	err = lookupnameat(dirname, seg, follow, NULL, vpp, dvp);

	return (err);
}
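
/*
 * Illustration (added; standard ZFS behavior): with delegated snapshot
 * permissions, an ordinary mkdir from user land creates a real snapshot:
 *
 *	$ mkdir /tank/home/.zfs/snapshot/tuesday
 *
 * is equivalent to 'zfs snapshot tank/home@tuesday' for the hypothetical
 * dataset 'tank/home', and the final lookupnameat() returns its vnode.
 */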

/*
 * Lookup entry point for the 'snapshot' directory.  Try to open the
 * snapshot if it exists, creating the pseudo filesystem vnode as necessary.
 * Perform a mount of the associated dataset on top of the vnode.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	objset_t *snap;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	char *mountpoint;
	zfs_snapentry_t *sep, search;
	struct mounta margs;
	vfs_t *vfsp;
	size_t mountpoint_len;
	avl_index_t where;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ASSERT(dvp->v_type == VDIR);

	/*
	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs).  We need to
	 * add some flag to domount() to tell it not to do this lookup.
	 */
	if (MUTEX_HELD(&sdp->sd_lock))
		return (ENOENT);

	ZFS_ENTER(zfsvfs);

	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    MAXNAMELEN, &conflict);
		if (err == 0) {
			nm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
		if (realpnp)
			(void) strlcpy(realpnp->pn_buf, nm,
			    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;
	}

	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		*vpp = sep->se_root;
		VN_HOLD(*vpp);
		err = traverse(vpp);
		if (err) {
			VN_RELE(*vpp);
			*vpp = NULL;
		} else if (*vpp == sep->se_root) {
			/*
			 * The snapshot was unmounted behind our backs,
			 * try to remount it.
			 */
			goto domount;
		} else {
			/*
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			 */
			(*vpp)->v_flag &= ~VROOT;
		}
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/*
	 * The requested snapshot is not currently mounted, look it up.
	 */
	err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname);
	if (err) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		/*
		 * Handle "ls *" or "?" in a graceful manner,
		 * forcing EILSEQ to ENOENT, since the shell ultimately
		 * passes "*" or "?" as the name to lookup.
		 */
		return (err == EILSEQ ? ENOENT : err);
	}
	if (dmu_objset_hold(snapname, FTAG, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (ENOENT);
	}

	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap));
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_rele(snap, FTAG);
domount:
	mountpoint_len = strlen(refstr_value(dvp->v_vfsp->vfs_mntpt)) +
	    strlen("/.zfs/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len, "%s/.zfs/snapshot/%s",
	    refstr_value(dvp->v_vfsp->vfs_mntpt), nm);

	margs.spec = snapname;
	margs.dir = mountpoint;
	margs.flags = MS_SYSSPACE | MS_NOMNTTAB;
	margs.fstype = "zfs";
	margs.dataptr = NULL;
	margs.datalen = 0;
	margs.optptr = NULL;
	margs.optlen = 0;

	err = domount("zfs", &margs, *vpp, kcred, &vfsp);
	kmem_free(mountpoint, mountpoint_len);

	if (err == 0) {
		/*
		 * Return the mounted root rather than the covered mount point.
		 * Takes the GFS vnode at .zfs/snapshot/<snapname> and returns
		 * the ZFS vnode mounted on top of the GFS node.  This ZFS
		 * vnode is the root of the newly created vfsp.
		 */
		VFS_RELE(vfsp);
		err = traverse(vpp);
	}

	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
		(*vpp)->v_vfsp = zfsvfs->z_vfs;
		(*vpp)->v_flag &= ~VROOT;
	}
	mutex_exit(&sdp->sd_lock);
	ZFS_EXIT(zfsvfs);

	/*
	 * If we had an error, drop our hold on the vnode and
	 * zfsctl_snapshot_inactive() will clean up.
	 */
	if (err) {
		VN_RELE(*vpp);
		*vpp = NULL;
	}
	return (err);
}

/* ARGSUSED */
static int
zfsctl_shares_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	znode_t *dzp;
	int error;

	ZFS_ENTER(zfsvfs);

	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (ENOTSUP);
	}
	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		error = VOP_LOOKUP(ZTOV(dzp), nm, vpp, pnp,
		    flags, rdir, cr, ct, direntflags, realpnp);
		VN_RELE(ZTOV(dzp));
	}

	ZFS_EXIT(zfsvfs);

	return (error);
}

/* ARGSUSED */
static int
zfsctl_snapdir_readdir_cb(vnode_t *vp, void *dp, int *eofp,
    offset_t *offp, offset_t *nextp, void *data, int flags)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	char snapname[MAXNAMELEN];
	uint64_t id, cookie;
	boolean_t case_conflict;
	int error;

	ZFS_ENTER(zfsvfs);

	cookie = *offp;
	error = dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN, snapname, &id,
	    &cookie, &case_conflict);
	if (error) {
		ZFS_EXIT(zfsvfs);
		if (error == ENOENT) {
			*eofp = 1;
			return (0);
		}
		return (error);
	}

	if (flags & V_RDDIR_ENTFLAGS) {
		edirent_t *eodp = dp;

		(void) strcpy(eodp->ed_name, snapname);
		eodp->ed_ino = ZFSCTL_INO_SNAP(id);
		eodp->ed_eflags = case_conflict ? ED_CASE_CONFLICT : 0;
	} else {
		struct dirent64 *odp = dp;

		(void) strcpy(odp->d_name, snapname);
		odp->d_ino = ZFSCTL_INO_SNAP(id);
	}
	*nextp = cookie;

	ZFS_EXIT(zfsvfs);

	return (0);
}

/* ARGSUSED */
static int
zfsctl_shares_readdir(vnode_t *vp, uio_t *uiop, cred_t *cr, int *eofp,
    caller_context_t *ct, int flags)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	znode_t *dzp;
	int error;

	ZFS_ENTER(zfsvfs);

	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (ENOTSUP);
	}
	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		error = VOP_READDIR(ZTOV(dzp), uiop, cr, eofp, ct, flags);
		VN_RELE(ZTOV(dzp));
	} else {
		*eofp = 1;
		error = ENOENT;
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * pvp is the '.zfs' directory (zfsctl_node_t).
 *
 * Creates vp, which is '.zfs/snapshot' (zfsctl_snapdir_t).
 *
 * This function is the callback to create a GFS vnode for '.zfs/snapshot'
 * when a lookup is performed on .zfs for "snapshot".
 */
vnode_t *
zfsctl_mknode_snapdir(vnode_t *pvp)
{
	vnode_t *vp;
	zfsctl_snapdir_t *sdp;

	vp = gfs_dir_create(sizeof (zfsctl_snapdir_t), pvp,
	    zfsctl_ops_snapdir, NULL, NULL, MAXNAMELEN,
	    zfsctl_snapdir_readdir_cb, NULL);
	sdp = vp->v_data;
	sdp->sd_node.zc_id = ZFSCTL_INO_SNAPDIR;
	sdp->sd_node.zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime;
	mutex_init(&sdp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&sdp->sd_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	return (vp);
}

vnode_t *
zfsctl_mknode_shares(vnode_t *pvp)
{
	vnode_t *vp;
	zfsctl_node_t *sdp;

	vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp,
	    zfsctl_ops_shares, NULL, NULL, MAXNAMELEN,
	    NULL, NULL);
	sdp = vp->v_data;
	sdp->zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime;
	return (vp);
}

/* ARGSUSED */
static int
zfsctl_shares_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	znode_t *dzp;
	int error;

	ZFS_ENTER(zfsvfs);
	if (zfsvfs->z_shares_dir == 0) {
		ZFS_EXIT(zfsvfs);
		return (ENOTSUP);
	}
	if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
		error = VOP_GETATTR(ZTOV(dzp), vap, flags, cr, ct);
		VN_RELE(ZTOV(dzp));
	}
	ZFS_EXIT(zfsvfs);
	return (error);
}

/* ARGSUSED */
static int
zfsctl_snapdir_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_snapdir_t *sdp = vp->v_data;

	ZFS_ENTER(zfsvfs);
	zfsctl_common_getattr(vp, vap);
	vap->va_nodeid = gfs_file_inode(vp);
	vap->va_nlink = vap->va_size = avl_numnodes(&sdp->sd_snaps) + 2;
	vap->va_ctime = vap->va_mtime = dmu_objset_snap_cmtime(zfsvfs->z_os);
	ZFS_EXIT(zfsvfs);

	return (0);
}

/* ARGSUSED */
static void
zfsctl_snapdir_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	zfsctl_snapdir_t *sdp = vp->v_data;
	void *private;

	private = gfs_dir_inactive(vp);
	if (private != NULL) {
		ASSERT(avl_numnodes(&sdp->sd_snaps) == 0);
		mutex_destroy(&sdp->sd_lock);
		avl_destroy(&sdp->sd_snaps);
		kmem_free(private, sizeof (zfsctl_snapdir_t));
	}
}

static const fs_operation_def_t zfsctl_tops_snapdir[] = {
	{ VOPNAME_OPEN, { .vop_open = zfsctl_common_open } },
	{ VOPNAME_CLOSE, { .vop_close = zfsctl_common_close } },
	{ VOPNAME_IOCTL, { .error = fs_inval } },
	{ VOPNAME_GETATTR, { .vop_getattr = zfsctl_snapdir_getattr } },
	{ VOPNAME_ACCESS, { .vop_access = zfsctl_common_access } },
	{ VOPNAME_RENAME, { .vop_rename = zfsctl_snapdir_rename } },
	{ VOPNAME_RMDIR, { .vop_rmdir = zfsctl_snapdir_remove } },
	{ VOPNAME_MKDIR, { .vop_mkdir = zfsctl_snapdir_mkdir } },
	{ VOPNAME_READDIR, { .vop_readdir = gfs_vop_readdir } },
	{ VOPNAME_LOOKUP, { .vop_lookup = zfsctl_snapdir_lookup } },
	{ VOPNAME_SEEK, { .vop_seek = fs_seek } },
	{ VOPNAME_INACTIVE, { .vop_inactive = zfsctl_snapdir_inactive } },
	{ VOPNAME_FID, { .vop_fid = zfsctl_common_fid } },
	{ NULL }
};

static const fs_operation_def_t zfsctl_tops_shares[] = {
	{ VOPNAME_OPEN, { .vop_open = zfsctl_common_open } },
	{ VOPNAME_CLOSE, { .vop_close = zfsctl_common_close } },
	{ VOPNAME_IOCTL, { .error = fs_inval } },
	{ VOPNAME_GETATTR, { .vop_getattr = zfsctl_shares_getattr } },
	{ VOPNAME_ACCESS, { .vop_access = zfsctl_common_access } },
	{ VOPNAME_READDIR, { .vop_readdir = zfsctl_shares_readdir } },
	{ VOPNAME_LOOKUP, { .vop_lookup = zfsctl_shares_lookup } },
	{ VOPNAME_SEEK, { .vop_seek = fs_seek } },
	{ VOPNAME_INACTIVE, { .vop_inactive = gfs_vop_inactive } },
	{ VOPNAME_FID, { .vop_fid = zfsctl_shares_fid } },
	{ NULL }
};

/*
 * pvp is the GFS vnode '.zfs/snapshot'.
 *
 * This creates a GFS node under '.zfs/snapshot' representing each
 * snapshot.  This newly created GFS node is what we mount snapshot
 * vfs_t's on top of.
 */
static vnode_t *
zfsctl_snapshot_mknode(vnode_t *pvp, uint64_t objset)
{
	vnode_t *vp;
	zfsctl_node_t *zcp;

	vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp,
	    zfsctl_ops_snapshot, NULL, NULL, MAXNAMELEN, NULL, NULL);
	zcp = vp->v_data;
	zcp->zc_id = objset;

	return (vp);
}

static void
zfsctl_snapshot_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep, *next;
	vnode_t *dvp;

	VERIFY(gfs_dir_lookup(vp, "..", &dvp, cr, 0, NULL, NULL) == 0);
	sdp = dvp->v_data;

	mutex_enter(&sdp->sd_lock);

	if (vp->v_count > 1) {
		mutex_exit(&sdp->sd_lock);
		VN_RELE(dvp);
		return;
	}
	ASSERT(!vn_ismntpt(vp));

	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		next = AVL_NEXT(&sdp->sd_snaps, sep);

		if (sep->se_root == vp) {
			avl_remove(&sdp->sd_snaps, sep);
			kmem_free(sep->se_name, strlen(sep->se_name) + 1);
			kmem_free(sep, sizeof (zfs_snapentry_t));
			break;
		}
		sep = next;
	}
	ASSERT(sep != NULL);

	mutex_exit(&sdp->sd_lock);
	VN_RELE(dvp);

	/*
	 * Dispose of the vnode for the snapshot mount point.
	 * This is safe to do because once this entry has been removed
	 * from the AVL tree, it can't be found again, so cannot become
	 * "active".  If we lookup the same name again we will end up
	 * creating a new vnode.
	 */
	gfs_vop_inactive(vp, cr, ct);
}

/*
 * These VP's should never see the light of day.  They should always
 * be covered.
 */
static const fs_operation_def_t zfsctl_tops_snapshot[] = {
	VOPNAME_INACTIVE, { .vop_inactive = zfsctl_snapshot_inactive },
	NULL, NULL
};

int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	vnode_t *dvp, *vp;
	zfsctl_snapdir_t *sdp;
	zfsctl_node_t *zcp;
	zfs_snapentry_t *sep;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
	    NULL, 0, NULL, kcred, NULL, NULL, NULL);
	if (error != 0)
		return (error);
	sdp = dvp->v_data;

	mutex_enter(&sdp->sd_lock);
	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		vp = sep->se_root;
		zcp = vp->v_data;
		if (zcp->zc_id == objsetid)
			break;

		sep = AVL_NEXT(&sdp->sd_snaps, sep);
	}

	if (sep != NULL) {
		VN_HOLD(vp);
		/*
		 * Return the mounted root rather than the covered mount point.
		 * Takes the GFS vnode at .zfs/snapshot/<snapshot objsetid>
		 * and returns the ZFS vnode mounted on top of the GFS node.
		 * This ZFS vnode is the root of the vfs for objset 'objsetid'.
		 */
		error = traverse(&vp);
		if (error == 0) {
			if (vp == sep->se_root)
				error = EINVAL;
			else
				*zfsvfsp = VTOZ(vp)->z_zfsvfs;
		}
		mutex_exit(&sdp->sd_lock);
		VN_RELE(vp);
	} else {
		error = EINVAL;
		mutex_exit(&sdp->sd_lock);
	}

	VN_RELE(dvp);

	return (error);
}

/*
 * Unmount any snapshots for the given filesystem.  This is called from
 * zfs_umount() - if we have a ctldir, then go through and unmount all the
 * snapshots.
 */
int
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	vnode_t *dvp;
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep, *next;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
	    NULL, 0, NULL, cr, NULL, NULL, NULL);
	if (error != 0)
		return (error);
	sdp = dvp->v_data;

	mutex_enter(&sdp->sd_lock);

	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		next = AVL_NEXT(&sdp->sd_snaps, sep);

		/*
		 * If this snapshot is not mounted, then it must
		 * have just been unmounted by somebody else, and
		 * will be cleaned up by zfsctl_snapdir_inactive().
		 */
		if (vn_ismntpt(sep->se_root)) {
			avl_remove(&sdp->sd_snaps, sep);
			error = zfsctl_unmount_snap(sep, fflags, cr);
			if (error) {
				avl_add(&sdp->sd_snaps, sep);
				break;
			}
		}
		sep = next;
	}

	mutex_exit(&sdp->sd_lock);
	VN_RELE(dvp);

	return (error);
}