 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
#pragma ident	"@(#)zfs_ctldir.c 1.20 08/04/27 SMI"
 * ZFS control directory (a.k.a. ".zfs")
 *
 * This directory provides a common location for all ZFS meta-objects.
 * Currently, this is only the 'snapshot' directory, but this may expand in
 * the future.  The elements are built using the GFS primitives, as the
 * hierarchy does not actually exist on disk.
 *
 * For 'snapshot', we don't want to have all snapshots always mounted, because
 * this would take up a huge amount of space in /etc/mnttab.  We have three
 * types of objects:
 *
 *	ctldir ------> snapshotdir -------> snapshot
 *
 * The 'snapshot' node contains just enough information to look up '..' and
 * act as a mountpoint for the snapshot.  Whenever we look up a specific
 * snapshot, we perform an automount of the underlying filesystem and return
 * the corresponding vnode.
 *
 * All mounts are handled automatically by the kernel, but unmounts are
 * (currently) handled from userland.  The main reason is that there is no
 * reliable way to auto-unmount the filesystem when it's "no longer in use".
 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
 * unmounts any snapshots within the snapshot directory.
 *
 * The '.zfs', '.zfs/snapshot', and all directories created under
 * '.zfs/snapshot' (i.e. '.zfs/snapshot/<snapname>') are all GFS nodes and
 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
 *
 * File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
 * (i.e. snapshots) are ZFS nodes and have their own unique vfs_t.
 * However, vnodes within these mounted-on file systems have their v_vfsp
 * fields set to the head filesystem to make NFS happy (see
 * zfsctl_snapdir_lookup()).  We VFS_HOLD the head filesystem's vfs_t
 * so that it cannot be freed until all snapshots have been unmounted.
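 *
 * For example (illustrative names, not taken from the original comment): for
 * a filesystem mounted at /tank/fs, the GFS node /tank/fs/.zfs/snapshot/monday
 * shares tank/fs's vfs_t, while the snapshot dataset tank/fs@monday is
 * automounted on top of it with its own vfs_t; the vnodes inside that mount
 * nevertheless report tank/fs's vfs_t in v_vfsp so NFS file handles keep
 * working.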
#include <fs/fs_subr.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/vfs_opreg.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
typedef struct zfsctl_node {
	gfs_dir_t	zc_gfs_private;
	uint64_t	zc_id;
	timestruc_t	zc_cmtime;	/* ctime and mtime, always the same */
} zfsctl_node_t;

typedef struct zfsctl_snapdir {
	zfsctl_node_t	sd_node;
	kmutex_t	sd_lock;
	avl_tree_t	sd_snaps;
} zfsctl_snapdir_t;
static int
snapentry_compare(const void *a, const void *b)
{
	const zfs_snapentry_t *sa = a;
	const zfs_snapentry_t *sb = b;
	int ret = strcmp(sa->se_name, sb->se_name);

	/* AVL comparison callbacks must return exactly -1, 0, or 1 */
	return (ret < 0 ? -1 : (ret > 0 ? 1 : 0));
}
vnodeops_t *zfsctl_ops_root;
vnodeops_t *zfsctl_ops_snapdir;
vnodeops_t *zfsctl_ops_snapshot;

static const fs_operation_def_t zfsctl_tops_root[];
static const fs_operation_def_t zfsctl_tops_snapdir[];
static const fs_operation_def_t zfsctl_tops_snapshot[];

static vnode_t *zfsctl_mknode_snapdir(vnode_t *);
static vnode_t *zfsctl_snapshot_mknode(vnode_t *, uint64_t objset);
static int zfsctl_unmount_snap(zfs_snapentry_t *, int, cred_t *);
static gfs_opsvec_t zfsctl_opsvec[] = {
	{ ".zfs", zfsctl_tops_root, &zfsctl_ops_root },
	{ ".zfs/snapshot", zfsctl_tops_snapdir, &zfsctl_ops_snapdir },
	{ ".zfs/snapshot/vnode", zfsctl_tops_snapshot, &zfsctl_ops_snapshot },
 * Root directory elements.  We have only a single static entry, 'snapshot'.
static gfs_dirent_t zfsctl_root_entries[] = {
	{ "snapshot", zfsctl_mknode_snapdir, GFS_CACHE_VNODE },

/* include . and .. in the calculation */
#define	NROOT_ENTRIES	((sizeof (zfsctl_root_entries) / \
	sizeof (gfs_dirent_t)) + 1)
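
/*
 * Worked example, for clarity: assuming the usual NULL terminator on the
 * entries array, sizeof/sizeof gives 2 (the 'snapshot' entry plus the
 * terminator slot), and the "+ 1" accounts for the remaining implicit entry,
 * so NROOT_ENTRIES evaluates to 3, i.e. ".", "..", and "snapshot".
 * zfsctl_root_getattr() reports this value as both va_nlink and va_size
 * for '.zfs'.
 */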
 * Initialize the various GFS pieces we'll need to create and manipulate .zfs
 * directories.  This is called from the ZFS init routine, and initializes the
 * vnode ops vectors that we'll be using.

	VERIFY(gfs_make_opsvec(zfsctl_opsvec) == 0);
	 * Remove zfsctl vnode ops
	if (zfsctl_ops_root)
		vn_freevnodeops(zfsctl_ops_root);
	if (zfsctl_ops_snapdir)
		vn_freevnodeops(zfsctl_ops_snapdir);
	if (zfsctl_ops_snapshot)
		vn_freevnodeops(zfsctl_ops_snapshot);

	zfsctl_ops_root = NULL;
	zfsctl_ops_snapdir = NULL;
	zfsctl_ops_snapshot = NULL;
 * Return the inode number associated with the 'snapshot' directory.
zfsctl_root_inode_cb(vnode_t *vp, int index)
	return (ZFSCTL_INO_SNAPDIR);
 * Create the '.zfs' directory.  This directory is cached as part of the VFS
 * structure.  This results in a hold on the vfs_t.  The code in zfs_umount()
 * therefore checks against a vfs_count of 2 instead of 1.  This reference
 * is removed when the ctldir is destroyed in the unmount.
zfsctl_create(zfsvfs_t *zfsvfs)
	ASSERT(zfsvfs->z_ctldir == NULL);

	vp = gfs_root_create(sizeof (zfsctl_node_t), zfsvfs->z_vfs,
	    zfsctl_ops_root, ZFSCTL_INO_ROOT, zfsctl_root_entries,
	    zfsctl_root_inode_cb, MAXNAMELEN, NULL, NULL);
	zcp->zc_id = ZFSCTL_INO_ROOT;

	VERIFY(VFS_ROOT(zfsvfs->z_vfs, &rvp) == 0);
	ZFS_TIME_DECODE(&zcp->zc_cmtime, VTOZ(rvp)->z_phys->zp_crtime);
	 * We're only faking the fact that we have a root of a filesystem for
	 * the sake of the GFS interfaces.  Undo the flag manipulation it did
	 * for us.
	vp->v_flag &= ~(VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT);

	zfsvfs->z_ctldir = vp;
 * Destroy the '.zfs' directory.  Only called when the filesystem is
 * unmounted.  There might still be more references if we were force
 * unmounted, but only new zfs_inactive() calls can occur and they don't
 * reference .zfs.
zfsctl_destroy(zfsvfs_t *zfsvfs)
	VN_RELE(zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
 * Given a root znode, retrieve the associated .zfs directory.
 * Add a hold to the vnode and return it.
zfsctl_root(znode_t *zp)
	ASSERT(zfs_has_ctldir(zp));
	VN_HOLD(zp->z_zfsvfs->z_ctldir);
	return (zp->z_zfsvfs->z_ctldir);
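
/*
 * Illustrative usage sketch of a hypothetical caller: a directory lookup in
 * the ZFS VOP layer can hand back the cached control directory along these
 * lines when ".zfs" is named on a filesystem root.  The surrounding
 * condition and the variable names (zdp, nm, vpp) are placeholders assumed
 * for illustration only.
 */
#if 0
	if (strcmp(nm, ".zfs") == 0 && zfs_has_ctldir(zdp)) {
		*vpp = zfsctl_root(zdp);	/* returns a held vnode */
		return (0);
	}
#endif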
 * Common open routine.  Disallow any write access.
zfsctl_common_open(vnode_t **vpp, int flags, cred_t *cr, caller_context_t *ct)

 * Common close routine.  Nothing to do here.
zfsctl_common_close(vnode_t *vpp, int flags, int count, offset_t off,
    cred_t *cr, caller_context_t *ct)

 * Common access routine.  Disallow writes.
zfsctl_common_access(vnode_t *vp, int mode, int flags, cred_t *cr,
    caller_context_t *ct)
 * Common getattr function.  Fill in basic information.
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
	zfsctl_node_t *zcp = vp->v_data;

	 * We are a purely virtual object, so we have no
	 * blocksize or allocated blocks.
	vap->va_fsid = vp->v_vfsp->vfs_dev;
	vap->va_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |

	 * We live in the now (for atime).
	vap->va_mtime = vap->va_ctime = zcp->zc_cmtime;
zfsctl_common_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_node_t *zcp = vp->v_data;
	uint64_t object = zcp->zc_id;

	if (fidp->fid_len < SHORT_FID_LEN) {
		fidp->fid_len = SHORT_FID_LEN;

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = SHORT_FID_LEN;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
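
	/*
	 * For example, an object number of 0x1234 is packed least-significant
	 * byte first: zf_object[0] = 0x34, zf_object[1] = 0x12, and the
	 * remaining bytes are zero.
	 */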
	/* .zfs znodes always have a generation number of 0 */
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
 * .zfs inode namespace
 *
 * We need to generate unique inode numbers for all files and directories
 * within the .zfs pseudo-filesystem.  We use the following scheme:
 *
 *	.zfs/snapshot/<snap>		objectid(snap)
#define	ZFSCTL_INO_SNAP(id)	(id)
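
/*
 * Illustrative note: '.zfs' and '.zfs/snapshot' themselves use the fixed
 * constants ZFSCTL_INO_ROOT and ZFSCTL_INO_SNAPDIR from zfs_ctldir.h, while
 * each snapshot directory entry reuses its snapshot's objset id, e.g. a
 * snapshot whose objset id is 0x2f (a hypothetical value) shows up as inode
 * 0x2f under .zfs/snapshot.
 */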
 * Get root directory attributes.
zfsctl_root_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;

	vap->va_nodeid = ZFSCTL_INO_ROOT;
	vap->va_nlink = vap->va_size = NROOT_ENTRIES;

	zfsctl_common_getattr(vp, vap);
 * Special case the handling of "..".
zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;

	 * No extended attributes allowed under .zfs
	if (flags & LOOKUP_XATTR)

	if (strcmp(nm, "..") == 0) {
		err = VFS_ROOT(dvp->v_vfsp, vpp);
		err = gfs_vop_lookup(dvp, nm, vpp, pnp, flags, rdir,
		    cr, ct, direntflags, realpnp);
static const fs_operation_def_t zfsctl_tops_root[] = {
	{ VOPNAME_OPEN,		{ .vop_open = zfsctl_common_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = zfsctl_common_close } },
	{ VOPNAME_IOCTL,	{ .error = fs_inval } },
	{ VOPNAME_GETATTR,	{ .vop_getattr = zfsctl_root_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = zfsctl_common_access } },
	{ VOPNAME_READDIR,	{ .vop_readdir = gfs_vop_readdir } },
	{ VOPNAME_LOOKUP,	{ .vop_lookup = zfsctl_root_lookup } },
	{ VOPNAME_SEEK,		{ .vop_seek = fs_seek } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = gfs_vop_inactive } },
	{ VOPNAME_FID,		{ .vop_fid = zfsctl_common_fid } },
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;

	dmu_objset_name(os, zname);
	if (strlen(zname) + 1 + strlen(name) >= len)
		return (ENAMETOOLONG);
	(void) strcat(zname, "@");
	(void) strcat(zname, name);
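
	/*
	 * For example (hypothetical names): for a dataset named "tank/home"
	 * and a snapshot name of "monday", zname becomes "tank/home@monday";
	 * ENAMETOOLONG is returned if the combined name cannot fit in len.
	 */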
zfsctl_unmount_snap(zfs_snapentry_t *sep, int fflags, cred_t *cr)
	vnode_t *svp = sep->se_root;

	ASSERT(vn_ismntpt(svp));

	/* this will be dropped by dounmount() */
	if ((error = vn_vfswlock(svp)) != 0)

	error = dounmount(vn_mountedvfs(svp), fflags, cr);

	VFS_RELE(svp->v_vfsp);
	 * We can't use VN_RELE(), as that will try to invoke
	 * zfsctl_snapdir_inactive(), which would cause us to destroy
	 * the sd_lock mutex held by our caller.
	ASSERT(svp->v_count == 1);
	gfs_vop_inactive(svp, cr, NULL);

	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	kmem_free(sep, sizeof (zfs_snapentry_t));
zfsctl_rename_snap(zfsctl_snapdir_t *sdp, zfs_snapentry_t *sep, const char *nm)
	char newpath[MAXNAMELEN];

	ASSERT(MUTEX_HELD(&sdp->sd_lock));

	vfsp = vn_mountedvfs(sep->se_root);
	ASSERT(vfsp != NULL);

	 * Change the name in the AVL tree.
	avl_remove(&sdp->sd_snaps, sep);
	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	VERIFY(avl_find(&sdp->sd_snaps, sep, &where) == NULL);
	avl_insert(&sdp->sd_snaps, sep, where);

	 * Change the current mountpoint info:
	 *   - update the tail of the mntpoint path
	 *   - update the tail of the resource path
	pathref = vfs_getmntpoint(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '/')) != NULL);
	*(tail + 1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setmntpoint(vfsp, newpath);

	pathref = vfs_getresource(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '@')) != NULL);
	*(tail + 1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setresource(vfsp, newpath);
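
	/*
	 * For example (hypothetical names): renaming snapshot "foo" to "bar"
	 * on a filesystem mounted at /tank/fs rewrites the mountpoint tail
	 * from "/tank/fs/.zfs/snapshot/foo" to "/tank/fs/.zfs/snapshot/bar"
	 * and the resource tail from "tank/fs@foo" to "tank/fs@bar".
	 */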
zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
    cred_t *cr, caller_context_t *ct, int flags)
	zfsctl_snapdir_t *sdp = sdvp->v_data;
	zfs_snapentry_t search, *sep;
	char from[MAXNAMELEN], to[MAXNAMELEN];
	char real[MAXNAMELEN];

	zfsvfs = sdvp->v_vfsp->vfs_data;

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, snm, real,
		} else if (err != ENOTSUP) {

	err = zfsctl_snapshot_zname(sdvp, snm, MAXNAMELEN, from);
	err = zfsctl_snapshot_zname(tdvp, tnm, MAXNAMELEN, to);
	err = zfs_secpolicy_rename_perms(from, to, cr);

	 * Cannot move snapshots out of the snapdir.

	if (strcmp(snm, tnm) == 0)

	mutex_enter(&sdp->sd_lock);

	search.se_name = (char *)snm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) == NULL) {
		mutex_exit(&sdp->sd_lock);

	err = dmu_objset_rename(from, to, B_FALSE);
	if (err == 0)
		zfsctl_rename_snap(sdp, sep, tnm);

	mutex_exit(&sdp->sd_lock);
zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
	zfsctl_snapdir_t *sdp = dvp->v_data;
	zfs_snapentry_t *sep;
	zfs_snapentry_t search;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];

	zfsvfs = dvp->v_vfsp->vfs_data;

	if ((flags & FIGNORECASE) || zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		err = dmu_snapshot_realname(zfsvfs->z_os, name, real,
		} else if (err != ENOTSUP) {

	err = zfsctl_snapshot_zname(dvp, name, MAXNAMELEN, snapname);
	err = zfs_secpolicy_destroy_perms(snapname, cr);

	mutex_enter(&sdp->sd_lock);

	search.se_name = name;
	sep = avl_find(&sdp->sd_snaps, &search, NULL);
	if (sep) {
		avl_remove(&sdp->sd_snaps, sep);
		err = zfsctl_unmount_snap(sep, MS_FORCE, cr);
		if (err)
			avl_add(&sdp->sd_snaps, sep);
		else
			err = dmu_objset_destroy(snapname);
	}

	mutex_exit(&sdp->sd_lock);
 * This creates a snapshot under '.zfs/snapshot'.
zfsctl_snapdir_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp,
    cred_t *cr, caller_context_t *cc, int flags, vsecattr_t *vsecp)
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	char name[MAXNAMELEN];
	static enum symfollow follow = NO_FOLLOW;
	static enum uio_seg seg = UIO_SYSSPACE;

	dmu_objset_name(zfsvfs->z_os, name);

	err = zfs_secpolicy_snapshot_perms(name, cr);

	err = dmu_objset_snapshot(name, dirname, B_FALSE);

	err = lookupnameat(dirname, seg, follow, NULL, vpp, dvp);
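
	/*
	 * For example (hypothetical paths): "mkdir /tank/fs/.zfs/snapshot/monday"
	 * ends up here, snapshots the dataset as "tank/fs@monday", and the
	 * final lookupnameat() of "monday" relative to dvp re-enters
	 * zfsctl_snapdir_lookup(), which automounts the new snapshot and
	 * returns its root vnode in *vpp.
	 */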
 * Lookup entry point for the 'snapshot' directory.  Try to open the
 * snapshot if it exists, creating the pseudo filesystem vnode as necessary.
 * Perform a mount of the associated dataset on top of the vnode.
zfsctl_snapdir_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
	zfsctl_snapdir_t *sdp = dvp->v_data;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	zfs_snapentry_t *sep, search;
	size_t mountpoint_len;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;

	 * No extended attributes allowed under .zfs
	if (flags & LOOKUP_XATTR)

	ASSERT(dvp->v_type == VDIR);

	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0)

	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs).  We need to
	 * add some flag to domount() to tell it not to do this lookup.
	if (MUTEX_HELD(&sdp->sd_lock))
	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    MAXNAMELEN, &conflict);
		} else if (err != ENOTSUP) {

		(void) strlcpy(realpnp->pn_buf, nm,
		    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;

	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		} else if (*vpp == sep->se_root) {
			 * The snapshot was unmounted behind our backs,
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			(*vpp)->v_flag &= ~VROOT;
		mutex_exit(&sdp->sd_lock);
	 * The requested snapshot is not currently mounted, look it up.
	err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname);
		mutex_exit(&sdp->sd_lock);

	if (dmu_objset_open(snapname, DMU_OST_ZFS,
	    DS_MODE_STANDARD | DS_MODE_READONLY, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);

	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap));
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_close(snap);
	mountpoint_len = strlen(refstr_value(dvp->v_vfsp->vfs_mntpt)) +
	    strlen("/.zfs/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len, "%s/.zfs/snapshot/%s",
	    refstr_value(dvp->v_vfsp->vfs_mntpt), nm);
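
	/*
	 * Note, for clarity: the mount arguments below name the snapshot
	 * dataset ("<dataset>@<snapname>") as the spec and the
	 * .zfs/snapshot/<snapname> path as the mount directory.  MS_SYSSPACE
	 * indicates that these strings live in kernel address space, and
	 * MS_NOMNTTAB keeps the automounted snapshot out of /etc/mnttab,
	 * per the comment at the top of this file.
	 */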
	margs.spec = snapname;
	margs.dir = mountpoint;
	margs.flags = MS_SYSSPACE | MS_NOMNTTAB;
	margs.fstype = "zfs";
	margs.dataptr = NULL;

	err = domount("zfs", &margs, *vpp, kcred, &vfsp);
	kmem_free(mountpoint, mountpoint_len);
	 * Return the mounted root rather than the covered mount point.
	 * Takes the GFS vnode at .zfs/snapshot/<snapname> and returns
	 * the ZFS vnode mounted on top of the GFS node.  This ZFS
	 * vnode is the root of the newly created vfsp.

	 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
	 *
	 * This is where we lie about our v_vfsp in order to
	 * make .zfs/snapshot/<snapname> accessible over NFS
	 * without requiring manual mounts of <snapname>.
	ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
	VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
	(*vpp)->v_vfsp = zfsvfs->z_vfs;
	(*vpp)->v_flag &= ~VROOT;

	mutex_exit(&sdp->sd_lock);

	 * If we had an error, drop our hold on the vnode and
	 * zfsctl_snapshot_inactive() will clean up.
zfsctl_snapdir_readdir_cb(vnode_t *vp, void *dp, int *eofp,
    offset_t *offp, offset_t *nextp, void *data, int flags)
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	char snapname[MAXNAMELEN];
	boolean_t case_conflict;

	error = dmu_snapshot_list_next(zfsvfs->z_os, MAXNAMELEN, snapname, &id,
	    &cookie, &case_conflict);
	if (error == ENOENT) {

	if (flags & V_RDDIR_ENTFLAGS) {
		edirent_t *eodp = dp;

		(void) strcpy(eodp->ed_name, snapname);
		eodp->ed_ino = ZFSCTL_INO_SNAP(id);
		eodp->ed_eflags = case_conflict ? ED_CASE_CONFLICT : 0;
	} else {
		struct dirent64 *odp = dp;

		(void) strcpy(odp->d_name, snapname);
		odp->d_ino = ZFSCTL_INO_SNAP(id);
	}
 * pvp is the '.zfs' directory (zfsctl_node_t).
 * Creates vp, which is '.zfs/snapshot' (zfsctl_snapdir_t).
 *
 * This function is the callback to create a GFS vnode for '.zfs/snapshot'
 * when a lookup is performed on .zfs for "snapshot".
zfsctl_mknode_snapdir(vnode_t *pvp)
	zfsctl_snapdir_t *sdp;

	vp = gfs_dir_create(sizeof (zfsctl_snapdir_t), pvp,
	    zfsctl_ops_snapdir, NULL, NULL, MAXNAMELEN,
	    zfsctl_snapdir_readdir_cb, NULL);
	sdp->sd_node.zc_id = ZFSCTL_INO_SNAPDIR;
	sdp->sd_node.zc_cmtime = ((zfsctl_node_t *)pvp->v_data)->zc_cmtime;
	mutex_init(&sdp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&sdp->sd_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
zfsctl_snapdir_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_snapdir_t *sdp = vp->v_data;

	zfsctl_common_getattr(vp, vap);
	vap->va_nodeid = gfs_file_inode(vp);
	vap->va_nlink = vap->va_size = avl_numnodes(&sdp->sd_snaps) + 2;
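
	/*
	 * For example, with five snapshots currently tracked in sd_snaps,
	 * '.zfs/snapshot' reports va_nlink and va_size of 7, the extra two
	 * covering "." and "..".
	 */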
zfsctl_snapdir_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
	zfsctl_snapdir_t *sdp = vp->v_data;

	private = gfs_dir_inactive(vp);
	if (private != NULL) {
		ASSERT(avl_numnodes(&sdp->sd_snaps) == 0);
		mutex_destroy(&sdp->sd_lock);
		avl_destroy(&sdp->sd_snaps);
		kmem_free(private, sizeof (zfsctl_snapdir_t));
static const fs_operation_def_t zfsctl_tops_snapdir[] = {
	{ VOPNAME_OPEN,		{ .vop_open = zfsctl_common_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = zfsctl_common_close } },
	{ VOPNAME_IOCTL,	{ .error = fs_inval } },
	{ VOPNAME_GETATTR,	{ .vop_getattr = zfsctl_snapdir_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = zfsctl_common_access } },
	{ VOPNAME_RENAME,	{ .vop_rename = zfsctl_snapdir_rename } },
	{ VOPNAME_RMDIR,	{ .vop_rmdir = zfsctl_snapdir_remove } },
	{ VOPNAME_MKDIR,	{ .vop_mkdir = zfsctl_snapdir_mkdir } },
	{ VOPNAME_READDIR,	{ .vop_readdir = gfs_vop_readdir } },
	{ VOPNAME_LOOKUP,	{ .vop_lookup = zfsctl_snapdir_lookup } },
	{ VOPNAME_SEEK,		{ .vop_seek = fs_seek } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = zfsctl_snapdir_inactive } },
	{ VOPNAME_FID,		{ .vop_fid = zfsctl_common_fid } },
 * pvp is the GFS vnode '.zfs/snapshot'.
 *
 * This creates a GFS node under '.zfs/snapshot' representing each
 * snapshot.  This newly created GFS node is what we mount the snapshot
 * filesystem on top of.
zfsctl_snapshot_mknode(vnode_t *pvp, uint64_t objset)
	vp = gfs_dir_create(sizeof (zfsctl_node_t), pvp,
	    zfsctl_ops_snapshot, NULL, NULL, MAXNAMELEN, NULL, NULL);
	VFS_HOLD(vp->v_vfsp);
zfsctl_snapshot_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep, *next;

	VERIFY(gfs_dir_lookup(vp, "..", &dvp, cr, 0, NULL, NULL) == 0);

	mutex_enter(&sdp->sd_lock);

	if (vp->v_count > 1) {
		mutex_exit(&sdp->sd_lock);

	ASSERT(!vn_ismntpt(vp));

	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		next = AVL_NEXT(&sdp->sd_snaps, sep);

		if (sep->se_root == vp) {
			avl_remove(&sdp->sd_snaps, sep);
			kmem_free(sep->se_name, strlen(sep->se_name) + 1);
			kmem_free(sep, sizeof (zfs_snapentry_t));

	ASSERT(sep != NULL);

	mutex_exit(&sdp->sd_lock);

	VFS_RELE(vp->v_vfsp);

	 * Dispose of the vnode for the snapshot mount point.
	 * This is safe to do because once this entry has been removed
	 * from the AVL tree, it can't be found again, so cannot become
	 * "active".  If we look up the same name again we will end up
	 * creating a new vnode.
	gfs_vop_inactive(vp, cr, ct);
 * These vnodes should never see the light of day.  They should always
 * be covered.
static const fs_operation_def_t zfsctl_tops_snapshot[] = {
	VOPNAME_INACTIVE, { .vop_inactive = zfsctl_snapshot_inactive },
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep;

	ASSERT(zfsvfs->z_ctldir != NULL);
	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
	    NULL, 0, NULL, kcred, NULL, NULL, NULL);

	mutex_enter(&sdp->sd_lock);
	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		if (zcp->zc_id == objsetid)
			break;

		sep = AVL_NEXT(&sdp->sd_snaps, sep);
	}

	 * Return the mounted root rather than the covered mount point.
	 * Takes the GFS vnode at .zfs/snapshot/<snapshot objsetid>
	 * and returns the ZFS vnode mounted on top of the GFS node.
	 * This ZFS vnode is the root of the vfs for objset 'objsetid'.
	error = traverse(&vp);
	if (vp == sep->se_root)
	*zfsvfsp = VTOZ(vp)->z_zfsvfs;

	mutex_exit(&sdp->sd_lock);

	mutex_exit(&sdp->sd_lock);
 * Unmount any snapshots for the given filesystem.  This is called from
 * zfs_umount() - if we have a ctldir, then go through and unmount all the
 * snapshots.
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	zfsctl_snapdir_t *sdp;
	zfs_snapentry_t *sep, *next;

	ASSERT(zfsvfs->z_ctldir != NULL);
	error = zfsctl_root_lookup(zfsvfs->z_ctldir, "snapshot", &dvp,
	    NULL, 0, NULL, cr, NULL, NULL, NULL);

	mutex_enter(&sdp->sd_lock);

	sep = avl_first(&sdp->sd_snaps);
	while (sep != NULL) {
		next = AVL_NEXT(&sdp->sd_snaps, sep);

		 * If this snapshot is not mounted, then it must
		 * have just been unmounted by somebody else, and
		 * will be cleaned up by zfsctl_snapdir_inactive().
		if (vn_ismntpt(sep->se_root)) {
			avl_remove(&sdp->sd_snaps, sep);
			error = zfsctl_unmount_snap(sep, fflags, cr);
			if (error)
				avl_add(&sdp->sd_snaps, sep);

	mutex_exit(&sdp->sd_lock);