#include <sys/fs/zfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_dir.h>
+#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sharefs/share.h>
#include <sys/dmu_objset.h>
+#include <sys/fm/util.h>
+
+#include <linux/miscdevice.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_deleg.h"
#include "zfs_comutil.h"
-extern struct modlfs zfs_modlfs;
+kmutex_t zfsdev_state_lock;
+list_t zfsdev_state_list;
extern void zfs_init(void);
extern void zfs_fini(void);
-ldi_ident_t zfs_li = NULL;
-dev_info_t *zfs_dip;
-
typedef int zfs_ioc_func_t(zfs_cmd_t *);
typedef int zfs_secpolicy_func_t(zfs_cmd_t *, cred_t *);
DATASET_NAME
} zfs_ioc_namecheck_t;
+typedef enum {
+ POOL_CHECK_NONE = 1 << 0,
+ POOL_CHECK_SUSPENDED = 1 << 1,
+ POOL_CHECK_READONLY = 1 << 2
+} zfs_ioc_poolcheck_t;
+
typedef struct zfs_ioc_vec {
zfs_ioc_func_t *zvec_func;
zfs_secpolicy_func_t *zvec_secpolicy;
zfs_ioc_namecheck_t zvec_namecheck;
boolean_t zvec_his_log;
- boolean_t zvec_pool_check;
+ zfs_ioc_poolcheck_t zvec_pool_check;
} zfs_ioc_vec_t;
/* This array is indexed by zfs_userquota_prop_t */
{
char *buf;
- if (zc->zc_history == NULL)
+ if (zc->zc_history == 0)
return (NULL);
- buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP);
+ buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP | KM_NODEBUG);
if (copyinstr((void *)(uintptr_t)zc->zc_history,
buf, HIS_MAX_RECORD_LEN, NULL) != 0) {
history_str_free(buf);
}
static int
-zfs_dozonecheck(const char *dataset, cred_t *cr)
+zfs_dozonecheck_impl(const char *dataset, uint64_t zoned, cred_t *cr)
{
- uint64_t zoned;
int writable = 1;
/*
!zone_dataset_visible(dataset, &writable))
return (ENOENT);
- if (dsl_prop_get_integer(dataset, "zoned", &zoned, NULL))
- return (ENOENT);
-
if (INGLOBALZONE(curproc)) {
/*
* If the fs is zoned, only root can access it from the
return (0);
}
+/*
+ * Zone-access check for a dataset identified by name: resolve the
+ * "zoned" property via a name lookup, then defer the actual policy
+ * decision to zfs_dozonecheck_impl().  Returns ENOENT when the
+ * property cannot be read (e.g. dataset does not exist).
+ */
+static int
+zfs_dozonecheck(const char *dataset, cred_t *cr)
+{
+	uint64_t zoned;
+
+	if (dsl_prop_get_integer(dataset, "zoned", &zoned, NULL))
+		return (ENOENT);
+
+	return (zfs_dozonecheck_impl(dataset, zoned, cr));
+}
+
+/*
+ * As zfs_dozonecheck(), but for a dataset the caller already holds:
+ * read "zoned" directly from the dsl_dataset_t (no name lookup),
+ * taking the pool's config rwlock as reader around the property read.
+ */
+static int
+zfs_dozonecheck_ds(const char *dataset, dsl_dataset_t *ds, cred_t *cr)
+{
+	uint64_t zoned;
+
+	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
+	/* intsz = 8, numints = 1: a single 64-bit integer property */
+	if (dsl_prop_get_ds(ds, "zoned", 8, 1, &zoned, NULL)) {
+		rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
+		return (ENOENT);
+	}
+	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
+
+	return (zfs_dozonecheck_impl(dataset, zoned, cr));
+}
+
int
zfs_secpolicy_write_perms(const char *name, const char *perm, cred_t *cr)
{
return (error);
}
+/*
+ * Like zfs_secpolicy_write_perms(), but operates on an already-held
+ * dataset so the zone and delegation checks need not re-resolve the
+ * name.  Access is granted either by secpolicy_zfs() (privileged
+ * caller) or, failing that, by a delegated permission on the dataset.
+ */
+int
+zfs_secpolicy_write_perms_ds(const char *name, dsl_dataset_t *ds,
+    const char *perm, cred_t *cr)
+{
+	int error;
+
+	error = zfs_dozonecheck_ds(name, ds, cr);
+	if (error == 0) {
+		error = secpolicy_zfs(cr);
+		/* not privileged: fall back to the delegated permission */
+		if (error)
+			error = dsl_deleg_access_impl(ds, perm, cr);
+	}
+	return (error);
+}
+
/*
* Policy for setting the security label property.
*
static int
zfs_set_slabel_policy(const char *name, char *strval, cred_t *cr)
{
+#ifdef HAVE_MLSLABEL
char ds_hexsl[MAXNAMELEN];
bslabel_t ds_sl, new_sl;
boolean_t new_default = FALSE;
if (needed_priv != -1)
return (PRIV_POLICY(cr, needed_priv, B_FALSE, EPERM, NULL));
return (0);
+#else
+ return ENOTSUP;
+#endif /* HAVE_MLSLABEL */
}
static int
* Check permissions for special properties.
*/
switch (prop) {
+ default:
+ break;
case ZFS_PROP_ZONED:
/*
* Disallow setting of 'zoned' from within a local zone.
int
zfs_secpolicy_send(zfs_cmd_t *zc, cred_t *cr)
{
-	return (zfs_secpolicy_write_perms(zc->zc_name,
-	    ZFS_DELEG_PERM_SEND, cr));
+	spa_t *spa;
+	dsl_pool_t *dp;
+	dsl_dataset_t *ds;
+	char *cp;
+	int error;
+
+	/*
+	 * Generate the current snapshot name from the given objsetid, then
+	 * use that name for the secpolicy/zone checks.
+	 */
+	cp = strchr(zc->zc_name, '@');
+	if (cp == NULL)
+		return (EINVAL);
+	error = spa_open(zc->zc_name, &spa, FTAG);
+	if (error)
+		return (error);
+
+	/* hold the snapshot by objsetid under the pool config lock */
+	dp = spa_get_dsl(spa);
+	rw_enter(&dp->dp_config_rwlock, RW_READER);
+	error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds);
+	rw_exit(&dp->dp_config_rwlock);
+	spa_close(spa, FTAG);
+	if (error)
+		return (error);
+
+	/* rewrite zc_name in place with the snapshot's current name */
+	dsl_dataset_name(ds, zc->zc_name);
+
+	error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds,
+	    ZFS_DELEG_PERM_SEND, cr);
+	dsl_dataset_rele(ds, FTAG);
+
+	return (error);
}
+#ifdef HAVE_ZPL
static int
zfs_secpolicy_deleg_share(zfs_cmd_t *zc, cred_t *cr)
{
return (dsl_deleg_access(zc->zc_name,
ZFS_DELEG_PERM_SHARE, cr));
}
+#endif /* HAVE_ZPL */
int
zfs_secpolicy_share(zfs_cmd_t *zc, cred_t *cr)
{
+#ifdef HAVE_ZPL
if (!INGLOBALZONE(curproc))
return (EPERM);
} else {
return (zfs_secpolicy_deleg_share(zc, cr));
}
+#else
+ return (ENOTSUP);
+#endif /* HAVE_ZPL */
}
int
zfs_secpolicy_smb_acl(zfs_cmd_t *zc, cred_t *cr)
{
+#ifdef HAVE_ZPL
if (!INGLOBALZONE(curproc))
return (EPERM);
} else {
return (zfs_secpolicy_deleg_share(zc, cr));
}
+#else
+ return (ENOTSUP);
+#endif /* HAVE_ZPL */
}
static int
return (error);
}
+#ifdef HAVE_ZPL
static int
zfs_secpolicy_umount(zfs_cmd_t *zc, cred_t *cr)
{
}
return (error);
}
+#endif /* HAVE_ZPL */
/*
* Policy for pool operations - create/destroy pools, add vdevs, etc. Requires
}
/*
+ * Policy for object to name lookups.
+ */
+/* ARGSUSED */
+static int
+zfs_secpolicy_diff(zfs_cmd_t *zc, cred_t *cr)
+{
+	int error;
+
+	/* sys_config privilege is sufficient on its own */
+	if ((error = secpolicy_sys_config(cr, B_FALSE)) == 0)
+		return (0);
+
+	/* otherwise require the delegated "diff" permission */
+	error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_DIFF, cr);
+	return (error);
+}
+
+/*
* Policy for fault injection. Requires all privileges.
*/
/* ARGSUSED */
}
/*
+ * Policy for allowing temporary snapshots to be taken or released
+ */
+static int
+zfs_secpolicy_tmp_snapshot(zfs_cmd_t *zc, cred_t *cr)
+{
+	/*
+	 * A temporary snapshot is the same as a snapshot,
+	 * hold, destroy and release all rolled into one.
+	 * Delegated diff alone is sufficient that we allow this.
+	 */
+	int error;
+
+	if ((error = zfs_secpolicy_write_perms(zc->zc_name,
+	    ZFS_DELEG_PERM_DIFF, cr)) == 0)
+		return (0);
+
+	/* no "diff" delegation: require all four component permissions */
+	error = zfs_secpolicy_snapshot(zc, cr);
+	if (!error)
+		error = zfs_secpolicy_hold(zc, cr);
+	if (!error)
+		error = zfs_secpolicy_release(zc, cr);
+	if (!error)
+		error = zfs_secpolicy_destroy(zc, cr);
+	return (error);
+}
+
+/*
* Returns the nvlist as specified by the user in the zfs_cmd_t.
*/
static int
if (size == 0)
return (EINVAL);
- packed = kmem_alloc(size, KM_SLEEP);
+ packed = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
if ((error = ddi_copyin((void *)(uintptr_t)nvl, packed, size,
iflag)) != 0) {
if (size > zc->zc_nvlist_dst_size) {
error = ENOMEM;
} else {
- packed = kmem_alloc(size, KM_SLEEP);
+ packed = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
VERIFY(nvlist_pack(nvl, &packed, &size, NV_ENCODE_NATIVE,
KM_SLEEP) == 0);
if (ddi_copyout(packed, (void *)(uintptr_t)zc->zc_nvlist_dst,
return (error);
}
+#ifdef HAVE_ZPL
static int
getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
{
dmu_objset_rele(os, FTAG);
return (error);
}
+#endif
/*
* Find a zfsvfs_t for a mounted filesystem, or create our own, in which
* case its z_vfs will be NULL, and it will be opened as the owner.
*/
static int
-zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp)
+zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer)
{
+#ifdef HAVE_ZPL
int error = 0;
if (getzfsvfs(name, zfvp) != 0)
error = zfsvfs_create(name, zfvp);
if (error == 0) {
- rrw_enter(&(*zfvp)->z_teardown_lock, RW_READER, tag);
+ rrw_enter(&(*zfvp)->z_teardown_lock, (writer) ? RW_WRITER :
+ RW_READER, tag);
if ((*zfvp)->z_unmounted) {
/*
* XXX we could probably try again, since the unmounting
}
}
return (error);
+#else
+ return ENOTSUP;
+#endif
}
static void
zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag)
{
+#ifdef HAVE_ZPL
rrw_exit(&zfsvfs->z_teardown_lock, tag);
if (zfsvfs->z_vfs) {
dmu_objset_disown(zfsvfs->z_os, zfsvfs);
zfsvfs_free(zfsvfs);
}
+#endif
}
static int
nvlist_t *zplprops = NULL;
char *buf;
- if (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
- zc->zc_iflags, &config))
+ if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
+ zc->zc_iflags, &config)))
return (error);
if (zc->zc_nvlist_src_size != 0 && (error =
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
guid != zc->zc_guid)
error = EINVAL;
- else if (zc->zc_cookie)
- error = spa_import_verbatim(zc->zc_name, config, props);
else
- error = spa_import(zc->zc_name, config, props);
+ error = spa_import(zc->zc_name, config, props, zc->zc_cookie);
+
+ if (zc->zc_nvlist_dst != 0) {
+ int err;
+
+ if ((err = put_nvlist(zc, config)) != 0)
+ error = err;
+ }
- if (zc->zc_nvlist_dst != 0)
- (void) put_nvlist(zc, config);
+ if (error == 0)
+ zvol_create_minors(zc->zc_name);
nvlist_free(config);
{
int error;
- if (error = dsl_dsobj_to_dsname(zc->zc_name, zc->zc_obj, zc->zc_value))
+ if ((error = dsl_dsobj_to_dsname(zc->zc_name,zc->zc_obj,zc->zc_value)))
return (error);
return (0);
return (error);
}
+/*
+ * inputs:
+ * zc_name name of filesystem
+ * zc_obj object to find
+ *
+ * outputs:
+ * zc_stat stats on object
+ * zc_value path to object
+ */
+static int
+zfs_ioc_obj_to_stats(zfs_cmd_t *zc)
+{
+	objset_t *os;
+	int error;
+
+	/* XXX reading from objset not owned */
+	if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os)) != 0)
+		return (error);
+	/* only ZPL datasets have per-object stat/path information */
+	if (dmu_objset_type(os) != DMU_OST_ZFS) {
+		dmu_objset_rele(os, FTAG);
+		return (EINVAL);
+	}
+	error = zfs_obj_to_stats(os, zc->zc_obj, &zc->zc_stat, zc->zc_value,
+	    sizeof (zc->zc_value));
+	dmu_objset_rele(os, FTAG);
+
+	return (error);
+}
+
static int
zfs_ioc_vdev_add(zfs_cmd_t *zc)
{
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
- if (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
- zc->zc_iflags, &config)) {
+ if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
+ zc->zc_iflags, &config))) {
spa_close(spa, FTAG);
return (error);
}
return (error);
}
-/*
- * inputs:
- * zc_name name of filesystem
- * zc_nvlist_dst_size size of buffer for property nvlist
- *
- * outputs:
- * zc_objset_stats stats
- * zc_nvlist_dst property nvlist
- * zc_nvlist_dst_size size of property nvlist
- */
static int
-zfs_ioc_objset_stats(zfs_cmd_t *zc)
+zfs_ioc_objset_stats_impl(zfs_cmd_t *zc, objset_t *os)
{
- objset_t *os = NULL;
- int error;
+ int error = 0;
nvlist_t *nv;
- if (error = dmu_objset_hold(zc->zc_name, FTAG, &os))
- return (error);
-
dmu_objset_fast_stat(os, &zc->zc_objset_stats);
if (zc->zc_nvlist_dst != 0 &&
*/
if (!zc->zc_objset_stats.dds_inconsistent) {
if (dmu_objset_type(os) == DMU_OST_ZVOL)
- VERIFY(zvol_get_stats(os, nv) == 0);
+ error = zvol_get_stats(os, nv);
}
- error = put_nvlist(zc, nv);
+ if (error == 0)
+ error = put_nvlist(zc, nv);
nvlist_free(nv);
}
+ return (error);
+}
+
+/*
+ * inputs:
+ * zc_name name of filesystem
+ * zc_nvlist_dst_size size of buffer for property nvlist
+ *
+ * outputs:
+ * zc_objset_stats stats
+ * zc_nvlist_dst property nvlist
+ * zc_nvlist_dst_size size of property nvlist
+ */
+static int
+zfs_ioc_objset_stats(zfs_cmd_t *zc)
+{
+ objset_t *os = NULL;
+ int error;
+
+ if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os)))
+ return (error);
+
+ error = zfs_ioc_objset_stats_impl(zc, os);
+
dmu_objset_rele(os, FTAG);
+
return (error);
}
int error;
nvlist_t *nv;
- if (error = dmu_objset_hold(zc->zc_name, FTAG, &os))
+ if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os)))
return (error);
/*
int err;
/* XXX reading without owning */
- if (err = dmu_objset_hold(zc->zc_name, FTAG, &os))
+ if ((err = dmu_objset_hold(zc->zc_name, FTAG, &os)))
return (err);
dmu_objset_fast_stat(os, &zc->zc_objset_stats);
* which we aren't supposed to do with a DS_MODE_USER
* hold, because it could be inconsistent.
*/
- if (zc->zc_nvlist_dst != NULL &&
+ if (zc->zc_nvlist_dst != 0 &&
!zc->zc_objset_stats.dds_inconsistent &&
dmu_objset_type(os) == DMU_OST_ZFS) {
nvlist_t *nv;
size_t orig_len = strlen(zc->zc_name);
top:
- if (error = dmu_objset_hold(zc->zc_name, FTAG, &os)) {
+ if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os))) {
if (error == ENOENT)
error = ESRCH;
return (error);
error = dmu_snapshot_list_next(os,
sizeof (zc->zc_name) - strlen(zc->zc_name),
- zc->zc_name + strlen(zc->zc_name), NULL, &zc->zc_cookie, NULL);
- dmu_objset_rele(os, FTAG);
+ zc->zc_name + strlen(zc->zc_name), &zc->zc_obj, &zc->zc_cookie,
+ NULL);
+
if (error == 0) {
- error = zfs_ioc_objset_stats(zc); /* fill in the stats */
- if (error == ENOENT) {
- /* We lost a race with destroy, get the next one. */
- *strchr(zc->zc_name, '@') = '\0';
- goto top;
+ dsl_dataset_t *ds;
+ dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool;
+
+ /*
+ * Since we probably don't have a hold on this snapshot,
+ * it's possible that the objsetid could have been destroyed
+ * and reused for a new objset. It's OK if this happens during
+ * a zfs send operation, since the new createtxg will be
+ * beyond the range we're interested in.
+ */
+ rw_enter(&dp->dp_config_rwlock, RW_READER);
+ error = dsl_dataset_hold_obj(dp, zc->zc_obj, FTAG, &ds);
+ rw_exit(&dp->dp_config_rwlock);
+ if (error) {
+ if (error == ENOENT) {
+ /* Racing with destroy, get the next one. */
+ *strchr(zc->zc_name, '@') = '\0';
+ dmu_objset_rele(os, FTAG);
+ goto top;
+ }
+ } else {
+ objset_t *ossnap;
+
+ error = dmu_objset_from_ds(ds, &ossnap);
+ if (error == 0)
+ error = zfs_ioc_objset_stats_impl(zc, ossnap);
+ dsl_dataset_rele(ds, FTAG);
}
} else if (error == ENOENT) {
error = ESRCH;
}
+ dmu_objset_rele(os, FTAG);
/* if we failed, undo the @ that we tacked on to zc_name */
if (error)
*strchr(zc->zc_name, '@') = '\0';
static int
zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
{
+#ifdef HAVE_ZPL
const char *propname = nvpair_name(pair);
uint64_t *valary;
unsigned int vallen;
rid = valary[1];
quota = valary[2];
- err = zfsvfs_hold(dsname, FTAG, &zfsvfs);
+ err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE);
if (err == 0) {
err = zfs_set_userquota(zfsvfs, type, domain, rid, quota);
zfsvfs_rele(zfsvfs, FTAG);
}
return (err);
+#else
+ return ENOTSUP;
+#endif
}
/*
err = dsl_dataset_set_reservation(dsname, source, intval);
break;
case ZFS_PROP_VOLSIZE:
- err = zvol_set_volsize(dsname, ddi_driver_major(zfs_dip),
- intval);
+ err = zvol_set_volsize(dsname, intval);
break;
case ZFS_PROP_VERSION:
{
zfsvfs_t *zfsvfs;
- if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs)) != 0)
+ if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0)
break;
+#ifdef HAVE_ZPL
err = zfs_set_version(zfsvfs, intval);
+#endif
zfsvfs_rele(zfsvfs, FTAG);
if (err == 0 && intval >= ZPL_VERSION_USERSPACE) {
nvpair_type(pair) != DATA_TYPE_STRING)
return (EINVAL);
- if (error = zfs_secpolicy_write_perms(fsname,
- ZFS_DELEG_PERM_USERPROP, CRED()))
+ if ((error = zfs_secpolicy_write_perms(fsname,
+ ZFS_DELEG_PERM_USERPROP, CRED())))
return (error);
if (strlen(propname) >= ZAP_MAXNAMELEN)
error = zfs_set_prop_nvlist(zc->zc_name, source, nvl, &errors);
- if (zc->zc_nvlist_dst != NULL && errors != NULL) {
+ if (zc->zc_nvlist_dst != 0 && errors != NULL) {
(void) put_nvlist(zc, errors);
}
int error;
nvpair_t *pair;
- if (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
- zc->zc_iflags, &props))
+ if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
+ zc->zc_iflags, &props)))
return (error);
/*
spa_close(spa, FTAG);
}
- if (error == 0 && zc->zc_nvlist_dst != NULL)
+ if (error == 0 && zc->zc_nvlist_dst != 0)
error = put_nvlist(zc, nvp);
else
error = EFAULT;
/*
* inputs:
+ * zc_name name of volume
+ *
+ * outputs: none
+ */
+/* thin wrapper: create the /dev minor node for a zvol */
+static int
+zfs_ioc_create_minor(zfs_cmd_t *zc)
+{
+	return (zvol_create_minor(zc->zc_name));
+}
+
+/*
+ * inputs:
+ * zc_name name of volume
+ *
+ * outputs: none
+ */
+/* thin wrapper: remove the /dev minor node for a zvol */
+static int
+zfs_ioc_remove_minor(zfs_cmd_t *zc)
+{
+	return (zvol_remove_minor(zc->zc_name));
+}
+
+/*
+ * inputs:
* zc_name name of filesystem
* zc_nvlist_src{_size} nvlist of delegated permissions
* zc_perm_action allow/unallow flag
return (error);
}
+#ifdef HAVE_ZPL
/*
* Search the vfs list for a specified resource. Returns a pointer to it
* or NULL if no suitable entry is found. The caller of this routine
vfs_list_unlock();
return (vfs_found);
}
+#endif /* HAVE_ZPL */
/* ARGSUSED */
static void
strchr(zc->zc_name, '%'))
return (EINVAL);
- if (zc->zc_nvlist_src != NULL &&
+ if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &nvprops)) != 0)
return (error);
if (snapshot_namecheck(zc->zc_value, NULL, NULL) != 0)
return (EINVAL);
- if (zc->zc_nvlist_src != NULL &&
+ if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &nvprops)) != 0)
return (error);
goto out;
}
- error = dmu_objset_snapshot(zc->zc_name, zc->zc_value,
- nvprops, recursive);
+ error = dmu_objset_snapshot(zc->zc_name, zc->zc_value, NULL,
+ nvprops, recursive, B_FALSE, -1);
out:
nvlist_free(nvprops);
int
zfs_unmount_snap(const char *name, void *arg)
{
+#ifdef HAVE_ZPL
vfs_t *vfsp = NULL;
if (arg) {
if ((err = dounmount(vfsp, flag, kcred)) != 0)
return (err);
}
+#endif /* HAVE_ZPL */
return (0);
}
static int
zfs_ioc_rollback(zfs_cmd_t *zc)
{
+#ifdef HAVE_ZPL
dsl_dataset_t *ds, *clone;
int error;
zfsvfs_t *zfsvfs;
if (ds)
dsl_dataset_rele(ds, FTAG);
return (error);
+#else
+ return (ENOTSUP);
+#endif /* HAVE_ZPL */
}
/*
if (prop == ZPROP_INVAL) {
if (zfs_prop_user(propname)) {
- if (err = zfs_secpolicy_write_perms(dsname,
- ZFS_DELEG_PERM_USERPROP, cr))
+ if ((err = zfs_secpolicy_write_perms(dsname,
+ ZFS_DELEG_PERM_USERPROP, cr)))
return (err);
return (0);
}
return (EINVAL);
}
- if (err = zfs_secpolicy_write_perms(dsname, perm, cr))
+ if ((err = zfs_secpolicy_write_perms(dsname, perm, cr)))
return (err);
return (0);
}
return (ENOTSUP);
}
break;
+ default:
+ break;
}
return (zfs_secpolicy_setprop(dsname, prop, pair, CRED()));
* zc_cookie file descriptor to recv from
* zc_begin_record the BEGIN record of the stream (not byteswapped)
* zc_guid force flag
+ * zc_cleanup_fd cleanup-on-exit file descriptor
+ * zc_action_handle handle for this guid/ds mapping (or zero on first call)
*
* outputs:
* zc_cookie number of bytes read
* zc_nvlist_dst{_size} error for each unapplied received property
* zc_obj zprop_errflags_t
+ * zc_action_handle handle for this guid/ds mapping
*/
static int
zfs_ioc_recv(zfs_cmd_t *zc)
tosnap = strchr(tofs, '@');
*tosnap++ = '\0';
- if (zc->zc_nvlist_src != NULL &&
+ if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props)) != 0)
return (error);
}
off = fp->f_offset;
- error = dmu_recv_stream(&drc, fp->f_vnode, &off);
+ error = dmu_recv_stream(&drc, fp->f_vnode, &off, zc->zc_cleanup_fd,
+ &zc->zc_action_handle);
if (error == 0) {
+#ifdef HAVE_ZPL
zfsvfs_t *zfsvfs = NULL;
if (getzfsvfs(tofs, &zfsvfs) == 0) {
} else {
error = dmu_recv_end(&drc);
}
+#else
+ error = dmu_recv_end(&drc);
+#endif /* HAVE_ZPL */
}
zc->zc_cookie = off - fp->f_offset;
/*
* inputs:
* zc_name name of snapshot to send
- * zc_value short name of incremental fromsnap (may be empty)
* zc_cookie file descriptor to send stream to
- * zc_obj fromorigin flag (mutually exclusive with zc_value)
+ * zc_obj fromorigin flag (mutually exclusive with zc_fromobj)
+ * zc_sendobj objsetid of snapshot to send
+ * zc_fromobj objsetid of incremental fromsnap (may be zero)
*
* outputs: none
*/
file_t *fp;
int error;
offset_t off;
+ dsl_dataset_t *ds;
+ dsl_dataset_t *dsfrom = NULL;
+ spa_t *spa;
+ dsl_pool_t *dp;
- error = dmu_objset_hold(zc->zc_name, FTAG, &tosnap);
+ error = spa_open(zc->zc_name, &spa, FTAG);
if (error)
return (error);
- if (zc->zc_value[0] != '\0') {
- char *buf;
- char *cp;
-
- buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
- (void) strncpy(buf, zc->zc_name, MAXPATHLEN);
- cp = strchr(buf, '@');
- if (cp)
- *(cp+1) = 0;
- (void) strncat(buf, zc->zc_value, MAXPATHLEN);
- error = dmu_objset_hold(buf, FTAG, &fromsnap);
- kmem_free(buf, MAXPATHLEN);
+ dp = spa_get_dsl(spa);
+ rw_enter(&dp->dp_config_rwlock, RW_READER);
+ error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds);
+ rw_exit(&dp->dp_config_rwlock);
+ if (error) {
+ spa_close(spa, FTAG);
+ return (error);
+ }
+
+ error = dmu_objset_from_ds(ds, &tosnap);
+ if (error) {
+ dsl_dataset_rele(ds, FTAG);
+ spa_close(spa, FTAG);
+ return (error);
+ }
+
+ if (zc->zc_fromobj != 0) {
+ rw_enter(&dp->dp_config_rwlock, RW_READER);
+ error = dsl_dataset_hold_obj(dp, zc->zc_fromobj, FTAG, &dsfrom);
+ rw_exit(&dp->dp_config_rwlock);
+ spa_close(spa, FTAG);
if (error) {
- dmu_objset_rele(tosnap, FTAG);
+ dsl_dataset_rele(ds, FTAG);
return (error);
}
+ error = dmu_objset_from_ds(dsfrom, &fromsnap);
+ if (error) {
+ dsl_dataset_rele(dsfrom, FTAG);
+ dsl_dataset_rele(ds, FTAG);
+ return (error);
+ }
+ } else {
+ spa_close(spa, FTAG);
}
fp = getf(zc->zc_cookie);
if (fp == NULL) {
- dmu_objset_rele(tosnap, FTAG);
- if (fromsnap)
- dmu_objset_rele(fromsnap, FTAG);
+ dsl_dataset_rele(ds, FTAG);
+ if (dsfrom)
+ dsl_dataset_rele(dsfrom, FTAG);
return (EBADF);
}
if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0)
fp->f_offset = off;
releasef(zc->zc_cookie);
- if (fromsnap)
- dmu_objset_rele(fromsnap, FTAG);
- dmu_objset_rele(tosnap, FTAG);
+ if (dsfrom)
+ dsl_dataset_rele(dsfrom, FTAG);
+ dsl_dataset_rele(ds, FTAG);
return (error);
}
nvlist_t *policy;
nvlist_t *config = NULL;
- if (zc->zc_nvlist_src == NULL)
+ if (zc->zc_nvlist_src == 0)
return (EINVAL);
if ((error = get_nvlist(zc->zc_nvlist_src,
error = spa_open_rewind(zc->zc_name, &spa, FTAG,
policy, &config);
if (config != NULL) {
- (void) put_nvlist(zc, config);
+ int err;
+
+ if ((err = put_nvlist(zc, config)) != 0)
+ error = err;
nvlist_free(config);
}
nvlist_free(policy);
static int
zfs_ioc_userspace_one(zfs_cmd_t *zc)
{
+#ifdef HAVE_ZPL
zfsvfs_t *zfsvfs;
int error;
if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
return (EINVAL);
- error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs);
+ error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
if (error)
return (error);
zfsvfs_rele(zfsvfs, FTAG);
return (error);
+#else
+ return (ENOTSUP);
+#endif /* HAVE_ZPL */
}
/*
static int
zfs_ioc_userspace_many(zfs_cmd_t *zc)
{
+#ifdef HAVE_ZPL
zfsvfs_t *zfsvfs;
int bufsize = zc->zc_nvlist_dst_size;
if (bufsize <= 0)
return (ENOMEM);
- int error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs);
+ int error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
if (error)
return (error);
zfsvfs_rele(zfsvfs, FTAG);
return (error);
+#else
+ return (ENOTSUP);
+#endif /* HAVE_ZPL */
}
/*
static int
zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
{
+#ifdef HAVE_ZPL
objset_t *os;
int error = 0;
zfsvfs_t *zfsvfs;
}
return (error);
+#else
+ return (ENOTSUP);
+#endif /* HAVE_ZPL */
}
/*
* the first file system is shared.
* Neither sharefs, nfs or smbsrv are unloadable modules.
*/
+#ifdef HAVE_ZPL
int (*znfsexport_fs)(void *arg);
int (*zshare_fs)(enum sharefs_sys_op, share_t *, uint32_t);
int (*zsmbexport_fs)(void *arg, boolean_t add_share);
}
return (0);
}
+#endif /* HAVE_ZPL */
static int
zfs_ioc_share(zfs_cmd_t *zc)
{
+#ifdef HAVE_ZPL
int error;
int opcode;
zc->zc_share.z_sharemax);
return (error);
-
+#else
+ return (ENOTSUP);
+#endif /* HAVE_ZPL */
}
ace_t full_access[] = {
};
/*
+ * inputs:
+ * zc_name name of containing filesystem
+ * zc_obj object # beyond which we want next in-use object #
+ *
+ * outputs:
+ * zc_obj next in-use object #
+ */
+static int
+zfs_ioc_next_obj(zfs_cmd_t *zc)
+{
+	objset_t *os = NULL;
+	int error;
+
+	error = dmu_objset_hold(zc->zc_name, FTAG, &os);
+	if (error)
+		return (error);
+
+	/*
+	 * NOTE(review): the txg bound appears to restrict the walk to
+	 * objects touched since the previous snapshot — confirm against
+	 * dmu_object_next() semantics.
+	 */
+	error = dmu_object_next(os, &zc->zc_obj, B_FALSE,
+	    os->os_dsl_dataset->ds_phys->ds_prev_snap_txg);
+
+	dmu_objset_rele(os, FTAG);
+	return (error);
+}
+
+/*
+ * inputs:
+ * zc_name name of filesystem
+ * zc_value prefix name for snapshot
+ * zc_cleanup_fd cleanup-on-exit file descriptor for calling process
+ *
+ * outputs:
+ */
+static int
+zfs_ioc_tmp_snapshot(zfs_cmd_t *zc)
+{
+	char *snap_name;
+	int error;
+
+	/* build a unique snapshot name: user prefix + 64-bit lbolt suffix */
+	snap_name = kmem_asprintf("%s-%016llx", zc->zc_value,
+	    (u_longlong_t)ddi_get_lbolt64());
+
+	if (strlen(snap_name) >= MAXNAMELEN) {
+		strfree(snap_name);
+		return (E2BIG);
+	}
+
+	/* temporary snapshot, auto-cleaned via zc_cleanup_fd on exit */
+	error = dmu_objset_snapshot(zc->zc_name, snap_name, snap_name,
+	    NULL, B_FALSE, B_TRUE, zc->zc_cleanup_fd);
+	if (error != 0) {
+		strfree(snap_name);
+		return (error);
+	}
+
+	/* hand the generated name back to the caller in zc_value */
+	(void) strcpy(zc->zc_value, snap_name);
+	strfree(snap_name);
+	return (0);
+}
+
+/*
+ * inputs:
+ * zc_name name of "to" snapshot
+ * zc_value name of "from" snapshot
+ * zc_cookie file descriptor to write diff data on
+ *
+ * outputs:
+ * dmu_diff_record_t's to the file descriptor
+ */
+static int
+zfs_ioc_diff(zfs_cmd_t *zc)
+{
+	objset_t *fromsnap;
+	objset_t *tosnap;
+	file_t *fp;
+	offset_t off;
+	int error;
+
+	/* hold both snapshots for the duration of the diff */
+	error = dmu_objset_hold(zc->zc_name, FTAG, &tosnap);
+	if (error)
+		return (error);
+
+	error = dmu_objset_hold(zc->zc_value, FTAG, &fromsnap);
+	if (error) {
+		dmu_objset_rele(tosnap, FTAG);
+		return (error);
+	}
+
+	fp = getf(zc->zc_cookie);
+	if (fp == NULL) {
+		dmu_objset_rele(fromsnap, FTAG);
+		dmu_objset_rele(tosnap, FTAG);
+		return (EBADF);
+	}
+
+	off = fp->f_offset;
+
+	/* stream dmu_diff_record_t's to the caller-supplied fd */
+	error = dmu_diff(tosnap, fromsnap, fp->f_vnode, &off);
+
+	/* advance the fd's offset past what was written, if seekable */
+	if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0)
+		fp->f_offset = off;
+	releasef(zc->zc_cookie);
+
+	dmu_objset_rele(fromsnap, FTAG);
+	dmu_objset_rele(tosnap, FTAG);
+	return (error);
+}
+
+/*
* Remove all ACL files in shares dir
*/
+#ifdef HAVE_ZPL
static int
zfs_smb_acl_purge(znode_t *dzp)
{
zap_cursor_fini(&zc);
return (error);
}
+#endif /* HAVE ZPL */
static int
zfs_ioc_smb_acl(zfs_cmd_t *zc)
{
+#ifdef HAVE_ZPL
vnode_t *vp;
znode_t *dzp;
vnode_t *resourcevp = NULL;
ZFS_EXIT(zfsvfs);
return (error);
+#else
+ return (ENOTSUP);
+#endif /* HAVE_ZPL */
}
/*
* inputs:
- * zc_name name of filesystem
- * zc_value short name of snap
- * zc_string user-supplied tag for this reference
- * zc_cookie recursive flag
- * zc_temphold set if hold is temporary
+ * zc_name name of filesystem
+ * zc_value short name of snap
+ * zc_string user-supplied tag for this hold
+ * zc_cookie recursive flag
+ * zc_temphold set if hold is temporary
+ * zc_cleanup_fd cleanup-on-exit file descriptor for calling process
+ * zc_sendobj if non-zero, the objid for zc_name@zc_value
+ * zc_createtxg if zc_sendobj is non-zero, snap must have zc_createtxg
*
* outputs: none
*/
zfs_ioc_hold(zfs_cmd_t *zc)
{
boolean_t recursive = zc->zc_cookie;
+ spa_t *spa;
+ dsl_pool_t *dp;
+ dsl_dataset_t *ds;
+ int error;
+ minor_t minor = 0;
if (snapshot_namecheck(zc->zc_value, NULL, NULL) != 0)
return (EINVAL);
- return (dsl_dataset_user_hold(zc->zc_name, zc->zc_value,
- zc->zc_string, recursive, zc->zc_temphold));
+ if (zc->zc_sendobj == 0) {
+ return (dsl_dataset_user_hold(zc->zc_name, zc->zc_value,
+ zc->zc_string, recursive, zc->zc_temphold,
+ zc->zc_cleanup_fd));
+ }
+
+ if (recursive)
+ return (EINVAL);
+
+ error = spa_open(zc->zc_name, &spa, FTAG);
+ if (error)
+ return (error);
+
+ dp = spa_get_dsl(spa);
+ rw_enter(&dp->dp_config_rwlock, RW_READER);
+ error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds);
+ rw_exit(&dp->dp_config_rwlock);
+ spa_close(spa, FTAG);
+ if (error)
+ return (error);
+
+ /*
+ * Until we have a hold on this snapshot, it's possible that
+ * zc_sendobj could've been destroyed and reused as part
+ * of a later txg. Make sure we're looking at the right object.
+ */
+ if (zc->zc_createtxg != ds->ds_phys->ds_creation_txg) {
+ dsl_dataset_rele(ds, FTAG);
+ return (ENOENT);
+ }
+
+ if (zc->zc_cleanup_fd != -1 && zc->zc_temphold) {
+ error = zfs_onexit_fd_hold(zc->zc_cleanup_fd, &minor);
+ if (error) {
+ dsl_dataset_rele(ds, FTAG);
+ return (error);
+ }
+ }
+
+ error = dsl_dataset_user_hold_for_send(ds, zc->zc_string,
+ zc->zc_temphold);
+ if (minor != 0) {
+ if (error == 0) {
+ dsl_register_onexit_hold_cleanup(ds, zc->zc_string,
+ minor);
+ }
+ zfs_onexit_fd_rele(zc->zc_cleanup_fd);
+ }
+ dsl_dataset_rele(ds, FTAG);
+
+ return (error);
}
/*
* inputs:
- * zc_name name of dataset from which we're releasing a user reference
+ * zc_name name of dataset from which we're releasing a user hold
* zc_value short name of snap
- * zc_string user-supplied tag for this reference
+ * zc_string user-supplied tag for this hold
* zc_cookie recursive flag
*
- * outputs: none
+ * outputs: none
*/
static int
zfs_ioc_release(zfs_cmd_t *zc)
}
/*
+ * inputs:
+ * zc_guid flags (ZEVENT_NONBLOCK)
+ *
+ * outputs:
+ * zc_nvlist_dst next nvlist event
+ * zc_cookie dropped events since last get
+ * zc_cleanup_fd cleanup-on-exit file descriptor
+ */
+static int
+zfs_ioc_events_next(zfs_cmd_t *zc)
+{
+	zfs_zevent_t *ze;
+	nvlist_t *event = NULL;
+	minor_t minor;
+	uint64_t dropped = 0;
+	int error;
+
+	/* per-fd zevent cursor keeps each consumer's position */
+	error = zfs_zevent_fd_hold(zc->zc_cleanup_fd, &minor, &ze);
+	if (error != 0)
+		return (error);
+
+	do {
+		error = zfs_zevent_next(ze, &event, &dropped);
+		if (event != NULL) {
+			zc->zc_cookie = dropped;
+			error = put_nvlist(zc, event);
+			nvlist_free(event);
+		}
+
+		/* non-blocking callers get whatever was available */
+		if (zc->zc_guid & ZEVENT_NONBLOCK)
+			break;
+
+		/* stop on success or any error other than "queue empty" */
+		if ((error == 0) || (error != ENOENT))
+			break;
+
+		/* queue empty: block until the next event arrives */
+		error = zfs_zevent_wait(ze);
+		if (error)
+			break;
+	} while (1);
+
+	zfs_zevent_fd_rele(zc->zc_cleanup_fd);
+
+	return (error);
+}
+
+/*
+ * outputs:
+ * zc_cookie cleared events count
+ */
+/* drain every pending zevent; report how many were discarded */
+static int
+zfs_ioc_events_clear(zfs_cmd_t *zc)
+{
+	int count;
+
+	zfs_zevent_drain_all(&count);
+	zc->zc_cookie = count;
+
+	return 0;
+}
+
+/*
 * pool create, destroy, and export don't log the history as part of
 * zfsdev_ioctl, but rather zfs_ioc_pool_create, and zfs_ioc_pool_export
 * do the logging of those commands.
 */
+/*
+ * NOTE: entries must remain in ZFS_IOC_* command order —
+ * zfsdev_ioctl() indexes this table directly by (cmd - ZFS_IOC).
+ * zvec_pool_check flags gate the ioctl on pool status: suspended
+ * pools fail with EAGAIN, read-only pools with EROFS.
+ */
static zfs_ioc_vec_t zfs_ioc_vec[] = {
{ zfs_ioc_pool_create, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_destroy, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_import, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_export, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_configs, zfs_secpolicy_none, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_stats, zfs_secpolicy_read, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_tryimport, zfs_secpolicy_config, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_scan, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_pool_freeze, zfs_secpolicy_config, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_READONLY },
{ zfs_ioc_pool_upgrade, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_pool_get_history, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_vdev_add, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_remove, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_set_state, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_FALSE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_attach, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_detach, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_setpath, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_setfru, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_objset_stats, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED },
{ zfs_ioc_objset_zplprops, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_dataset_list_next, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED },
{ zfs_ioc_snapshot_list_next, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_TRUE },
- { zfs_ioc_set_prop, zfs_secpolicy_none, DATASET_NAME, B_TRUE, B_TRUE },
- { zfs_ioc_create, zfs_secpolicy_create, DATASET_NAME, B_TRUE, B_TRUE },
+ POOL_CHECK_SUSPENDED },
+ { zfs_ioc_set_prop, zfs_secpolicy_none, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_create_minor, zfs_secpolicy_config, DATASET_NAME, B_FALSE,
+ POOL_CHECK_NONE },
+ { zfs_ioc_remove_minor, zfs_secpolicy_config, DATASET_NAME, B_FALSE,
+ POOL_CHECK_NONE },
+ { zfs_ioc_create, zfs_secpolicy_create, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_destroy, zfs_secpolicy_destroy, DATASET_NAME, B_TRUE,
- B_TRUE},
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_rollback, zfs_secpolicy_rollback, DATASET_NAME, B_TRUE,
- B_TRUE },
- { zfs_ioc_rename, zfs_secpolicy_rename, DATASET_NAME, B_TRUE, B_TRUE },
- { zfs_ioc_recv, zfs_secpolicy_receive, DATASET_NAME, B_TRUE, B_TRUE },
- { zfs_ioc_send, zfs_secpolicy_send, DATASET_NAME, B_TRUE, B_FALSE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_rename, zfs_secpolicy_rename, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_recv, zfs_secpolicy_receive, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_send, zfs_secpolicy_send, DATASET_NAME, B_TRUE,
+ POOL_CHECK_NONE },
{ zfs_ioc_inject_fault, zfs_secpolicy_inject, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_clear_fault, zfs_secpolicy_inject, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_inject_list_next, zfs_secpolicy_inject, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_error_log, zfs_secpolicy_inject, POOL_NAME, B_FALSE,
- B_FALSE },
- { zfs_ioc_clear, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_FALSE },
+ POOL_CHECK_NONE },
+ { zfs_ioc_clear, zfs_secpolicy_config, POOL_NAME, B_TRUE,
+ POOL_CHECK_NONE },
{ zfs_ioc_promote, zfs_secpolicy_promote, DATASET_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_destroy_snaps, zfs_secpolicy_destroy_snaps, DATASET_NAME,
- B_TRUE, B_TRUE },
+ B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_snapshot, zfs_secpolicy_snapshot, DATASET_NAME, B_TRUE,
- B_TRUE },
- { zfs_ioc_dsobj_to_dsname, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_FALSE },
- { zfs_ioc_obj_to_path, zfs_secpolicy_config, DATASET_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_dsobj_to_dsname, zfs_secpolicy_diff, POOL_NAME, B_FALSE,
+ POOL_CHECK_NONE },
+ { zfs_ioc_obj_to_path, zfs_secpolicy_diff, DATASET_NAME, B_FALSE,
+ POOL_CHECK_SUSPENDED },
{ zfs_ioc_pool_set_props, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_pool_get_props, zfs_secpolicy_read, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_set_fsacl, zfs_secpolicy_fsacl, DATASET_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_get_fsacl, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_FALSE },
- { zfs_ioc_share, zfs_secpolicy_share, DATASET_NAME, B_FALSE, B_FALSE },
+ POOL_CHECK_NONE },
+ { zfs_ioc_share, zfs_secpolicy_share, DATASET_NAME, B_FALSE,
+ POOL_CHECK_NONE },
{ zfs_ioc_inherit_prop, zfs_secpolicy_inherit, DATASET_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_smb_acl, zfs_secpolicy_smb_acl, DATASET_NAME, B_FALSE,
- B_FALSE },
- { zfs_ioc_userspace_one, zfs_secpolicy_userspace_one,
- DATASET_NAME, B_FALSE, B_FALSE },
- { zfs_ioc_userspace_many, zfs_secpolicy_userspace_many,
- DATASET_NAME, B_FALSE, B_FALSE },
+ POOL_CHECK_NONE },
+ { zfs_ioc_userspace_one, zfs_secpolicy_userspace_one, DATASET_NAME,
+ B_FALSE, POOL_CHECK_NONE },
+ { zfs_ioc_userspace_many, zfs_secpolicy_userspace_many, DATASET_NAME,
+ B_FALSE, POOL_CHECK_NONE },
{ zfs_ioc_userspace_upgrade, zfs_secpolicy_userspace_upgrade,
- DATASET_NAME, B_FALSE, B_TRUE },
- { zfs_ioc_hold, zfs_secpolicy_hold, DATASET_NAME, B_TRUE, B_TRUE },
+ DATASET_NAME, B_FALSE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_hold, zfs_secpolicy_hold, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_release, zfs_secpolicy_release, DATASET_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_get_holds, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED },
{ zfs_ioc_objset_recvd_props, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_vdev_split, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE }
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_next_obj, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
+ POOL_CHECK_NONE },
+ { zfs_ioc_diff, zfs_secpolicy_diff, DATASET_NAME, B_FALSE,
+ POOL_CHECK_NONE },
+ { zfs_ioc_tmp_snapshot, zfs_secpolicy_tmp_snapshot, DATASET_NAME,
+ B_FALSE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_obj_to_stats, zfs_secpolicy_diff, DATASET_NAME, B_FALSE,
+ POOL_CHECK_SUSPENDED },
+ { zfs_ioc_events_next, zfs_secpolicy_config, NO_NAME, B_FALSE,
+ POOL_CHECK_NONE },
+ { zfs_ioc_events_clear, zfs_secpolicy_config, NO_NAME, B_FALSE,
+ POOL_CHECK_NONE },
};
+/*
+ * Gate an ioctl on pool status per its zvec_pool_check flags:
+ * EAGAIN if the pool is suspended, EROFS if it is read-only.
+ */
int
-pool_status_check(const char *name, zfs_ioc_namecheck_t type)
+pool_status_check(const char *name, zfs_ioc_namecheck_t type,
+ zfs_ioc_poolcheck_t check)
{
spa_t *spa;
int error;
ASSERT(type == POOL_NAME || type == DATASET_NAME);
+ /* POOL_CHECK_NONE bypasses the status checks entirely. */
+ if (check & POOL_CHECK_NONE)
+ return (0);
+
error = spa_open(name, &spa, FTAG);
if (error == 0) {
- if (spa_suspended(spa))
+ if ((check & POOL_CHECK_SUSPENDED) && spa_suspended(spa))
error = EAGAIN;
+ else if ((check & POOL_CHECK_READONLY) && !spa_writeable(spa))
+ error = EROFS;
spa_close(spa, FTAG);
}
return (error);
}
+/*
+ * Look up the state for a minor with zfsdev_state_lock already held;
+ * "which" selects the onexit, zevent, or whole-state pointer.
+ */
+static void *
+zfsdev_get_state_impl(minor_t minor, enum zfsdev_state_type which)
+{
+ zfsdev_state_t *zs;
+
+ ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+
+ /* Linear scan: the list only holds currently open file handles. */
+ for (zs = list_head(&zfsdev_state_list); zs != NULL;
+     zs = list_next(&zfsdev_state_list, zs)) {
+  if (zs->zs_minor == minor) {
+   switch (which) {
+   case ZST_ONEXIT:
+    return (zs->zs_onexit);
+   case ZST_ZEVENT:
+    return (zs->zs_zevent);
+   case ZST_ALL:
+    return (zs);
+   default:
+    return (NULL);
+   }
+  }
+ }
+
+ return (NULL);
+}
+
+/*
+ * Locked wrapper around zfsdev_get_state_impl(); returns the requested
+ * state pointer for "minor", or NULL if the minor is not open.
+ */
+void *
+zfsdev_get_state(minor_t minor, enum zfsdev_state_type which)
+{
+ void *ptr;
+
+ mutex_enter(&zfsdev_state_lock);
+ ptr = zfsdev_get_state_impl(minor, which);
+ mutex_exit(&zfsdev_state_lock);
+
+ return (ptr);
+}
+
+/* Map an open /dev/zfs file handle back to its allocated minor number. */
+minor_t
+zfsdev_getminor(struct file *filp)
+{
+ ASSERT(filp != NULL);
+ ASSERT(filp->private_data != NULL);
+
+ return (((zfsdev_state_t *)filp->private_data)->zs_minor);
+}
+
+/*
+ * Find a free minor number. The zfsdev_state_list is expected to
+ * be short since it is only a list of currently open file handles.
+ */
+minor_t
+zfsdev_minor_alloc(void)
+{
+ static minor_t last_minor = 0;
+ minor_t m, i;
+
+ ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+
+ /*
+ * Scan at most ZFSDEV_MAX_MINOR candidates, starting just past the
+ * previously allocated minor and wrapping through 1..ZFSDEV_MAX_MINOR.
+ * The explicit iteration bound fixes the original "m != last_minor"
+ * exit test, which never terminates when last_minor is still 0 and
+ * every minor happens to be in use.
+ */
+ m = last_minor;
+ for (i = 0; i < ZFSDEV_MAX_MINOR; i++) {
+  m = (m < ZFSDEV_MAX_MINOR) ? m + 1 : 1;
+  if (zfsdev_get_state_impl(m, ZST_ALL) == NULL) {
+   last_minor = m;
+   return (m);
+  }
+ }
+
+ /* Every minor is in use. */
+ return (0);
+}
+
static int
-zfsdev_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
+zfsdev_state_init(struct file *filp)
+{
+ zfsdev_state_t *zs;
+ minor_t minor;
+
+ ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+
+ minor = zfsdev_minor_alloc();
+ if (minor == 0)
+  return (ENXIO);
+
+ /* KM_SLEEP allocations block until satisfied; the NULL check is defensive. */
+ zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);
+ if (zs == NULL)
+  return (ENOMEM);
+
+ zs->zs_file = filp;
+ zs->zs_minor = minor;
+ filp->private_data = zs;
+
+ /* Per-handle cleanup-on-exit and zevent queues. */
+ zfs_onexit_init((zfs_onexit_t **)&zs->zs_onexit);
+ zfs_zevent_init((zfs_zevent_t **)&zs->zs_zevent);
+
+ list_insert_tail(&zfsdev_state_list, zs);
+
+ return (0);
+}
+
+/*
+ * Tear down the per-handle state created by zfsdev_state_init();
+ * caller holds zfsdev_state_lock.
+ */
+static int
+zfsdev_state_destroy(struct file *filp)
+{
+ zfsdev_state_t *zs;
+
+ ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+ ASSERT(filp->private_data != NULL);
+
+ zs = filp->private_data;
+ zfs_onexit_destroy(zs->zs_onexit);
+ zfs_zevent_destroy(zs->zs_zevent);
+
+ /* Clear the back-pointer before freeing to avoid a dangling reference. */
+ filp->private_data = NULL;
+
+ list_remove(&zfsdev_state_list, zs);
+ kmem_free(zs, sizeof (zfsdev_state_t));
+
+ return (0);
+}
+
+/*
+ * open(2) on /dev/zfs: allocate per-handle state (minor, onexit, zevent).
+ * Returns a negated errno as the Linux VFS expects.
+ */
+static int
+zfsdev_open(struct inode *ino, struct file *filp)
+{
+ int error;
+
+ mutex_enter(&zfsdev_state_lock);
+ error = zfsdev_state_init(filp);
+ mutex_exit(&zfsdev_state_lock);
+
+ return (-error);
+}
+
+/*
+ * Last close of a /dev/zfs handle: free the per-handle state, which in
+ * turn runs the handle's onexit cleanup and drops its zevent queue.
+ */
+static int
+zfsdev_release(struct inode *ino, struct file *filp)
+{
+ int error;
+
+ mutex_enter(&zfsdev_state_lock);
+ error = zfsdev_state_destroy(filp);
+ mutex_exit(&zfsdev_state_lock);
+
+ return (-error);
+}
+
+static long
+zfsdev_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
{
zfs_cmd_t *zc;
uint_t vec;
- int error, rc;
-
- if (getminor(dev) != 0)
- return (zvol_ioctl(dev, cmd, arg, flag, cr, rvalp));
+ int error, rc, flag = 0;
vec = cmd - ZFS_IOC;
- ASSERT3U(getmajor(dev), ==, ddi_driver_major(zfs_dip));
-
if (vec >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0]))
- return (EINVAL);
+ return (-EINVAL);
- zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
+ zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP | KM_NODEBUG);
error = ddi_copyin((void *)arg, zc, sizeof (zfs_cmd_t), flag);
if (error != 0)
error = EFAULT;
if ((error == 0) && !(flag & FKIOCTL))
- error = zfs_ioc_vec[vec].zvec_secpolicy(zc, cr);
+ error = zfs_ioc_vec[vec].zvec_secpolicy(zc, NULL);
/*
* Ensure that all pool/dataset names are valid before we pass down to
case POOL_NAME:
if (pool_namecheck(zc->zc_name, NULL, NULL) != 0)
error = EINVAL;
- if (zfs_ioc_vec[vec].zvec_pool_check)
- error = pool_status_check(zc->zc_name,
- zfs_ioc_vec[vec].zvec_namecheck);
+ error = pool_status_check(zc->zc_name,
+ zfs_ioc_vec[vec].zvec_namecheck,
+ zfs_ioc_vec[vec].zvec_pool_check);
break;
case DATASET_NAME:
if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0)
error = EINVAL;
- if (zfs_ioc_vec[vec].zvec_pool_check)
- error = pool_status_check(zc->zc_name,
- zfs_ioc_vec[vec].zvec_namecheck);
+ error = pool_status_check(zc->zc_name,
+ zfs_ioc_vec[vec].zvec_namecheck,
+ zfs_ioc_vec[vec].zvec_pool_check);
break;
case NO_NAME:
}
kmem_free(zc, sizeof (zfs_cmd_t));
- return (error);
+ return (-error);
}
-static int
-zfs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+#ifdef CONFIG_COMPAT
+/* 32-bit compat ioctl: the zfs_cmd_t ABI is the same, so just forward. */
+static long
+zfsdev_compat_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
{
- if (cmd != DDI_ATTACH)
- return (DDI_FAILURE);
-
- if (ddi_create_minor_node(dip, "zfs", S_IFCHR, 0,
- DDI_PSEUDO, 0) == DDI_FAILURE)
- return (DDI_FAILURE);
-
- zfs_dip = dip;
+ return (zfsdev_ioctl(filp, cmd, arg));
+}
+#else
+/*
+ * Fix: the stub must define the identifier actually referenced by
+ * zfsdev_fops.compat_ioctl; the original "zfs_compat_ioctl" name left
+ * zfsdev_compat_ioctl undefined when CONFIG_COMPAT is disabled.
+ */
+#define zfsdev_compat_ioctl NULL
+#endif
- ddi_report_dev(dip);
+/* VFS entry points for the /dev/zfs control node. */
+static const struct file_operations zfsdev_fops = {
+ .open = zfsdev_open,
+ .release = zfsdev_release,
+ .unlocked_ioctl = zfsdev_ioctl,
+ .compat_ioctl = zfsdev_compat_ioctl,
+ .owner = THIS_MODULE,
+};
- return (DDI_SUCCESS);
-}
+/* /dev/zfs is registered as a dynamically-numbered misc character device. */
+static struct miscdevice zfs_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = ZFS_DRIVER,
+ .fops = &zfsdev_fops,
+};
static int
-zfs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+zfs_attach(void)
{
- if (spa_busy() || zfs_busy() || zvol_busy())
- return (DDI_FAILURE);
-
- if (cmd != DDI_DETACH)
- return (DDI_FAILURE);
+ int error;
- zfs_dip = NULL;
+ mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
+ list_create(&zfsdev_state_list, sizeof (zfsdev_state_t),
+ offsetof(zfsdev_state_t, zs_next));
- ddi_prop_remove_all(dip);
- ddi_remove_minor_node(dip, NULL);
+ error = misc_register(&zfs_misc);
+ if (error) {
+  /* Fix: unwind the lock/list initialized above instead of leaking them on failure. */
+  list_destroy(&zfsdev_state_list);
+  mutex_destroy(&zfsdev_state_lock);
+  printk(KERN_INFO "ZFS: misc_register() failed %d\n", error);
+  return (error);
+ }
- return (DDI_SUCCESS);
+ return (0);
}
-/*ARGSUSED*/
-static int
-zfs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+/* Unregister /dev/zfs and tear down the per-handle state tracking. */
+static void
+zfs_detach(void)
{
- switch (infocmd) {
- case DDI_INFO_DEVT2DEVINFO:
- *result = zfs_dip;
- return (DDI_SUCCESS);
+ int error;
- case DDI_INFO_DEVT2INSTANCE:
- *result = (void *)0;
- return (DDI_SUCCESS);
- }
+ /* NOTE(review): misc_deregister() returns void on modern kernels — confirm the int-returning variant exists on the target kernel. */
+ error = misc_deregister(&zfs_misc);
+ if (error)
+ printk(KERN_INFO "ZFS: misc_deregister() failed %d\n", error);
- return (DDI_FAILURE);
+ /* With the device node gone, no handles can remain on the list. */
+ mutex_destroy(&zfsdev_state_lock);
+ list_destroy(&zfsdev_state_list);
}
-/*
- * OK, so this is a little weird.
- *
- * /dev/zfs is the control node, i.e. minor 0.
- * /dev/zvol/[r]dsk/pool/dataset are the zvols, minor > 0.
- *
- * /dev/zfs has basically nothing to do except serve up ioctls,
- * so most of the standard driver entry points are in zvol.c.
- */
-static struct cb_ops zfs_cb_ops = {
- zvol_open, /* open */
- zvol_close, /* close */
- zvol_strategy, /* strategy */
- nodev, /* print */
- zvol_dump, /* dump */
- zvol_read, /* read */
- zvol_write, /* write */
- zfsdev_ioctl, /* ioctl */
- nodev, /* devmap */
- nodev, /* mmap */
- nodev, /* segmap */
- nochpoll, /* poll */
- ddi_prop_op, /* prop_op */
- NULL, /* streamtab */
- D_NEW | D_MP | D_64BIT, /* Driver compatibility flag */
- CB_REV, /* version */
- nodev, /* async read */
- nodev, /* async write */
-};
-
-static struct dev_ops zfs_dev_ops = {
- DEVO_REV, /* version */
- 0, /* refcnt */
- zfs_info, /* info */
- nulldev, /* identify */
- nulldev, /* probe */
- zfs_attach, /* attach */
- zfs_detach, /* detach */
- nodev, /* reset */
- &zfs_cb_ops, /* driver operations */
- NULL, /* no bus operations */
- NULL, /* power */
- ddi_quiesce_not_needed, /* quiesce */
-};
-
-static struct modldrv zfs_modldrv = {
- &mod_driverops,
- "ZFS storage pool",
- &zfs_dev_ops
-};
-
-static struct modlinkage modlinkage = {
- MODREV_1,
- (void *)&zfs_modlfs,
- (void *)&zfs_modldrv,
- NULL
-};
-
-
+/* Thread-specific-data keys, needed only when the ZPL is built. */
+#ifdef HAVE_ZPL
uint_t zfs_fsyncer_key;
extern uint_t rrw_tsd_key;
+#endif
+
+/* Suffix appended to the load/unload version banners in DEBUG builds. */
+#ifdef DEBUG
+#define ZFS_DEBUG_STR " (DEBUG mode)"
+#else
+#define ZFS_DEBUG_STR ""
+#endif
int
_init(void)
spa_init(FREAD | FWRITE);
zfs_init();
- zvol_init();
- if ((error = mod_install(&modlinkage)) != 0) {
- zvol_fini();
- zfs_fini();
- spa_fini();
- return (error);
- }
+ if ((error = zvol_init()) != 0)
+ goto out1;
+ if ((error = zfs_attach()) != 0)
+ goto out2;
+
+#ifdef HAVE_ZPL
tsd_create(&zfs_fsyncer_key, NULL);
tsd_create(&rrw_tsd_key, NULL);
- error = ldi_ident_from_mod(&modlinkage, &zfs_li);
- ASSERT(error == 0);
mutex_init(&zfs_share_lock, NULL, MUTEX_DEFAULT, NULL);
+#endif /* HAVE_ZPL */
+
+ printk(KERN_NOTICE "ZFS: Loaded ZFS Filesystem v%s%s\n",
+ ZFS_META_VERSION, ZFS_DEBUG_STR);
return (0);
+
+/* Error unwind: undo the successful initializations in reverse order. */
+out2:
+ (void) zvol_fini();
+out1:
+ zfs_fini();
+ spa_fini();
+ printk(KERN_NOTICE "ZFS: Failed to Load ZFS Filesystem v%s%s"
+ ", rc = %d\n", ZFS_META_VERSION, ZFS_DEBUG_STR, error);
+
+ return (error);
}
int
_fini(void)
{
- int error;
-
- if (spa_busy() || zfs_busy() || zvol_busy() || zio_injection_enabled)
- return (EBUSY);
-
- if ((error = mod_remove(&modlinkage)) != 0)
- return (error);
-
+ /* NOTE(review): the spa_busy()/zfs_busy()/zvol_busy() unload guards were dropped; presumably Linux module refcounting prevents unload while in use — confirm. */
+ zfs_detach();
zvol_fini();
zfs_fini();
spa_fini();
+#ifdef HAVE_ZPL
if (zfs_nfsshare_inited)
(void) ddi_modclose(nfs_mod);
if (zfs_smbshare_inited)
if (zfs_nfsshare_inited || zfs_smbshare_inited)
(void) ddi_modclose(sharefs_mod);
- tsd_destroy(&zfs_fsyncer_key);
- ldi_ident_release(zfs_li);
- zfs_li = NULL;
mutex_destroy(&zfs_share_lock);
+ /* NOTE(review): rrw_tsd_key created in _init() is never destroyed here — confirm this is intentional. */
+ tsd_destroy(&zfs_fsyncer_key);
+#endif /* HAVE_ZPL */
- return (error);
-}
+ printk(KERN_NOTICE "ZFS: Unloaded ZFS Filesystem v%s%s\n",
+ ZFS_META_VERSION, ZFS_DEBUG_STR);
-int
-_info(struct modinfo *modinfop)
-{
- return (mod_info(&modlinkage, modinfop));
+ return (0);
}
+
+#ifdef HAVE_SPL
+/* Register the module entry/exit points with the SPL when built as a Linux kernel module. */
+spl_module_init(_init);
+spl_module_exit(_fini);
+
+MODULE_DESCRIPTION("ZFS");
+MODULE_AUTHOR(ZFS_META_AUTHOR);
+MODULE_LICENSE(ZFS_META_LICENSE);
+#endif /* HAVE_SPL */