#include <sys/kidmap.h>
#include <sys/cred.h>
#include <sys/attr.h>
+#include <sys/zpl.h>
/*
* Programming rules.
* return (error); // done, report error
*/
+/*
+ * Virus scanning is unsupported. It would be possible to add a hook
+ * here to perform the required virus scan. This could be done
+ * entirely in the kernel or as an upcall to a user-space scanning
+ * utility.
+ */
+static int
+zfs_vscan(struct inode *ip, cred_t *cr, int async)
+{
+ return (0);
+}
+
+/* ARGSUSED */
+int
+zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
+{
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+
+ ZFS_ENTER(zsb);
+ ZFS_VERIFY_ZP(zp);
+
+ /* Honor ZFS_APPENDONLY file attribute */
+ if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
+ ((flag & O_APPEND) == 0)) {
+ ZFS_EXIT(zsb);
+ return (EPERM);
+ }
+
+ /* Virus scan eligible files on open */
+ if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
+ !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
+ if (zfs_vscan(ip, cr, 0) != 0) {
+ ZFS_EXIT(zsb);
+ return (EACCES);
+ }
+ }
+
+ /* Keep a count of the synchronous opens in the znode */
+ if (flag & O_SYNC)
+ atomic_inc_32(&zp->z_sync_cnt);
+
+ ZFS_EXIT(zsb);
+ return (0);
+}
+EXPORT_SYMBOL(zfs_open);
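A minimal user-space sketch of the ZFS_APPENDONLY behavior enforced above; the path /tank/log is hypothetical and assumed to already carry the append-only attribute:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
        /* Assumes /tank/log has the ZFS_APPENDONLY flag set. */
        int fd = open("/tank/log", O_WRONLY);
        if (fd == -1 && errno == EPERM)
                printf("write open without O_APPEND rejected\n");

        fd = open("/tank/log", O_WRONLY | O_APPEND);
        if (fd != -1)
                printf("open with O_APPEND allowed\n");

        return (0);
}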
+
+/* ARGSUSED */
+int
+zfs_close(struct inode *ip, int flag, cred_t *cr)
+{
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+
+ ZFS_ENTER(zsb);
+ ZFS_VERIFY_ZP(zp);
+
+ /*
+ * Zero the synchronous opens in the znode. Under Linux the
+ * zfs_close() hook is not symmetric with zfs_open(); it is
+ * called only once, when the last reference is dropped.
+ */
+ if (flag & O_SYNC)
+ zp->z_sync_cnt = 0;
+
+ if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
+ !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
+ VERIFY(zfs_vscan(ip, cr, 1) == 0);
+
+ ZFS_EXIT(zsb);
+ return (0);
+}
+EXPORT_SYMBOL(zfs_close);
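Since Linux calls this hook only once per open file, a hedged sketch of the zpl-layer ->release hook that would drive zfs_close(); the zpl_release name is an assumption consistent with the zpl.h include above:

/* Sketch: file_operations ->release hook delegating to zfs_close().
 * Runs once when the last reference to the open file is dropped. */
static int
zpl_release(struct inode *ip, struct file *filp)
{
        cred_t *cr = CRED();
        int error;

        crhold(cr);
        error = -zfs_close(ip, filp->f_flags, cr);
        crfree(cr);

        return (error);
}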
+
#if defined(_KERNEL)
/*
* When a file is memory mapped, we must keep the IO data synchronized
}
#endif /* _KERNEL */
-offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
+unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
/*
* Read bytes from specified file into supplied buffer.
return (0);
}
-#ifdef HAVE_MANDLOCKS
/*
* Check for mandatory locks
*/
- if (MANDMODE(zp->z_mode)) {
- if (error = chklock(ip, FREAD,
- uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
- ZFS_EXIT(zsb);
- return (error);
- }
+ if (mandatory_lock(ip) &&
+ !lock_may_read(ip, uio->uio_loffset, uio->uio_resid)) {
+ ZFS_EXIT(zsb);
+ return (EAGAIN);
}
-#endif /* HAVE_MANDLOCK */
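For reviewers unfamiliar with the replacement: Linux honors mandatory locks only when the filesystem is mounted with -o mand and the file has the setgid bit set without group execute. A hedged user-space sketch (the path is illustrative):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int
main(void)
{
        struct flock fl = {
                .l_type = F_WRLCK,      /* exclusive lock */
                .l_whence = SEEK_SET,
                .l_start = 0,
                .l_len = 0,             /* whole file */
        };
        int fd = open("/tank/data", O_RDWR);

        /* Setgid without group execute marks the file for
         * mandatory locking (fs must be mounted with -o mand). */
        fchmod(fd, S_IRUSR | S_IWUSR | S_ISGID);
        fcntl(fd, F_SETLK, &fl);

        /* While the lock is held, a read() by another process is
         * refused, matching the lock_may_read() EAGAIN path above. */
        pause();
        return (0);
}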
/*
* If we're in FRSYNC mode, sync out this znode before reading it.
return (EINVAL);
}
-#ifdef HAVE_MANDLOCKS
/*
* Check for mandatory locks before calling zfs_range_lock()
* in order to prevent a deadlock with locks set via fcntl().
*/
- if (MANDMODE((mode_t)zp->z_mode) &&
- (error = chklock(ip, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
+ if (mandatory_lock(ip) && !lock_may_write(ip, woff, n)) {
ZFS_EXIT(zsb);
- return (error);
+ return (EAGAIN);
}
-#endif /* HAVE_MANDLOCKS */
#ifdef HAVE_UIO_ZEROCOPY
/*
if (flags & LOOKUP_XATTR) {
/*
- * If the xattr property is off, refuse the lookup request.
- */
- if (!(zsb->z_flags & ZSB_XATTR_USER)) {
- ZFS_EXIT(zsb);
- return (EINVAL);
- }
-
- /*
* We don't allow recursive attributes..
* Maybe someday we will.
*/
return (EILSEQ);
}
-#ifdef HAVE_XVATTR
- if (vap->va_mask & AT_XVATTR) {
+ if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
ZFS_EXIT(zsb);
return (error);
}
}
-#endif /* HAVE_XVATTR */
top:
*ipp = NULL;
if (flags & FIGNORECASE)
zf |= ZCILOOK;
-#ifdef HAVE_XVATTR
- if (vap->va_mask & AT_XVATTR) {
+ if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
ZFS_EXIT(zsb);
return (error);
}
}
-#endif /* HAVE_XVATTR */
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
vsecp, &acl_ids)) != 0) {
* vattr structure.
*
* IN: ip - inode of file.
- * stat - kstat structure to fill in.
+ * vap - va_mask identifies requested attributes.
+ * If ATTR_XVATTR set, then optional attrs are requested
* flags - ATTR_NOACLCHECK (CIFS server context)
* cr - credentials of caller.
*
- * OUT: stat - filled in kstat values.
+ * OUT: vap - attribute values.
+ *
+ * RETURN: 0 on success, error code on failure.
*/
/* ARGSUSED */
int
-zfs_getattr(struct inode *ip, struct kstat *stat, int flags, cred_t *cr)
+zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfs_sb_t *zsb = ITOZSB(ip);
int error = 0;
uint64_t links;
uint64_t mtime[2], ctime[2];
- uint32_t blksz;
+ xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
+ xoptattr_t *xoap = NULL;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
sa_bulk_attr_t bulk[2];
int count = 0;
ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- zfs_fuid_map_ids(zp, cr, &stat->uid, &stat->gid);
+ zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
* always be allowed to read basic attributes of file.
*/
if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
- (stat->uid != crgetuid(cr))) {
+ (vap->va_uid != crgetuid(cr))) {
if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
skipaclchk, cr))) {
ZFS_EXIT(zsb);
*/
mutex_enter(&zp->z_lock);
- stat->ino = ip->i_ino;
- stat->mode = zp->z_mode;
- stat->uid = zp->z_uid;
- stat->gid = zp->z_gid;
+ vap->va_type = vn_mode_to_vtype(zp->z_mode);
+ vap->va_mode = zp->z_mode;
+ vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
+ vap->va_nodeid = zp->z_id;
if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp))
links = zp->z_links + 1;
else
links = zp->z_links;
- stat->nlink = MIN(links, ZFS_LINK_MAX);
- stat->size = i_size_read(ip);
- stat->rdev = ip->i_rdev;
- stat->dev = ip->i_rdev;
+ vap->va_nlink = MIN(links, ZFS_LINK_MAX);
+ vap->va_size = i_size_read(ip);
+ vap->va_rdev = ip->i_rdev;
+ vap->va_seq = ip->i_generation;
+
+ /*
+ * Add in any requested optional attributes and the create time.
+ * Also set the corresponding bits in the returned attribute bitmap.
+ */
+ if ((xoap = xva_getxoptattr(xvap)) != NULL && zsb->z_use_fuids) {
+ if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
+ xoap->xoa_archive =
+ ((zp->z_pflags & ZFS_ARCHIVE) != 0);
+ XVA_SET_RTN(xvap, XAT_ARCHIVE);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
+ xoap->xoa_readonly =
+ ((zp->z_pflags & ZFS_READONLY) != 0);
+ XVA_SET_RTN(xvap, XAT_READONLY);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
+ xoap->xoa_system =
+ ((zp->z_pflags & ZFS_SYSTEM) != 0);
+ XVA_SET_RTN(xvap, XAT_SYSTEM);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
+ xoap->xoa_hidden =
+ ((zp->z_pflags & ZFS_HIDDEN) != 0);
+ XVA_SET_RTN(xvap, XAT_HIDDEN);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
+ xoap->xoa_nounlink =
+ ((zp->z_pflags & ZFS_NOUNLINK) != 0);
+ XVA_SET_RTN(xvap, XAT_NOUNLINK);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
+ xoap->xoa_immutable =
+ ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
+ XVA_SET_RTN(xvap, XAT_IMMUTABLE);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
+ xoap->xoa_appendonly =
+ ((zp->z_pflags & ZFS_APPENDONLY) != 0);
+ XVA_SET_RTN(xvap, XAT_APPENDONLY);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
+ xoap->xoa_nodump =
+ ((zp->z_pflags & ZFS_NODUMP) != 0);
+ XVA_SET_RTN(xvap, XAT_NODUMP);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
+ xoap->xoa_opaque =
+ ((zp->z_pflags & ZFS_OPAQUE) != 0);
+ XVA_SET_RTN(xvap, XAT_OPAQUE);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
+ xoap->xoa_av_quarantined =
+ ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
+ XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
+ xoap->xoa_av_modified =
+ ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
+ XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
+ S_ISREG(ip->i_mode)) {
+ zfs_sa_get_scanstamp(zp, xvap);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
+ uint64_t times[2];
+
+ (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zsb),
+ times, sizeof (times));
+ ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
+ XVA_SET_RTN(xvap, XAT_CREATETIME);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
+ xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
+ XVA_SET_RTN(xvap, XAT_REPARSE);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
+ xoap->xoa_generation = zp->z_gen;
+ XVA_SET_RTN(xvap, XAT_GEN);
+ }
- ZFS_TIME_DECODE(&stat->atime, zp->z_atime);
- ZFS_TIME_DECODE(&stat->mtime, mtime);
- ZFS_TIME_DECODE(&stat->ctime, ctime);
+ if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
+ xoap->xoa_offline =
+ ((zp->z_pflags & ZFS_OFFLINE) != 0);
+ XVA_SET_RTN(xvap, XAT_OFFLINE);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
+ xoap->xoa_sparse =
+ ((zp->z_pflags & ZFS_SPARSE) != 0);
+ XVA_SET_RTN(xvap, XAT_SPARSE);
+ }
+ }
+
+ ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
+ ZFS_TIME_DECODE(&vap->va_mtime, mtime);
+ ZFS_TIME_DECODE(&vap->va_ctime, ctime);
mutex_exit(&zp->z_lock);
- sa_object_size(zp->z_sa_hdl, &blksz, &stat->blocks);
- stat->blksize = (1 << ip->i_blkbits);
+ sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
if (zp->z_blksz == 0) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
- stat->blksize = zsb->z_max_blksz;
+ vap->va_blksize = zsb->z_max_blksz;
}
ZFS_EXIT(zsb);
EXPORT_SYMBOL(zfs_getattr);
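A hedged kernel-context sketch of requesting one of the optional attributes now honored above; ip and cr are assumed to be in scope:

/* Sketch: ask for the immutable bit through an xvattr_t. Since
 * xvattr_t embeds a vattr_t as its first member, the cast below
 * matches the "vap may be an xvattr_t *" convention in zfs_getattr. */
xvattr_t xva;
boolean_t immutable = B_FALSE;
int error;

xva_init(&xva);                 /* sets va_mask = ATTR_XVATTR */
XVA_SET_REQ(&xva, XAT_IMMUTABLE);

error = zfs_getattr(ip, (vattr_t *)&xva, 0, cr);
if (error == 0 && XVA_ISSET_RTN(&xva, XAT_IMMUTABLE))
        immutable = xva.xva_xoptattrs.xoa_immutable;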
/*
+ * Get the basic file attributes and place them in the provided kstat
+ * structure. The inode is assumed to be the authoritative source
+ * for most of the attributes. However, the znode currently has the
+ * authoritative atime, blksize, and block count.
+ *
+ * IN: ip - inode of file.
+ *
+ * OUT: sp - kstat values.
+ *
+ * RETURN: 0 (always succeeds)
+ */
+/* ARGSUSED */
+int
+zfs_getattr_fast(struct inode *ip, struct kstat *sp)
+{
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+
+ ZFS_ENTER(zsb);
+ ZFS_VERIFY_ZP(zp);
+
+ mutex_enter(&zp->z_lock);
+
+ generic_fillattr(ip, sp);
+ ZFS_TIME_DECODE(&sp->atime, zp->z_atime);
+
+ sa_object_size(zp->z_sa_hdl, (uint32_t *)&sp->blksize, &sp->blocks);
+ if (unlikely(zp->z_blksz == 0)) {
+ /*
+ * Block size hasn't been set; suggest maximal I/O transfers.
+ */
+ sp->blksize = zsb->z_max_blksz;
+ }
+
+ mutex_exit(&zp->z_lock);
+
+ ZFS_EXIT(zsb);
+
+ return (0);
+}
+EXPORT_SYMBOL(zfs_getattr_fast);
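A hedged sketch of how a 2.6-era zpl ->getattr inode operation might wrap zfs_getattr_fast(); the zpl_getattr name and prototype are assumptions:

/* Sketch: negate the positive errno convention used by
 * zfs_getattr_fast() to match the VFS's negative-errno convention. */
static int
zpl_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
        int error;

        error = -zfs_getattr_fast(dentry->d_inode, stat);
        ASSERT3S(error, <=, 0);

        return (error);
}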
+
+/*
* Set the file attributes to the values contained in the
* vattr structure.
*
* IN: ip - inode of file to be modified.
* vap - new attribute values.
- * If AT_XVATTR set, then optional attrs are being set
+ * If ATTR_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
*/
/* ARGSUSED */
int
-zfs_setattr(struct inode *ip, struct iattr *attr, int flags, cred_t *cr)
+zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfs_sb_t *zsb = ITOZSB(ip);
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
- uint_t mask = attr->ia_valid;
+ xvattr_t *tmpxvattr;
+ uint_t mask = vap->va_mask;
uint_t saved_mask;
int trim_mask = 0;
uint64_t new_mode;
int need_policy = FALSE;
int err, err2;
zfs_fuid_info_t *fuidp = NULL;
+ xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
+ xoptattr_t *xoap;
+ zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
- zfs_acl_t *aclp = NULL;
boolean_t fuid_dirtied = B_FALSE;
- sa_bulk_attr_t bulk[7], xattr_bulk[7];
+ sa_bulk_attr_t *bulk, *xattr_bulk;
int count = 0, xattr_count = 0;
if (mask == 0)
* Make sure that if we have ephemeral uid/gid or xvattr specified
* that file system is at proper version level
*/
+
if (zsb->z_use_fuids == B_FALSE &&
- (((mask & ATTR_UID) && IS_EPHEMERAL(attr->ia_uid)) ||
- ((mask & ATTR_GID) && IS_EPHEMERAL(attr->ia_gid)))) {
+ (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
+ ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
+ (mask & ATTR_XVATTR))) {
ZFS_EXIT(zsb);
return (EINVAL);
}
return (EINVAL);
}
+ /*
+ * If this is an xvattr_t, then get a pointer to the structure of
+ * optional attributes. If this is NULL, then we have a vattr_t.
+ */
+ xoap = xva_getxoptattr(xvap);
+
+ tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
+ xva_init(tmpxvattr);
+
+ bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
+ xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
+
+ /*
+ * Immutable files can only alter immutable bit and atime
+ */
+ if ((zp->z_pflags & ZFS_IMMUTABLE) &&
+ ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
+ ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
+ err = EPERM;
+ goto out3;
+ }
+
if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
- ZFS_EXIT(zsb);
- return (EPERM);
+ err = EPERM;
+ goto out3;
+ }
+
+ /*
+ * Verify the timestamps do not overflow 32 bits. ZFS can
+ * handle large timestamps, but 32-bit syscalls cannot handle
+ * times beyond 2038. This check should be removed once large
+ * timestamps are fully supported.
+ */
+ if (mask & (ATTR_ATIME | ATTR_MTIME)) {
+ if (((mask & ATTR_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
+ ((mask & ATTR_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
+ err = EOVERFLOW;
+ goto out3;
+ }
}
top:
aclp = NULL;
/* Can this be moved to before the top label? */
- if (zsb->z_vfs->mnt_flags & MNT_READONLY) {
- ZFS_EXIT(zsb);
- return (EROFS);
+ if (zfs_is_readonly(zsb)) {
+ err = EROFS;
+ goto out3;
}
/*
if (mask & ATTR_SIZE) {
err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ if (err)
+ goto out3;
+
+ truncate_setsize(ip, vap->va_size);
+
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
- err = zfs_freesp(zp, attr->ia_size, 0, 0, FALSE);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
+ if (err)
+ goto out3;
+ }
- /* Careful negative Linux return code here */
- err = -vmtruncate(ip, attr->ia_size);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ if (mask & (ATTR_ATIME|ATTR_MTIME) ||
+ ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
+ XVA_ISSET_REQ(xvap, XAT_READONLY) ||
+ XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
+ XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
+ XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
+ XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
+ XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
+ need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
+ skipaclchk, cr);
}
if (mask & (ATTR_UID|ATTR_GID)) {
*/
if (!(mask & ATTR_MODE))
- attr->ia_mode = zp->z_mode;
+ vap->va_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
- take_owner = (mask & ATTR_UID) &&
- (attr->ia_uid == crgetuid(cr));
+ take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
take_group = (mask & ATTR_GID) &&
- zfs_groupmember(zsb, attr->ia_gid, cr);
+ zfs_groupmember(zsb, vap->va_gid, cr);
/*
- * If both AT_UID and AT_GID are set then take_owner and
+ * If both ATTR_UID and ATTR_GID are set then take_owner and
* take_group must both be set in order to allow taking
* ownership.
*
/*
* Remove setuid/setgid for non-privileged users
*/
- secpolicy_setid_clear(attr, cr);
+ (void) secpolicy_setid_clear(vap, cr);
trim_mask = (mask & (ATTR_UID|ATTR_GID));
} else {
need_policy = TRUE;
mutex_enter(&zp->z_lock);
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
+ if (mask & ATTR_XVATTR) {
+ /*
+ * Update xvattr mask to include only those attributes
+ * that are actually changing.
+ *
+ * The bits will be restored prior to actually setting
+ * the attributes, so the caller thinks they were set.
+ */
+ if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
+ if (xoap->xoa_appendonly !=
+ ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
+ need_policy = TRUE;
+ } else {
+ XVA_CLR_REQ(xvap, XAT_APPENDONLY);
+ XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
+ }
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
+ if (xoap->xoa_nounlink !=
+ ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
+ need_policy = TRUE;
+ } else {
+ XVA_CLR_REQ(xvap, XAT_NOUNLINK);
+ XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
+ }
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
+ if (xoap->xoa_immutable !=
+ ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
+ need_policy = TRUE;
+ } else {
+ XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
+ XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
+ }
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
+ if (xoap->xoa_nodump !=
+ ((zp->z_pflags & ZFS_NODUMP) != 0)) {
+ need_policy = TRUE;
+ } else {
+ XVA_CLR_REQ(xvap, XAT_NODUMP);
+ XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
+ }
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
+ if (xoap->xoa_av_modified !=
+ ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
+ need_policy = TRUE;
+ } else {
+ XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
+ XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
+ }
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
+ if ((!S_ISREG(ip->i_mode) &&
+ xoap->xoa_av_quarantined) ||
+ xoap->xoa_av_quarantined !=
+ ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
+ need_policy = TRUE;
+ } else {
+ XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
+ XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
+ }
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
+ mutex_exit(&zp->z_lock);
+ err = EPERM;
+ goto out3;
+ }
+
+ if (need_policy == FALSE &&
+ (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
+ XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
+ need_policy = TRUE;
+ }
+ }
mutex_exit(&zp->z_lock);
if (mask & ATTR_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
- err = secpolicy_setid_setsticky_clear(ip, attr,
+ err = secpolicy_setid_setsticky_clear(ip, vap,
&oldva, cr);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ if (err)
+ goto out3;
+
trim_mask |= ATTR_MODE;
} else {
need_policy = TRUE;
*/
if (trim_mask) {
- saved_mask = attr->ia_valid;
- attr->ia_valid &= ~trim_mask;
+ saved_mask = vap->va_mask;
+ vap->va_mask &= ~trim_mask;
}
- err = secpolicy_vnode_setattr(cr, ip, attr, &oldva, flags,
+ err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ if (err)
+ goto out3;
if (trim_mask)
- attr->ia_valid |= saved_mask;
+ vap->va_mask |= saved_mask;
}
/*
* secpolicy_vnode_setattr, or take ownership may have
* changed va_mask
*/
- mask = attr->ia_valid;
+ mask = vap->va_mask;
if ((mask & (ATTR_UID | ATTR_GID))) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
}
if (mask & ATTR_UID) {
new_uid = zfs_fuid_create(zsb,
- (uint64_t)attr->ia_uid, cr, ZFS_OWNER, &fuidp);
+ (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_uid != zp->z_uid &&
zfs_fuid_overquota(zsb, B_FALSE, new_uid)) {
if (attrzp)
}
if (mask & ATTR_GID) {
- new_gid = zfs_fuid_create(zsb, (uint64_t)attr->ia_gid,
+ new_gid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &fuidp);
if (new_gid != zp->z_gid &&
zfs_fuid_overquota(zsb, B_TRUE, new_gid)) {
if (mask & ATTR_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
- new_mode = (pmode & S_IFMT) | (attr->ia_mode & ~S_IFMT);
+ new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
zfs_acl_chmod_setattr(zp, &aclp, new_mode);
mutex_exit(&zp->z_lock);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
- dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
+ if ((mask & ATTR_XVATTR) &&
+ XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
+ dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
+ else
+ dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = new_mode;
- ASSERT3U((uintptr_t)aclp, !=, NULL);
+ ASSERT3P(aclp, !=, NULL);
err = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT3U(err, ==, 0);
if (zp->z_acl_cached)
if (mask & ATTR_ATIME) {
- ZFS_TIME_ENCODE(&attr->ia_atime, zp->z_atime);
+ ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
&zp->z_atime, sizeof (zp->z_atime));
}
if (mask & ATTR_MTIME) {
- ZFS_TIME_ENCODE(&attr->ia_mtime, mtime);
+ ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
mtime, sizeof (mtime));
}
* update from toggling bit
*/
+ if (xoap && (mask & ATTR_XVATTR)) {
+
+ /*
+ * Restore the trimmed-off masks so that the return masks
+ * can be set for the caller.
+ */
+
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
+ XVA_SET_REQ(xvap, XAT_APPENDONLY);
+ }
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
+ XVA_SET_REQ(xvap, XAT_NOUNLINK);
+ }
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
+ XVA_SET_REQ(xvap, XAT_IMMUTABLE);
+ }
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
+ XVA_SET_REQ(xvap, XAT_NODUMP);
+ }
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
+ XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
+ }
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
+ XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
+ }
+
+ if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
+ ASSERT(S_ISREG(ip->i_mode));
+
+ zfs_xvattr_set(zp, xvap, tx);
+ }
+
if (fuid_dirtied)
zfs_fuid_sync(zsb, tx);
if (mask != 0)
- zfs_log_setattr(zilog, tx, TX_SETATTR, zp, attr, mask, fuidp);
+ zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
mutex_exit(&zp->z_lock);
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
+out3:
+ kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7);
+ kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7);
+ kmem_free(tmpxvattr, sizeof (xvattr_t));
ZFS_EXIT(zsb);
return (err);
}
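And the corresponding hedged sketch for setting an optional attribute through the new ATTR_XVATTR path in zfs_setattr(); ip and cr are assumed to be in scope:

/* Sketch: mark a file immutable via the ATTR_XVATTR machinery.
 * xva_init() already sets va_mask = ATTR_XVATTR. */
xvattr_t xva;
int error;

xva_init(&xva);
XVA_SET_REQ(&xva, XAT_IMMUTABLE);
xva.xva_xoptattrs.xoa_immutable = B_TRUE;

error = zfs_setattr(ip, (vattr_t *)&xva, 0, cr);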
}
EXPORT_SYMBOL(zfs_link);
-#ifdef HAVE_MMAP
-/*
- * zfs_null_putapage() is used when the file system has been force
- * unmounted. It just drops the pages.
- */
-/* ARGSUSED */
-static int
-zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
- size_t *lenp, int flags, cred_t *cr)
+static void
+zfs_putpage_commit_cb(void *arg, int error)
{
- pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
- return (0);
+ struct page *pp = arg;
+
+ if (error) {
+ __set_page_dirty_nobuffers(pp);
+
+ if (error != ECANCELED)
+ SetPageError(pp);
+ } else {
+ ClearPageError(pp);
+ }
+
+ end_page_writeback(pp);
}
/*
- * Push a page out to disk, klustering if possible.
+ * Push a page out to disk; once the page is on stable storage, the
+ * registered commit callback will be run as notification of completion.
*
- * IN: vp - file to push page to.
- * pp - page to push.
- * flags - additional flags.
- * cr - credentials of caller.
- *
- * OUT: offp - start of range pushed.
- * lenp - len of range pushed.
+ * IN: ip - inode to which the page is mapped.
+ * pp - page to push (page is locked)
+ * wbc - writeback control data
*
* RETURN: 0 if success
* error code if failure
*
- * NOTE: callers must have locked the page to be pushed. On
- * exit, the page (and all other pages in the kluster) must be
- * unlocked.
+ * Timestamps:
+ * ip - ctime|mtime updated
*/
/* ARGSUSED */
-static int
-zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
- size_t *lenp, int flags, cred_t *cr)
+int
+zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+ loff_t offset;
+ loff_t pgoff;
+ unsigned int pglen;
dmu_tx_t *tx;
- u_offset_t off, koff;
- size_t len, klen;
- int err;
+ caddr_t va;
+ int err = 0;
+ uint64_t mtime[2], ctime[2];
+ sa_bulk_attr_t bulk[3];
+ int cnt = 0;
- off = pp->p_offset;
- len = PAGESIZE;
- /*
- * If our blocksize is bigger than the page size, try to kluster
- * multiple pages so that we write a full block (thus avoiding
- * a read-modify-write).
- */
- if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
- klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
- koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
- ASSERT(koff <= zp->z_size);
- if (koff + klen > zp->z_size)
- klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
- pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
- }
- ASSERT3U(btop(len), ==, btopr(len));
- /*
- * Can't push pages past end-of-file.
- */
- if (off >= zp->z_size) {
- /* ignore all pages */
- err = 0;
- goto out;
- } else if (off + len > zp->z_size) {
- int npages = btopr(zp->z_size - off);
- page_t *trunc;
+ ASSERT(PageLocked(pp));
- page_list_break(&pp, &trunc, npages);
- /* ignore pages past end of file */
- if (trunc)
- pvn_write_done(trunc, flags);
- len = zp->z_size - off;
+ pgoff = page_offset(pp); /* Page byte-offset in file */
+ offset = i_size_read(ip); /* File length in bytes */
+ pglen = MIN(PAGE_CACHE_SIZE, /* Page length in bytes */
+ P2ROUNDUP(offset, PAGE_CACHE_SIZE)-pgoff);
+
+ /* Page is beyond end of file */
+ if (pgoff >= offset) {
+ unlock_page(pp);
+ return (0);
}
- if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
- zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
+ /* Truncate page length to end of file */
+ if (pgoff + pglen > offset)
+ pglen = offset - pgoff;
+
+#if 0
+ /*
+ * FIXME: Allow mmap writes past its quota. The correct fix
+ * is to register a page_mkwrite() handler to count the page
+ * against its quota when it is about to be dirtied.
+ */
+ if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
+ zfs_owner_overquota(zsb, zp, B_TRUE)) {
err = EDQUOT;
- goto out;
}
-top:
- tx = dmu_tx_create(zfsvfs->z_os);
- dmu_tx_hold_write(tx, zp->z_id, off, len);
+#endif
+
+ set_page_writeback(pp);
+ unlock_page(pp);
+
+ tx = dmu_tx_create(zsb->z_os);
+
+ dmu_tx_callback_register(tx, zfs_putpage_commit_cb, pp);
+
+ dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_NOWAIT);
if (err != 0) {
- if (err == ERESTART) {
+ if (err == ERESTART)
dmu_tx_wait(tx);
- dmu_tx_abort(tx);
- goto top;
- }
+
dmu_tx_abort(tx);
- goto out;
+ return (err);
}
- if (zp->z_blksz <= PAGESIZE) {
- caddr_t va = zfs_map_page(pp, S_READ);
- ASSERT3U(len, <=, PAGESIZE);
- dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
- zfs_unmap_page(pp, va);
- } else {
- err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
- }
+ va = kmap(pp);
+ ASSERT3U(pglen, <=, PAGE_CACHE_SIZE);
+ dmu_write(zsb->z_os, zp->z_id, pgoff, pglen, va, tx);
+ kunmap(pp);
- if (err == 0) {
- uint64_t mtime[2], ctime[2];
- sa_bulk_attr_t bulk[3];
- int count = 0;
+ SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
+ SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
+ SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zsb), NULL, &zp->z_pflags, 8);
+ zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
+ zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
- &mtime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
- &ctime, 16);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
- &zp->z_pflags, 8);
- zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
- B_TRUE);
- zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
- }
dmu_tx_commit(tx);
+ ASSERT3S(err, ==, 0);
-out:
- pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
- if (offp)
- *offp = off;
- if (lenp)
- *lenp = len;
+ if ((zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) ||
+ (wbc->sync_mode == WB_SYNC_ALL))
+ zil_commit(zsb->z_log, zp->z_id);
return (err);
}
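A hedged sketch of the address_space ->writepage hook that would feed zfs_putpage(); the zpl_writepage name is an assumption:

/* Sketch: hand a dirty page to zfs_putpage(); the commit callback
 * registered there ends writeback once the data is on stable storage. */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
        return (-zfs_putpage(pp->mapping->host, pp, wbc));
}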
-/*
- * Copy the portion of the file indicated from pages into the file.
- * The pages are stored in a page list attached to the files vnode.
- *
- * IN: vp - vnode of file to push page data to.
- * off - position in file to put data.
- * len - amount of data to write.
- * flags - flags to control the operation.
- * cr - credentials of caller.
- * ct - caller context.
- *
- * RETURN: 0 if success
- * error code if failure
- *
- * Timestamps:
- * vp - ctime|mtime updated
- */
-/*ARGSUSED*/
-static int
-zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
- caller_context_t *ct)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- page_t *pp;
- size_t io_len;
- u_offset_t io_off;
- uint_t blksz;
- rl_t *rl;
- int error = 0;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- /*
- * Align this request to the file block size in case we kluster.
- * XXX - this can result in pretty aggresive locking, which can
- * impact simultanious read/write access. One option might be
- * to break up long requests (len == 0) into block-by-block
- * operations to get narrower locking.
- */
- blksz = zp->z_blksz;
- if (ISP2(blksz))
- io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
- else
- io_off = 0;
- if (len > 0 && ISP2(blksz))
- io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
- else
- io_len = 0;
-
- if (io_len == 0) {
- /*
- * Search the entire vp list for pages >= io_off.
- */
- rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
- error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
- goto out;
- }
- rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
-
- if (off > zp->z_size) {
- /* past end of file */
- zfs_range_unlock(rl);
- ZFS_EXIT(zfsvfs);
- return (0);
- }
-
- len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
-
- for (off = io_off; io_off < off + len; io_off += io_len) {
- if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
- pp = page_lookup(vp, io_off,
- (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
- } else {
- pp = page_lookup_nowait(vp, io_off,
- (flags & B_FREE) ? SE_EXCL : SE_SHARED);
- }
-
- if (pp != NULL && pvn_getdirty(pp, flags)) {
- int err;
-
- /*
- * Found a dirty page to push
- */
- err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
- if (err)
- error = err;
- } else {
- io_len = PAGESIZE;
- }
- }
-out:
- zfs_range_unlock(rl);
- if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zfsvfs->z_log, zp->z_id);
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-#endif /* HAVE_MMAP */
-
/*ARGSUSED*/
void
zfs_inactive(struct inode *ip)
*/
/* ARGSUSED */
int
-zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp,
- caller_context_t *ct)
+zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
{
if (S_ISDIR(ip->i_mode))
return (0);
}
EXPORT_SYMBOL(zfs_seek);
-#ifdef HAVE_MMAP
-/*
- * Pre-filter the generic locking function to trap attempts to place
- * a mandatory lock on a memory mapped file.
- */
-static int
-zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
- flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
-{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-
- ZFS_ENTER(zfsvfs);
- ZFS_VERIFY_ZP(zp);
-
- /*
- * We are following the UFS semantics with respect to mapcnt
- * here: If we see that the file is mapped already, then we will
- * return an error, but we don't worry about races between this
- * function and zfs_map().
- */
- if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
- ZFS_EXIT(zfsvfs);
- return (EAGAIN);
- }
- ZFS_EXIT(zfsvfs);
- return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
-}
-
/*
- * If we can't find a page in the cache, we will create a new page
- * and fill it with file data. For efficiency, we may try to fill
- * multiple pages at once (klustering) to fill up the supplied page
- * list. Note that the pages to be filled are held with an exclusive
- * lock to prevent access by other threads while they are being filled.
+ * Fill pages with data from the disk.
*/
static int
-zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
- caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
+zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
{
- znode_t *zp = VTOZ(vp);
- page_t *pp, *cur_pp;
- objset_t *os = zp->z_zfsvfs->z_os;
- u_offset_t io_off, total;
- size_t io_len;
- int err;
-
- if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
- /*
- * We only have a single page, don't bother klustering
- */
- io_off = off;
- io_len = PAGESIZE;
- pp = page_create_va(vp, io_off, io_len,
- PG_EXCL | PG_WAIT, seg, addr);
- } else {
- /*
- * Try to find enough pages to fill the page list
- */
- pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
- &io_len, off, plsz, 0);
- }
- if (pp == NULL) {
- /*
- * The page already exists, nothing to do here.
- */
- *pl = NULL;
- return (0);
- }
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+ objset_t *os;
+ struct page *cur_pp;
+ u_offset_t io_off, total;
+ size_t io_len;
+ loff_t i_size;
+ unsigned page_idx;
+ int err;
+
+ os = zsb->z_os;
+ io_len = nr_pages << PAGE_CACHE_SHIFT;
+ i_size = i_size_read(ip);
+ io_off = page_offset(pl[0]);
+
+ if (io_off + io_len > i_size)
+ io_len = i_size - io_off;
/*
- * Fill the pages in the kluster.
+ * Iterate over the list of pages and read each page individually.
*/
- cur_pp = pp;
+ page_idx = 0;
+ cur_pp = pl[0];
for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
caddr_t va;
- ASSERT3U(io_off, ==, cur_pp->p_offset);
- va = zfs_map_page(cur_pp, S_WRITE);
+ va = kmap(cur_pp);
err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
DMU_READ_PREFETCH);
- zfs_unmap_page(cur_pp, va);
+ kunmap(cur_pp);
if (err) {
- /* On error, toss the entire kluster */
- pvn_read_done(pp, B_ERROR);
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = EIO;
return (err);
}
- cur_pp = cur_pp->p_next;
+ if (++page_idx < nr_pages)
+ cur_pp = pl[page_idx];
}
- /*
- * Fill in the page list array from the kluster starting
- * from the desired offset `off'.
- * NOTE: the page list will always be null terminated.
- */
- pvn_plist_init(pp, pl, plsz, off, io_len, rw);
- ASSERT(pl == NULL || (*pl)->p_offset == off);
-
return (0);
}
/*
- * Return pointers to the pages for the file region [off, off + len]
- * in the pl array. If plsz is greater than len, this function may
- * also return page pointers from after the specified region
- * (i.e. the region [off, off + plsz]). These additional pages are
- * only returned if they are already in the cache, or were created as
- * part of a klustered read.
- *
- * IN: vp - vnode of file to get data from.
- * off - position in file to get data from.
- * len - amount of data to retrieve.
- * plsz - length of provided page list.
- * seg - segment to obtain pages for.
- * addr - virtual address of fault.
- * rw - mode of created pages.
- * cr - credentials of caller.
- * ct - caller context.
+ * Uses zfs_fillpage to read data from the file and fill the pages.
*
- * OUT: protp - protection mode of created pages.
- * pl - list of pages created.
+ * IN: ip - inode of file to get data from.
+ * pl - list of pages to read
+ * nr_pages - number of pages to read
*
* RETURN: 0 if success
* error code if failure
* vp - atime updated
*/
/* ARGSUSED */
-static int
-zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
- page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
- enum seg_rw rw, cred_t *cr, caller_context_t *ct)
+int
+zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- page_t **pl0 = pl;
- int err = 0;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+ int err;
- /* we do our own caching, faultahead is unnecessary */
if (pl == NULL)
return (0);
- else if (len > plsz)
- len = plsz;
- else
- len = P2ROUNDUP(len, PAGESIZE);
- ASSERT(plsz >= len);
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- if (protp)
- *protp = PROT_ALL;
-
- /*
- * Loop through the requested range [off, off + len) looking
- * for pages. If we don't find a page, we will need to create
- * a new page and fill it with data from the file.
- */
- while (len > 0) {
- if (*pl = page_lookup(vp, off, SE_SHARED))
- *(pl+1) = NULL;
- else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
- goto out;
- while (*pl) {
- ASSERT3U((*pl)->p_offset, ==, off);
- off += PAGESIZE;
- addr += PAGESIZE;
- if (len > 0) {
- ASSERT3U(len, >=, PAGESIZE);
- len -= PAGESIZE;
- }
- ASSERT3U(plsz, >=, PAGESIZE);
- plsz -= PAGESIZE;
- pl++;
- }
- }
-
- /*
- * Fill out the page array with any pages already in the cache.
- */
- while (plsz > 0 &&
- (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
- off += PAGESIZE;
- plsz -= PAGESIZE;
- }
-out:
- if (err) {
- /*
- * Release any pages we have previously locked.
- */
- while (pl > pl0)
- page_unlock(*--pl);
- } else {
- ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
- }
+ err = zfs_fillpage(ip, pl, nr_pages);
- *pl = NULL;
+ if (!err)
+ ZFS_ACCESSTIME_STAMP(zsb, zp);
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (err);
}
+EXPORT_SYMBOL(zfs_getpage);
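A hedged sketch of an ->readpage hook built on zfs_getpage(); the zpl_readpage name is an assumption:

/* Sketch: populate a single page, then publish its state and unlock
 * it as the VFS expects from ->readpage. */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
        struct inode *ip = pp->mapping->host;
        struct page *pl[1] = { pp };
        int error;

        error = -zfs_getpage(ip, pl, 1);
        if (error)
                SetPageError(pp);
        else
                SetPageUptodate(pp);

        unlock_page(pp);
        return (error);
}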
/*
- * Request a memory map for a section of a file. This code interacts
- * with common code and the VM system as follows:
- *
- * common code calls mmap(), which ends up in smmap_common()
- *
- * this calls VOP_MAP(), which takes you into (say) zfs
+ * Check ZFS-specific permissions to memory-map a section of a file.
*
- * zfs_map() calls as_map(), passing segvn_create() as the callback
+ * IN: ip - inode of the file to mmap
+ * off - file offset
+ * addrp - start address in memory region
+ * len - length of memory region
+ * vm_flags - address flags
*
- * segvn_create() creates the new segment and calls VOP_ADDMAP()
- *
- * zfs_addmap() updates z_mapcnt
+ * RETURN: 0 if success
+ * error code if failure
*/
/*ARGSUSED*/
-static int
-zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
- size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
- caller_context_t *ct)
+int
+zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
+ unsigned long vm_flags)
{
- znode_t *zp = VTOZ(vp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- segvn_crargs_t vn_a;
- int error;
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
- ZFS_ENTER(zfsvfs);
+ ZFS_ENTER(zsb);
ZFS_VERIFY_ZP(zp);
- if ((prot & PROT_WRITE) && (zp->z_pflags &
+ if ((vm_flags & VM_WRITE) && (zp->z_pflags &
(ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EPERM);
}
- if ((prot & (PROT_READ | PROT_EXEC)) &&
+ if ((vm_flags & (VM_READ | VM_EXEC)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED)) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (EACCES);
}
- if (vp->v_flag & VNOMAP) {
- ZFS_EXIT(zfsvfs);
- return (ENOSYS);
- }
-
if (off < 0 || len > MAXOFFSET_T - off) {
- ZFS_EXIT(zfsvfs);
+ ZFS_EXIT(zsb);
return (ENXIO);
}
- if (vp->v_type != VREG) {
- ZFS_EXIT(zfsvfs);
- return (ENODEV);
- }
-
- /*
- * If file is locked, disallow mapping.
- */
- if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
- ZFS_EXIT(zfsvfs);
- return (EAGAIN);
- }
-
- as_rangelock(as);
- error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
- if (error != 0) {
- as_rangeunlock(as);
- ZFS_EXIT(zfsvfs);
- return (error);
- }
-
- vn_a.vp = vp;
- vn_a.offset = (u_offset_t)off;
- vn_a.type = flags & MAP_TYPE;
- vn_a.prot = prot;
- vn_a.maxprot = maxprot;
- vn_a.cred = cr;
- vn_a.amp = NULL;
- vn_a.flags = flags & ~MAP_TYPE;
- vn_a.szc = 0;
- vn_a.lgrp_mem_policy_flags = 0;
-
- error = as_map(as, *addrp, len, segvn_create, &vn_a);
-
- as_rangeunlock(as);
- ZFS_EXIT(zfsvfs);
- return (error);
-}
-
-/* ARGSUSED */
-static int
-zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
- size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
- caller_context_t *ct)
-{
- uint64_t pages = btopr(len);
-
- atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
- return (0);
-}
-
-/*
- * The reason we push dirty pages as part of zfs_delmap() is so that we get a
- * more accurate mtime for the associated file. Since we don't have a way of
- * detecting when the data was actually modified, we have to resort to
- * heuristics. If an explicit msync() is done, then we mark the mtime when the
- * last page is pushed. The problem occurs when the msync() call is omitted,
- * which by far the most common case:
- *
- * open()
- * mmap()
- * <modify memory>
- * munmap()
- * close()
- * <time lapse>
- * putpage() via fsflush
- *
- * If we wait until fsflush to come along, we can have a modification time that
- * is some arbitrary point in the future. In order to prevent this in the
- * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
- * torn down.
- */
-/* ARGSUSED */
-static int
-zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
- size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
- caller_context_t *ct)
-{
- uint64_t pages = btopr(len);
-
- ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
- atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
-
- if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
- vn_has_cached_data(vp))
- (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
-
+ ZFS_EXIT(zsb);
return (0);
}
-#endif /* HAVE_MMAP */
+EXPORT_SYMBOL(zfs_map);
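A hedged sketch of a ->mmap hook performing the zfs_map() check before the generic mapping is established; the zpl_mmap name and the byte-offset conversion are assumptions:

/* Sketch: validate ZFS flags for the mapping, then let the generic
 * page-cache mmap code establish it. */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct inode *ip = filp->f_mapping->host;
        int error;

        error = -zfs_map(ip, vma->vm_pgoff << PAGE_SHIFT,
            (caddr_t *)vma->vm_start, vma->vm_end - vma->vm_start,
            vma->vm_flags);
        if (error)
                return (error);

        return (generic_file_mmap(filp, vma));
}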
/*
* convoff - converts the given data (start, whence) to the
int
convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
{
- struct kstat stat;
+ vattr_t vap;
int error;
if ((lckdat->l_whence == 2) || (whence == 2)) {
- if ((error = zfs_getattr(ip, &stat, 0, CRED()) != 0))
+ if ((error = zfs_getattr(ip, &vap, 0, CRED())) != 0)
return (error);
}
lckdat->l_start += offset;
break;
case 2:
- lckdat->l_start += stat.size;
+ lckdat->l_start += vap.va_size;
/* FALLTHRU */
case 0:
break;
lckdat->l_start -= offset;
break;
case 2:
- lckdat->l_start -= stat.size;
+ lckdat->l_start -= vap.va_size;
/* FALLTHRU */
case 0:
break;
return (0);
}
#endif /* HAVE_UIO_ZEROCOPY */
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zfs_read_chunk_size, ulong, 0644);
+MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
+#endif