* return (error); // done, report error
*/
+/*
+ * Virus scanning is unsupported. It would be possible to add a hook
+ * here to perform the required virus scan. This could be done
+ * entirely in the kernel or potentially as an update to invoke a
+ * scanning utility.
+ *
+ * Stub: always returns 0 (file treated as clean).  The 'cr' and
+ * 'async' arguments are accepted only for interface compatibility
+ * with a future scanner hook and are currently unused.
+ */
+static int
+zfs_vscan(struct inode *ip, cred_t *cr, int async)
+{
+ return (0);
+}
+
+/* ARGSUSED */
+/*
+ * zfs_open - ZFS hook invoked when a file is opened.
+ *
+ *	IN:	ip	- inode of the file being opened.
+ *		mode	- kernel open mode (FMODE_* flags).
+ *		flag	- file flags (O_APPEND, O_SYNC, ...).
+ *		cr	- credentials of the caller.
+ *
+ *	RETURN:	0 on success.
+ *		EPERM if the file is append-only and write access was
+ *		requested without O_APPEND.
+ *		EACCES if the virus-scan hook reports a failure.
+ */
+int
+zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
+{
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+
+ ZFS_ENTER(zsb);
+ ZFS_VERIFY_ZP(zp);
+
+ /* Honor ZFS_APPENDONLY file attribute */
+ if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
+ ((flag & O_APPEND) == 0)) {
+ ZFS_EXIT(zsb);
+ return (EPERM);
+ }
+
+ /*
+ * Virus scan eligible files on open: regular, non-quarantined,
+ * non-empty files outside the .zfs control directory, and only
+ * when scanning is enabled on this filesystem (z_vscan).
+ * zfs_vscan() is currently a stub that always returns 0.
+ */
+ if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
+ !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
+ if (zfs_vscan(ip, cr, 0) != 0) {
+ ZFS_EXIT(zsb);
+ return (EACCES);
+ }
+ }
+
+ /* Keep a count of the synchronous opens in the znode */
+ if (flag & O_SYNC)
+ atomic_inc_32(&zp->z_sync_cnt);
+
+ ZFS_EXIT(zsb);
+ return (0);
+}
+EXPORT_SYMBOL(zfs_open);
+
+/* ARGSUSED */
+/*
+ * zfs_close - ZFS hook invoked when the last reference to an open
+ * file is released.
+ *
+ *	IN:	ip	- inode of the file being closed.
+ *		flag	- file flags the file was opened with.
+ *		cr	- credentials of the caller.
+ *
+ *	RETURN:	0 (always succeeds; the scan VERIFY panics on failure).
+ */
+int
+zfs_close(struct inode *ip, int flag, cred_t *cr)
+{
+ znode_t *zp = ITOZ(ip);
+ zfs_sb_t *zsb = ITOZSB(ip);
+
+ ZFS_ENTER(zsb);
+ ZFS_VERIFY_ZP(zp);
+
+ /*
+ * Clear the synchronous-open count in the znode.  NOTE(review):
+ * despite the symmetry with the atomic_inc_32() in zfs_open(),
+ * the count is zeroed here rather than decremented -- confirm
+ * this matches the intended upstream semantics.
+ */
+ if (flag & O_SYNC)
+ zp->z_sync_cnt = 0;
+
+ /*
+ * Virus scan eligible files on close, mirroring the eligibility
+ * test in zfs_open().  zfs_vscan() is a stub returning 0, so the
+ * VERIFY cannot currently fire.
+ */
+ if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
+ !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
+ VERIFY(zfs_vscan(ip, cr, 1) == 0);
+
+ ZFS_EXIT(zsb);
+ return (0);
+}
+EXPORT_SYMBOL(zfs_close);
+
#if defined(_KERNEL)
/*
* When a file is memory mapped, we must keep the IO data synchronized
mutex_enter(&zp->z_lock);
vap->va_type = vn_mode_to_vtype(zp->z_mode);
vap->va_mode = zp->z_mode;
- vap->va_fsid = 0;
+ vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
vap->va_nodeid = zp->z_id;
if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp))
links = zp->z_links + 1;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
- xvattr_t tmpxvattr;
+ xvattr_t *tmpxvattr;
uint_t mask = vap->va_mask;
uint_t saved_mask;
int trim_mask = 0;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
- sa_bulk_attr_t bulk[7], xattr_bulk[7];
+ sa_bulk_attr_t *bulk, *xattr_bulk;
int count = 0, xattr_count = 0;
if (mask == 0)
*/
xoap = xva_getxoptattr(xvap);
- xva_init(&tmpxvattr);
+ tmpxvattr = kmem_alloc(sizeof(xvattr_t), KM_SLEEP);
+ xva_init(tmpxvattr);
+
+ bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP);
+ xattr_bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP);
/*
* Immutable files can only alter immutable bit and atime
if ((zp->z_pflags & ZFS_IMMUTABLE) &&
((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
- ZFS_EXIT(zsb);
- return (EPERM);
+ err = EPERM;
+ goto out3;
}
if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
- ZFS_EXIT(zsb);
- return (EPERM);
+ err = EPERM;
+ goto out3;
}
/*
if (mask & (ATTR_ATIME | ATTR_MTIME)) {
if (((mask & ATTR_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & ATTR_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
- ZFS_EXIT(zsb);
- return (EOVERFLOW);
+ err = EOVERFLOW;
+ goto out3;
}
}
/* Can this be moved to before the top label? */
if (zsb->z_vfs->mnt_flags & MNT_READONLY) {
- ZFS_EXIT(zsb);
- return (EROFS);
+ err = EROFS;
+ goto out3;
}
/*
if (mask & ATTR_SIZE) {
err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ if (err)
+ goto out3;
+
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
*/
/* XXX - would it be OK to generate a log record here? */
err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ if (err)
+ goto out3;
/* Careful negative Linux return code here */
err = -vmtruncate(ip, vap->va_size);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ if (err)
+ goto out3;
}
if (mask & (ATTR_ATIME|ATTR_MTIME) ||
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_APPENDONLY);
- XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
+ XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
}
}
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NOUNLINK);
- XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
+ XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
}
}
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
- XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
+ XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
}
}
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NODUMP);
- XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
+ XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
}
}
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
- XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
+ XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
}
}
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
- XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
+ XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
mutex_exit(&zp->z_lock);
- ZFS_EXIT(zsb);
- return (EPERM);
+ err = EPERM;
+ goto out3;
}
if (need_policy == FALSE &&
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
err = secpolicy_setid_setsticky_clear(ip, vap,
&oldva, cr);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ if (err)
+ goto out3;
+
trim_mask |= ATTR_MODE;
} else {
need_policy = TRUE;
}
err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
- if (err) {
- ZFS_EXIT(zsb);
- return (err);
- }
+ if (err)
+ goto out3;
if (trim_mask)
vap->va_mask |= saved_mask;
* so that return masks can be set for caller.
*/
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
XVA_SET_REQ(xvap, XAT_APPENDONLY);
}
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
XVA_SET_REQ(xvap, XAT_NOUNLINK);
}
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
XVA_SET_REQ(xvap, XAT_IMMUTABLE);
}
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
XVA_SET_REQ(xvap, XAT_NODUMP);
}
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
}
- if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
+ if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
+out3:
+ kmem_free(xattr_bulk, sizeof(sa_bulk_attr_t) * 7);
+ kmem_free(bulk, sizeof(sa_bulk_attr_t) * 7);
+ kmem_free(tmpxvattr, sizeof(xvattr_t));
ZFS_EXIT(zsb);
return (err);
}