X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fzfs_vnops.c;h=8ec4db26f32e4062669e16808fad16013ba4f0d1;hb=1c24b699b0c7590e135f4701b50a4c933ebe0499;hp=c32b2a39f8da8184053c566004e8cdc199279be9;hpb=2cf7f52bc42f215d4ef27d0fd75fc1b1417cb841;p=zfs.git diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index c32b2a3..8ec4db2 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -63,6 +63,7 @@ #include #include #include "fs/fs_subr.h" +#include #include #include #include @@ -399,18 +400,14 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) return (0); } -#ifdef HAVE_MANDLOCKS /* * Check for mandatory locks */ - if (MANDMODE(zp->z_mode)) { - if (error = chklock(ip, FREAD, - uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) { - ZFS_EXIT(zsb); - return (error); - } + if (mandatory_lock(ip) && + !lock_may_read(ip, uio->uio_loffset, uio->uio_resid)) { + ZFS_EXIT(zsb); + return (EAGAIN); } -#endif /* HAVE_MANDLOCK */ /* * If we're in FRSYNC mode, sync out this znode before reading it. @@ -581,17 +578,14 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) return (EINVAL); } -#ifdef HAVE_MANDLOCKS /* * Check for mandatory locks before calling zfs_range_lock() * in order to prevent a deadlock with locks set via fcntl(). */ - if (MANDMODE((mode_t)zp->z_mode) && - (error = chklock(ip, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) { + if (mandatory_lock(ip) && !lock_may_write(ip, woff, n)) { ZFS_EXIT(zsb); - return (error); + return (EAGAIN); } -#endif /* HAVE_MANDLOCKS */ #ifdef HAVE_UIO_ZEROCOPY /* @@ -874,7 +868,7 @@ iput_async(struct inode *ip, taskq_t *taskq) { ASSERT(atomic_read(&ip->i_count) > 0); if (atomic_read(&ip->i_count) == 1) - taskq_dispatch(taskq, (task_func_t *)iput, ip, TQ_SLEEP); + taskq_dispatch(taskq, (task_func_t *)iput, ip, TQ_PUSHPAGE); else iput(ip); } @@ -940,7 +934,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) return (ENOENT); } - zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP); + zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE); zgd->zgd_zilog = zsb->z_log; zgd->zgd_private = zp; @@ -1120,14 +1114,6 @@ zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags, if (flags & LOOKUP_XATTR) { /* - * If the xattr property is off, refuse the lookup request. - */ - if (!(zsb->z_flags & ZSB_XATTR)) { - ZFS_EXIT(zsb); - return (EINVAL); - } - - /* * We don't allow recursive attributes.. * Maybe someday we will. */ @@ -1914,13 +1900,13 @@ top: out: zfs_dirent_unlock(dl); + zfs_inode_update(dzp); + zfs_inode_update(zp); iput(ip); if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) zil_commit(zilog, 0); - zfs_inode_update(dzp); - zfs_inode_update(zp); ZFS_EXIT(zsb); return (error); } @@ -2027,12 +2013,22 @@ zfs_readdir(struct inode *ip, void *dirent, filldir_t filldir, goto update; } + /* + * Allow multiple entries provided the first entry is + * the object id. Non-zpl consumers may safely make + * use of the additional space. 
+ * + * XXX: This should be a feature flag for compatibility + */ if (zap.za_integer_length != 8 || - zap.za_num_integers != 1) { + zap.za_num_integers == 0) { cmn_err(CE_WARN, "zap_readdir: bad directory " - "entry, obj = %lld, offset = %lld\n", + "entry, obj = %lld, offset = %lld, " + "length = %d, num = %lld\n", (u_longlong_t)zp->z_id, - (u_longlong_t)*pos); + (u_longlong_t)*pos, + zap.za_integer_length, + (u_longlong_t)zap.za_num_integers); error = ENXIO; goto update; } @@ -2050,7 +2046,7 @@ zfs_readdir(struct inode *ip, void *dirent, filldir_t filldir, dmu_prefetch(os, objnum, 0, 0); } - if (*pos >= 2) { + if (*pos > 2 || (*pos == 2 && !zfs_show_ctldir(zp))) { zap_cursor_advance(&zc); *pos = zap_cursor_serialize(&zc); } else { @@ -2298,6 +2294,49 @@ zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) EXPORT_SYMBOL(zfs_getattr); /* + * Get the basic file attributes and place them in the provided kstat + * structure. The inode is assumed to be the authoritative source + * for most of the attributes. However, the znode currently has the + * authoritative atime, blksize, and block count. + * + * IN: ip - inode of file. + * + * OUT: sp - kstat values. + * + * RETURN: 0 (always succeeds) + */ +/* ARGSUSED */ +int +zfs_getattr_fast(struct inode *ip, struct kstat *sp) +{ + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); + + ZFS_ENTER(zsb); + ZFS_VERIFY_ZP(zp); + + mutex_enter(&zp->z_lock); + + generic_fillattr(ip, sp); + ZFS_TIME_DECODE(&sp->atime, zp->z_atime); + + sa_object_size(zp->z_sa_hdl, (uint32_t *)&sp->blksize, &sp->blocks); + if (unlikely(zp->z_blksz == 0)) { + /* + * Block size hasn't been set; suggest maximal I/O transfers. + */ + sp->blksize = zsb->z_max_blksz; + } + + mutex_exit(&zp->z_lock); + + ZFS_EXIT(zsb); + + return (0); +} +EXPORT_SYMBOL(zfs_getattr_fast); + +/* * Set the file attributes to the values contained in the * vattr structure. * @@ -3712,141 +3751,203 @@ top: } EXPORT_SYMBOL(zfs_link); +static void +zfs_putpage_commit_cb(void *arg, int error) +{ + struct page *pp = arg; + + if (error) { + __set_page_dirty_nobuffers(pp); + + if (error != ECANCELED) + SetPageError(pp); + } else { + ClearPageError(pp); + } + + end_page_writeback(pp); +} + /* - * Push a page out to disk - * - * IN: vp - file to push page to. - * pp - page to push. - * off - start of range pushed. - * len - len of range pushed. + * Push a page out to disk, once the page is on stable storage the + * registered commit callback will be run as notification of completion. * + * IN: ip - page mapped for inode. + * pp - page to push (page is locked) + * wbc - writeback control data * * RETURN: 0 if success * error code if failure * - * NOTE: callers must have locked the page to be pushed. 
+ * Timestamps: + * ip - ctime|mtime updated */ /* ARGSUSED */ -static int -zfs_putapage(struct inode *ip, struct page *pp, u_offset_t off, size_t len) +int +zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) { - znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); - dmu_tx_t *tx; - caddr_t va; - int err; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); + loff_t offset; + loff_t pgoff; + unsigned int pglen; + rl_t *rl; + dmu_tx_t *tx; + caddr_t va; + int err = 0; + uint64_t mtime[2], ctime[2]; + sa_bulk_attr_t bulk[3]; + int cnt = 0; + int sync; + + ZFS_ENTER(zsb); + ZFS_VERIFY_ZP(zp); + + ASSERT(PageLocked(pp)); + + pgoff = page_offset(pp); /* Page byte-offset in file */ + offset = i_size_read(ip); /* File length in bytes */ + pglen = MIN(PAGE_CACHE_SIZE, /* Page length in bytes */ + P2ROUNDUP(offset, PAGE_CACHE_SIZE)-pgoff); + + /* Page is beyond end of file */ + if (pgoff >= offset) { + unlock_page(pp); + ZFS_EXIT(zsb); + return (0); + } + + /* Truncate page length to end of file */ + if (pgoff + pglen > offset) + pglen = offset - pgoff; +#if 0 /* - * Can't push pages past end-of-file. + * FIXME: Allow mmap writes past its quota. The correct fix + * is to register a page_mkwrite() handler to count the page + * against its quota when it is about to be dirtied. */ - if (off >= zp->z_size) { - /* ignore all pages */ - err = 0; - goto out; - } else if (off + len > zp->z_size) - len = zp->z_size - off; - if (zfs_owner_overquota(zsb, zp, B_FALSE) || zfs_owner_overquota(zsb, zp, B_TRUE)) { err = EDQUOT; - goto out; } -top: +#endif + + set_page_writeback(pp); + unlock_page(pp); + + rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER); tx = dmu_tx_create(zsb->z_os); - dmu_tx_hold_write(tx, zp->z_id, off, len); + + sync = ((zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) || + (wbc->sync_mode == WB_SYNC_ALL)); + if (!sync) + dmu_tx_callback_register(tx, zfs_putpage_commit_cb, pp); + + dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); zfs_sa_upgrade_txholds(tx, zp); err = dmu_tx_assign(tx, TXG_NOWAIT); if (err != 0) { - if (err == ERESTART) { + if (err == ERESTART) dmu_tx_wait(tx); - dmu_tx_abort(tx); - goto top; - } + + /* Will call all registered commit callbacks */ dmu_tx_abort(tx); - goto out; + + /* + * For the synchronous case the commit callback must be + * explicitly called because there is no registered callback. 
+ */ + if (sync) + zfs_putpage_commit_cb(pp, ECANCELED); + + zfs_range_unlock(rl); + ZFS_EXIT(zsb); + return (err); } va = kmap(pp); - ASSERT3U(len, <=, PAGESIZE); - dmu_write(zsb->z_os, zp->z_id, off, len, va, tx); + ASSERT3U(pglen, <=, PAGE_CACHE_SIZE); + dmu_write(zsb->z_os, zp->z_id, pgoff, pglen, va, tx); kunmap(pp); - if (err == 0) { - uint64_t mtime[2], ctime[2]; - sa_bulk_attr_t bulk[3]; - int count = 0; + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zsb), NULL, &zp->z_pflags, 8); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, - &mtime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, - &ctime, 16); - SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, - &zp->z_pflags, 8); - zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, - B_TRUE); - zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, off, len, 0); - } + /* Preserve the mtime and ctime provided by the inode */ + ZFS_TIME_ENCODE(&ip->i_mtime, mtime); + ZFS_TIME_ENCODE(&ip->i_ctime, ctime); + zp->z_atime_dirty = 0; + zp->z_seq++; + + err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx); + + zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0); dmu_tx_commit(tx); -out: + zfs_range_unlock(rl); + + if (sync) { + zil_commit(zsb->z_log, zp->z_id); + zfs_putpage_commit_cb(pp, err); + } + + ZFS_EXIT(zsb); return (err); } /* - * Copy the portion of the file indicated from page into the file. - * - * IN: ip - inode of file to push page data to. - * wbc - Unused parameter - * data - pointer to address_space - * - * RETURN: 0 if success - * error code if failure - * - * Timestamps: - * vp - ctime|mtime updated + * Update the system attributes when the inode has been dirtied. For the + * moment we're conservative and only update the atime, mtime, and ctime. 
*/ -/*ARGSUSED*/ int -zfs_putpage(struct page *page, struct writeback_control *wbc, void *data) +zfs_dirty_inode(struct inode *ip, int flags) { - struct address_space *mapping = data; - struct inode *ip = mapping->host; - znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); - rl_t *rl; - u_offset_t io_off; - size_t io_len; - size_t len; - int error; - - io_off = page_offset(page); - io_len = PAGESIZE; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); + dmu_tx_t *tx; + uint64_t atime[2], mtime[2], ctime[2]; + sa_bulk_attr_t bulk[3]; + int error; + int cnt = 0; ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER); + tx = dmu_tx_create(zsb->z_os); - if (io_off > zp->z_size) { - /* past end of file */ - zfs_range_unlock(rl); - ZFS_EXIT(zsb); - return (0); + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); + zfs_sa_upgrade_txholds(tx, zp); + + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + dmu_tx_abort(tx); + goto out; } - len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off); + mutex_enter(&zp->z_lock); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zsb), NULL, &atime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); - error = zfs_putapage(ip, page, io_off, len); - zfs_range_unlock(rl); + /* Preserve the mtime and ctime provided by the inode */ + ZFS_TIME_ENCODE(&ip->i_atime, atime); + ZFS_TIME_ENCODE(&ip->i_mtime, mtime); + ZFS_TIME_ENCODE(&ip->i_ctime, ctime); + zp->z_atime_dirty = 0; - if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) - zil_commit(zsb->z_log, zp->z_id); + error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx); + mutex_exit(&zp->z_lock); + + dmu_tx_commit(tx); +out: ZFS_EXIT(zsb); return (error); } -EXPORT_SYMBOL(zfs_putpage); +EXPORT_SYMBOL(zfs_dirty_inode); /*ARGSUSED*/ void @@ -3856,9 +3957,10 @@ zfs_inactive(struct inode *ip) zfs_sb_t *zsb = ITOZSB(ip); int error; -#ifdef HAVE_SNAPSHOT - /* Early return for snapshot inode? */ -#endif /* HAVE_SNAPSHOT */ + if (zfsctl_is_node(ip)) { + zfsctl_inode_inactive(ip); + return; + } rw_enter(&zsb->z_teardown_inactive_lock, RW_READER); if (zp->z_sa_hdl == NULL) { @@ -4135,6 +4237,17 @@ zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag, return (EINVAL); } + /* + * Permissions aren't checked on Solaris because on this OS + * zfs_space() can only be called with an opened file handle. + * On Linux we can get here through truncate_range() which + * operates directly on inodes, so we need to check access rights. + */ + if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) { + ZFS_EXIT(zsb); + return (error); + } + off = bfp->l_start; len = bfp->l_len; /* 0 means from off to end of file */