return (error);
}
-ZPL_FSYNC_PROTO(zpl_fsync, filp, unused_dentry, datasync)
+/*
+ * 2.6.35 API change,
+ * As of 2.6.35 the dentry argument to the .fsync() vfs hook was deemed
+ * redundant. The dentry is still accessible via filp->f_path.dentry,
+ * and we are guaranteed that filp will never be NULL.
+ *
+ * 2.6.34 API change,
+ * Prior to 2.6.34 the nfsd kernel server would pass a NULL struct file pointer
+ * to the .fsync() hook. For this reason, we must be careful not to use
+ * filp unconditionally in the 3 argument case.
+ */
+#ifdef HAVE_2ARGS_FSYNC
+static int
+zpl_fsync(struct file *filp, int datasync)
{
+ struct dentry *dentry = filp->f_path.dentry;
+#else
+static int
+zpl_fsync(struct file *filp, struct dentry *dentry, int datasync)
+{
+#endif /* HAVE_2ARGS_FSYNC */
cred_t *cr = CRED();
int error;
crhold(cr);
- error = -zfs_fsync(filp->f_path.dentry->d_inode, datasync, cr);
+ error = -zfs_fsync(dentry->d_inode, datasync, cr);
crfree(cr);
ASSERT3S(error, <=, 0);
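For reference, the vfs hook being wrapped here changed shape at the 2.6.35 boundary roughly as follows. HAVE_2ARGS_FSYNC is assumed to be defined by a configure-time check against the target kernel, and these prototypes are only a sketch of the file_operations member on either side of the change, not part of the patch:

/* Linux < 2.6.35: three-argument .fsync(); filp may be NULL when nfsd calls it */
int (*fsync)(struct file *filp, struct dentry *dentry, int datasync);

/* Linux >= 2.6.35: two-argument .fsync(); filp is guaranteed to be non-NULL */
int (*fsync)(struct file *filp, int datasync);

Both variants funnel into the same zfs_fsync() call; only where the dentry comes from differs.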
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
- znode_t *zp = ITOZ(filp->f_mapping->host);
+ struct inode *ip = filp->f_mapping->host;
+ znode_t *zp = ITOZ(ip);
int error;
+ error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
+ (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
+ if (error)
+ return (error);
+
error = generic_file_mmap(filp, vma);
if (error)
return (error);
zpl_readpage(struct file *filp, struct page *pp)
{
struct inode *ip;
- loff_t off, i_size;
- size_t len, wrote;
- cred_t *cr = CRED();
- void *pb;
+ struct page *pl[1];
int error = 0;
ASSERT(PageLocked(pp));
ip = pp->mapping->host;
- off = page_offset(pp);
- i_size = i_size_read(ip);
- ASSERT3S(off, <, i_size);
-
- crhold(cr);
- len = MIN(PAGE_CACHE_SIZE, i_size - off);
-
- pb = kmap(pp);
-
- /* O_DIRECT is passed to bypass the page cache and avoid deadlock. */
- wrote = zpl_read_common(ip, pb, len, off, UIO_SYSSPACE, O_DIRECT, cr);
- if (wrote != len)
- error = -EIO;
+ pl[0] = pp;
- if (!error && (len < PAGE_CACHE_SIZE))
- memset(pb + len, 0, PAGE_CACHE_SIZE - len);
-
- kunmap(pp);
- crfree(cr);
+ error = -zfs_getpage(ip, pl, 1);
if (error) {
SetPageError(pp);
}
unlock_page(pp);
+ return error;
+}
- return (error);
+/*
+ * Populate a set of pages with data for the Linux page cache. This
+ * function will only be called for read ahead and never for demand
+ * paging. For simplicity, the code relies on read_cache_pages() to
+ * correctly lock each page for IO and call zpl_readpage().
+ */
+static int
+zpl_readpages(struct file *filp, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages)
+{
+ return (read_cache_pages(mapping, pages,
+ (filler_t *)zpl_readpage, filp));
+}
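The (filler_t *) cast works because read_cache_pages() hands its opaque data argument, here filp, back as the first parameter of the filler. A simplified sketch of the contract it provides for every page on the list (not the actual mm/readahead.c code):

/* page has already been added to the page cache and locked for I/O */
error = filler(data, page);        /* effectively zpl_readpage(filp, page) */

zpl_readpage() therefore sees the same calling convention as in the demand-paging case above and remains responsible for unlocking the page.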
+
+int
+zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
+{
+ struct address_space *mapping = data;
+
+ ASSERT(PageLocked(pp));
+ ASSERT(!PageWriteback(pp));
+
+ /*
+ * Disable the normal reclaim path for zpl_putpage(). This
+ * ensures that all memory allocations under this call path
+ * will never enter direct reclaim. If this were to happen
+ * the VM might try to write out additional pages by calling
+ * zpl_putpage() again resulting in a deadlock.
+ */
+ current->flags |= PF_MEMALLOC;
+ (void) zfs_putpage(mapping->host, pp, wbc);
+ current->flags &= ~PF_MEMALLOC;
+
+ return (0);
+}
+
+static int
+zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
+{
+ return write_cache_pages(mapping, wbc, zpl_putpage, mapping);
}
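zpl_putpage() is written directly against the callback type write_cache_pages() expects, declared in linux/writeback.h as:

typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
    void *data);

write_cache_pages() walks the mapping's dirty pages, locks each one, and passes it to zpl_putpage() with the address_space forwarded as data. This is also why zpl_writepage() below can simply call zpl_putpage() itself, supplying pp->mapping as the data argument.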
/*
 * Write out dirty pages to the ARC, this function is only required to
 * support mmap(2). Mapped pages may be dirtied by memory operations
* which never call .write(). These dirty pages are kept in sync with
* the ARC buffers via this hook.
- *
- * Currently this function relies on zpl_write_common() and the O_DIRECT
- * flag to push out the page. This works but the more correct way is
- * to update zfs_putapage() to be Linux friendly and use that interface.
*/
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
- struct inode *ip;
- loff_t off, i_size;
- size_t len, read;
- cred_t *cr = CRED();
- void *pb;
- int error = 0;
-
- ASSERT(PageLocked(pp));
- ip = pp->mapping->host;
- off = page_offset(pp);
- i_size = i_size_read(ip);
-
- crhold(cr);
- len = MIN(PAGE_CACHE_SIZE, i_size - off);
-
- pb = kmap(pp);
-
- /* O_DIRECT is passed to bypass the page cache and avoid deadlock. */
- read = zpl_write_common(ip, pb, len, off, UIO_SYSSPACE, O_DIRECT, cr);
- if (read != len)
- error = -EIO;
-
- kunmap(pp);
- crfree(cr);
-
- if (error) {
- SetPageError(pp);
- ClearPageUptodate(pp);
- } else {
- ClearPageError(pp);
- SetPageUptodate(pp);
- }
-
- unlock_page(pp);
-
- return (error);
+ return zpl_putpage(pp, wbc, pp->mapping);
}
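To make the comment above concrete, here is a minimal user-space sketch of a store that only ever reaches the filesystem through this hook; the path is hypothetical, and the file is assumed to already exist on a ZFS dataset and to be at least one page long:

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
        int fd = open("/tank/fs/example", O_RDWR);      /* hypothetical path */
        char *p;

        if (fd < 0)
                return (1);

        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return (1);

        /* Dirties the mapped page without ever calling write(2). */
        memcpy(p, "hello", 5);

        /* msync(2), or later memory pressure, pushes the dirty page
         * through .writepage() and on into the ARC. */
        msync(p, 4096, MS_SYNC);

        munmap(p, 4096);
        close(fd);
        return (0);
}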
const struct address_space_operations zpl_address_space_operations = {
+ .readpages = zpl_readpages,
.readpage = zpl_readpage,
.writepage = zpl_writepage,
+ .writepages = zpl_writepages,
};
const struct file_operations zpl_file_operations = {