return (wrote);
}
+/*
+ * Seek within a file.  SEEK_HOLE and SEEK_DATA requests are resolved via
+ * zfs_holey() while holding the inode lock; every other whence value is
+ * passed through to the kernel's generic_file_llseek().
+ */
+static loff_t
+zpl_llseek(struct file *filp, loff_t offset, int whence)
+{
+#if defined(SEEK_HOLE) && defined(SEEK_DATA)
+ if (whence == SEEK_DATA || whence == SEEK_HOLE) {
+ struct inode *ip = filp->f_mapping->host;
+ loff_t maxbytes = ip->i_sb->s_maxbytes;
+ loff_t error;
+
+ spl_inode_lock(ip);
+ /* zfs_holey() returns a positive errno; negate for Linux convention */
+ error = -zfs_holey(ip, whence, &offset);
+ if (error == 0)
+ error = lseek_execute(filp, ip, offset, maxbytes);
+ spl_inode_unlock(ip);
+
+ return (error);
+ }
+#endif /* SEEK_HOLE && SEEK_DATA */
+
+ return generic_file_llseek(filp, offset, whence);
+}
+
/*
* It's worth taking a moment to describe how mmap is implemented
* for zfs because it differs considerably from other Linux filesystems.
ASSERT(PageLocked(pp));
ASSERT(!PageWriteback(pp));
+ ASSERT(!(current->flags & PF_NOFS));
/*
- * Disable the normal reclaim path for zpl_putpage(). This
- * ensures that all memory allocations under this call path
- * will never enter direct reclaim. If this were to happen
- * the VM might try to write out additional pages by calling
- * zpl_putpage() again resulting in a deadlock.
+ * Annotate this call path with a flag that indicates that it is
+ * unsafe to use KM_SLEEP during memory allocations due to the
+ * potential for a deadlock. KM_PUSHPAGE should be used instead.
*/
- if (current->flags & PF_MEMALLOC) {
- (void) zfs_putpage(mapping->host, pp, wbc);
- } else {
- current->flags |= PF_MEMALLOC;
- (void) zfs_putpage(mapping->host, pp, wbc);
- current->flags &= ~PF_MEMALLOC;
- }
+ current->flags |= PF_NOFS;
+ (void) zfs_putpage(mapping->host, pp, wbc);
+ current->flags &= ~PF_NOFS;
return (0);
}
return zpl_putpage(pp, wbc, pp->mapping);
}
+/*
+ * The only flag combination which matches the behavior of zfs_space() is
+ * FALLOC_FL_PUNCH_HOLE.  Per fallocate(2) this flag must be ORed with
+ * FALLOC_FL_KEEP_SIZE; both were introduced in the 2.6.38 kernel.
+ *
+ * Returns 0 on success or a negative errno; -EOPNOTSUPP for any mode
+ * other than FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE.
+ */
+long
+zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
+{
+ int error = -EOPNOTSUPP;
+
+#ifdef FALLOC_FL_PUNCH_HOLE
+ /*
+ * Hole punching must not change the file size, so the VFS requires
+ * callers to pass FALLOC_FL_KEEP_SIZE together with
+ * FALLOC_FL_PUNCH_HOLE.  Rejecting KEEP_SIZE outright (as was done
+ * previously) made this branch unreachable from the syscall path.
+ */
+ if (mode == (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) {
+ cred_t *cr = CRED();
+ flock64_t bf;
+
+ bf.l_type = F_WRLCK;
+ bf.l_whence = 0;
+ bf.l_start = offset;
+ bf.l_len = len;
+ bf.l_pid = 0;
+
+ crhold(cr);
+ /* zfs_space() returns a positive errno; negate for Linux */
+ error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
+ crfree(cr);
+ }
+#endif /* FALLOC_FL_PUNCH_HOLE */
+
+ ASSERT3S(error, <=, 0);
+ return (error);
+}
+
+#ifdef HAVE_FILE_FALLOCATE
+/*
+ * fallocate(2) f_op entry point: extract the inode from the struct file
+ * and defer to zpl_fallocate_common().
+ */
+static long
+zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
+{
+ return zpl_fallocate_common(filp->f_path.dentry->d_inode,
+ mode, offset, len);
+}
+#endif /* HAVE_FILE_FALLOCATE */
+
+/*
+ * ioctl handler.  The ZFS flag ioctls are recognized but explicitly
+ * unsupported here (-EOPNOTSUPP); any other command is not a valid ioctl
+ * for this file type (-ENOTTY, the conventional "no such ioctl" errno).
+ */
+static long
+zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case ZFS_IOC_GETFLAGS:
+ case ZFS_IOC_SETFLAGS:
+ return (-EOPNOTSUPP);
+ default:
+ return (-ENOTTY);
+ }
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * 32-bit compat ioctl entry point: delegates directly to zpl_ioctl()
+ * with no argument translation performed.
+ */
+static long
+zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ return zpl_ioctl(filp, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+
const struct address_space_operations zpl_address_space_operations = {
.readpages = zpl_readpages,
.readpage = zpl_readpage,
const struct file_operations zpl_file_operations = {
 .open = zpl_open,
 .release = zpl_release,
+ /* zpl_llseek wraps generic_file_llseek to add SEEK_HOLE/SEEK_DATA */
- .llseek = generic_file_llseek,
+ .llseek = zpl_llseek,
 .read = zpl_read,
 .write = zpl_write,
- .readdir = zpl_readdir,
 .mmap = zpl_mmap,
 .fsync = zpl_fsync,
+#ifdef HAVE_FILE_FALLOCATE
+ .fallocate = zpl_fallocate,
+#endif /* HAVE_FILE_FALLOCATE */
+ /* ioctl entry points shared with zpl_dir_file_operations */
+ .unlocked_ioctl = zpl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = zpl_compat_ioctl,
+#endif
};
const struct file_operations zpl_dir_file_operations = {
 .read = generic_read_dir,
 .readdir = zpl_readdir,
 .fsync = zpl_fsync,
+ /* same ioctl handlers as regular files */
+ .unlocked_ioctl = zpl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = zpl_compat_ioctl,
+#endif
};