Annotate KM_PUSHPAGE call paths with PF_NOFS
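
Allocating with KM_SLEEP from a zvol I/O path can recurse into reclaim,
which may itself try to write back to the same zvol and deadlock.  The
taskq callbacks below therefore set PF_NOFS for their lifetime, and any
allocation made while the flag is set must use KM_PUSHPAGE instead.  A
minimal sketch of the pattern being applied (the callback name here is
hypothetical):

    /*
     * Any allocation between setting and clearing PF_NOFS must use
     * KM_PUSHPAGE; the ASSERT catches nested or leaked annotations.
     */
    static void
    zvol_task_sketch(void *arg)
    {
            ASSERT(!(current->flags & PF_NOFS));
            current->flags |= PF_NOFS;

            /* ... perform the I/O; allocate with KM_PUSHPAGE ... */

            current->flags &= ~PF_NOFS;
    }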
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 0aaa268..60ff64e 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
 #include <sys/zvol.h>
 #include <linux/blkdev_compat.h>
 
+unsigned int zvol_inhibit_dev = 0;
 unsigned int zvol_major = ZVOL_MAJOR;
-unsigned int zvol_threads = 0;
+unsigned int zvol_threads = 32;
+unsigned long zvol_max_discard_blocks = 16384;
 
 static taskq_t *zvol_taskq;
 static kmutex_t zvol_state_lock;
@@ -459,11 +461,15 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
        uint32_t blocksize = zv->zv_volblocksize;
        zilog_t *zilog = zv->zv_zilog;
        boolean_t slogging;
+       ssize_t immediate_write_sz;
 
        if (zil_replaying(zilog, tx))
                return;
 
-       slogging = spa_has_slogs(zilog->zl_spa);
+       immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
+               ? 0 : zvol_immediate_write_sz;
+       slogging = spa_has_slogs(zilog->zl_spa) &&
+               (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
 
        while (size) {
                itx_t *itx;
@@ -475,7 +481,7 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
                 * Unlike zfs_log_write(), we can be called with writes
                 * of up to DMU_MAX_ACCESS/2 (5MB) in size.
                 */
-               if (blocksize > zvol_immediate_write_sz && !slogging &&
+               if (blocksize > immediate_write_sz && !slogging &&
                    size >= blocksize && offset % blocksize == 0) {
                        write_state = WR_INDIRECT; /* uses dmu_sync */
                        len = blocksize;
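
(For illustration, with numbers not taken from the patch: given an 8K
volblocksize, logbias=throughput and a 64K write at offset 0,
immediate_write_sz is forced to 0 and slogging is false, so every
aligned 8K chunk takes the WR_INDIRECT path and is written once via
dmu_sync() rather than twice, once to the log and once to the pool.)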
@@ -534,6 +540,14 @@ zvol_write(void *arg)
        dmu_tx_t *tx;
        rl_t *rl;
 
+       /*
+        * Annotate this call path with a flag that indicates that it is
+        * unsafe to use KM_SLEEP during memory allocations due to the
+        * potential for a deadlock.  KM_PUSHPAGE should be used instead.
+        */
+       ASSERT(!(current->flags & PF_NOFS));
+       current->flags |= PF_NOFS;
+
        if (req->cmd_flags & VDEV_REQ_FLUSH)
                zil_commit(zv->zv_zilog, ZVOL_OBJ);
 
@@ -542,7 +556,7 @@ zvol_write(void *arg)
         */
        if (size == 0) {
                blk_end_request(req, 0, size);
-               return;
+               goto out;
        }
 
        rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
@@ -556,7 +570,7 @@ zvol_write(void *arg)
                dmu_tx_abort(tx);
                zfs_range_unlock(rl);
                blk_end_request(req, -error, size);
-               return;
+               goto out;
        }
 
        error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
@@ -572,8 +586,56 @@ zvol_write(void *arg)
                zil_commit(zv->zv_zilog, ZVOL_OBJ);
 
        blk_end_request(req, -error, size);
+out:
+       current->flags &= ~PF_NOFS;
 }
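
Note that both early returns in zvol_write() become goto out jumps so
PF_NOFS is cleared on every exit path; taskq threads are reused, and a
leaked flag would trip the ASSERT on the next task this thread ran.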
 
+#ifdef HAVE_BLK_QUEUE_DISCARD
+static void
+zvol_discard(void *arg)
+{
+       struct request *req = (struct request *)arg;
+       struct request_queue *q = req->q;
+       zvol_state_t *zv = q->queuedata;
+       uint64_t offset = blk_rq_pos(req) << 9;
+       uint64_t size = blk_rq_bytes(req);
+       int error;
+       rl_t *rl;
+
+       /*
+        * Annotate this call path with a flag that indicates that it is
+        * unsafe to use KM_SLEEP during memory allocations due to the
+        * potential for a deadlock.  KM_PUSHPAGE should be used instead.
+        */
+       ASSERT(!(current->flags & PF_NOFS));
+       current->flags |= PF_NOFS;
+
+       if (offset + size > zv->zv_volsize) {
+               blk_end_request(req, -EIO, size);
+               goto out;
+       }
+
+       if (size == 0) {
+               blk_end_request(req, 0, size);
+               goto out;
+       }
+
+       rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
+
+       error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);
+
+       /*
+        * TODO: the discard should probably be recorded in the ZIL so
+        * that a replay after a crash reapplies it.
+        */
+
+       zfs_range_unlock(rl);
+
+       blk_end_request(req, -error, size);
+out:
+       current->flags &= ~PF_NOFS;
+}
+#endif /* HAVE_BLK_QUEUE_DISCARD */
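
The new path can be exercised from user space with the stock BLKDISCARD
ioctl.  A minimal sketch, assuming a zvol exposed at /dev/zd0 (the
device path is an assumption):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>   /* BLKDISCARD */

    int
    main(void)
    {
            /* Discard the first 1MB: { offset, length } in bytes. */
            uint64_t range[2] = { 0, 1 << 20 };
            int fd = open("/dev/zd0", O_WRONLY);

            if (fd < 0 || ioctl(fd, BLKDISCARD, &range) < 0)
                    perror("BLKDISCARD");

            return (0);
    }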
+
 /*
  * Common read path running under the zvol taskq context.  This function
  * is responsible for copying the requested data out of the DMU and in to
@@ -674,6 +736,13 @@ zvol_request(struct request_queue *q)
                                break;
                        }
 
+#ifdef HAVE_BLK_QUEUE_DISCARD
+                       if (req->cmd_flags & VDEV_REQ_DISCARD) {
+                               zvol_dispatch(zvol_discard, req);
+                               break;
+                       }
+#endif /* HAVE_BLK_QUEUE_DISCARD */
+
                        zvol_dispatch(zvol_write, req);
                        break;
                default:
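
(Discard requests arrive as writes from the block layer's point of
view, so they are checked in the write branch above and dispatched to
zvol_discard() before the request can fall through to zvol_write().)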
@@ -1188,6 +1257,21 @@ __zvol_create_minor(const char *name)
 
        set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
 
+       blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
+       blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
+       blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
+       blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
+       blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
+#ifdef HAVE_BLK_QUEUE_DISCARD
+       blk_queue_max_discard_sectors(zv->zv_queue,
+           (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
+       blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
+       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
+#endif
+#ifdef HAVE_BLK_QUEUE_NONROT
+       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
+#endif
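
As a worked example of the discard limit (assuming the default 8K
volblocksize): 16384 blocks * 8192 bytes = 128MB per discard request,
which the >> 9 converts to 262144 512-byte sectors.  QUEUE_FLAG_NONROT
additionally marks the zvol as non-rotational so the elevator does not
schedule it like a spinning disk.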
+
        if (zil_replay_disable)
                zil_destroy(dmu_objset_zil(os), B_FALSE);
        else
@@ -1281,6 +1365,9 @@ zvol_create_minors(const char *pool)
        spa_t *spa = NULL;
        int error = 0;
 
+       if (zvol_inhibit_dev)
+               return (0);
+
        mutex_enter(&zvol_state_lock);
        if (pool) {
                error = dmu_objset_find_spa(NULL, pool, zvol_create_minors_cb,
@@ -1310,6 +1397,9 @@ zvol_remove_minors(const char *pool)
        zvol_state_t *zv, *zv_next;
        char *str;
 
+       if (zvol_inhibit_dev)
+               return;
+
        str = kmem_zalloc(MAXNAMELEN, KM_SLEEP);
        if (pool) {
                (void) strncpy(str, pool, strlen(pool));
@@ -1334,9 +1424,6 @@ zvol_init(void)
 {
        int error;
 
-       if (!zvol_threads)
-               zvol_threads = num_online_cpus();
-
        zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
                                  zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
        if (zvol_taskq == NULL) {
@@ -1374,8 +1461,14 @@ zvol_fini(void)
        list_destroy(&zvol_state_list);
 }
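
With the zvol_init() hunk above, the taskq is now created with a fixed
default of 32 worker threads rather than one per online CPU; the count
remains tunable via the zvol_threads module parameter below.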
 
+module_param(zvol_inhibit_dev, uint, 0644);
+MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
+
 module_param(zvol_major, uint, 0444);
 MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
 
 module_param(zvol_threads, uint, 0444);
 MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");
+
+module_param(zvol_max_discard_blocks, ulong, 0444);
+MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard at once");
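
All of these can be set at module load time; for example (illustrative
values), a line such as

    options zfs zvol_inhibit_dev=1 zvol_max_discard_blocks=32768

in /etc/modprobe.d/zfs.conf.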