Fix NULL deref when zvol_alloc() fails
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 9dda040..5d48025 100644
 #include <sys/zvol.h>
 #include <linux/blkdev_compat.h>
 
+unsigned int zvol_inhibit_dev = 0;
 unsigned int zvol_major = ZVOL_MAJOR;
-unsigned int zvol_threads = 0;
+unsigned int zvol_threads = 32;
+unsigned long zvol_max_discard_blocks = 16384;
 
 static taskq_t *zvol_taskq;
 static kmutex_t zvol_state_lock;
@@ -459,11 +461,15 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
        uint32_t blocksize = zv->zv_volblocksize;
        zilog_t *zilog = zv->zv_zilog;
        boolean_t slogging;
+       ssize_t immediate_write_sz;
 
        if (zil_replaying(zilog, tx))
                return;
 
-       slogging = spa_has_slogs(zilog->zl_spa);
+       immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
+               ? 0 : zvol_immediate_write_sz;
+       slogging = spa_has_slogs(zilog->zl_spa) &&
+               (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
 
        while (size) {
                itx_t *itx;
@@ -475,7 +481,7 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
                 * Unlike zfs_log_write() we can be called with
                 * up to DMU_MAX_ACCESS/2 (5MB) writes.
                 */
-               if (blocksize > zvol_immediate_write_sz && !slogging &&
+               if (blocksize > immediate_write_sz && !slogging &&
                    size >= blocksize && offset % blocksize == 0) {
                        write_state = WR_INDIRECT; /* uses dmu_sync */
                        len = blocksize;
@@ -534,6 +540,25 @@ zvol_write(void *arg)
        dmu_tx_t *tx;
        rl_t *rl;
 
+       /*
+        * Annotate this call path with a flag that indicates that it is
+        * unsafe to use KM_SLEEP during memory allocations due to the
+        * potential for a deadlock.  KM_PUSHPAGE should be used instead.
+        */
+       ASSERT(!(current->flags & PF_NOFS));
+       current->flags |= PF_NOFS;
+
+       if (req->cmd_flags & VDEV_REQ_FLUSH)
+               zil_commit(zv->zv_zilog, ZVOL_OBJ);
+
+       /*
+        * Some requests are flush-only and carry no data (size == 0);
+        * complete them immediately.
+        */
+       if (size == 0) {
+               blk_end_request(req, 0, size);
+               goto out;
+       }
+
        rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
 
        tx = dmu_tx_create(zv->zv_objset);
@@ -545,21 +570,80 @@ zvol_write(void *arg)
                dmu_tx_abort(tx);
                zfs_range_unlock(rl);
                blk_end_request(req, -error, size);
-               return;
+               goto out;
        }
 
        error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
        if (error == 0)
-               zvol_log_write(zv, tx, offset, size, rq_is_sync(req));
+               zvol_log_write(zv, tx, offset, size,
+                   req->cmd_flags & VDEV_REQ_FUA);
 
        dmu_tx_commit(tx);
        zfs_range_unlock(rl);
 
-       if (rq_is_sync(req) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
+       if ((req->cmd_flags & VDEV_REQ_FUA) ||
+           zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
                zil_commit(zv->zv_zilog, ZVOL_OBJ);
 
        blk_end_request(req, -error, size);
+out:
+       current->flags &= ~PF_NOFS;
+}
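
A note on the PF_NOFS bracketing introduced above: PF_NOFS and KM_PUSHPAGE come from the SPL, not mainline Linux, and the same reasoning motivates the KM_SLEEP to KM_PUSHPAGE change in zvol_get_data() further down. As a hedged, standalone sketch of the pattern (illustrative; it only builds against the SPL headers):

	#include <sys/kmem.h>	/* SPL: kmem_alloc(), KM_PUSHPAGE, PF_NOFS */

	static void
	zvol_taskq_func_sketch(size_t size)
	{
		void *buf;

		/* Reclaim that re-enters the fs from here can deadlock. */
		ASSERT(!(current->flags & PF_NOFS));
		current->flags |= PF_NOFS;

		/* KM_PUSHPAGE never initiates fs writeback; KM_SLEEP may. */
		buf = kmem_alloc(size, KM_PUSHPAGE);
		if (buf != NULL)
			kmem_free(buf, size);

		current->flags &= ~PF_NOFS;
	}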
+
+#ifdef HAVE_BLK_QUEUE_DISCARD
+static void
+zvol_discard(void *arg)
+{
+       struct request *req = (struct request *)arg;
+       struct request_queue *q = req->q;
+       zvol_state_t *zv = q->queuedata;
+       uint64_t start = blk_rq_pos(req) << 9;
+       uint64_t end = start + blk_rq_bytes(req);
+       int error;
+       rl_t *rl;
+
+       /*
+        * Annotate this call path with a flag that indicates that it is
+        * unsafe to use KM_SLEEP during memory allocations due to the
+        * potential for a deadlock.  KM_PUSHPAGE should be used instead.
+        */
+       ASSERT(!(current->flags & PF_NOFS));
+       current->flags |= PF_NOFS;
+
+       if (end > zv->zv_volsize) {
+               blk_end_request(req, -EIO, blk_rq_bytes(req));
+               goto out;
+       }
+
+       /*
+        * Align the request to volume block boundaries. If we don't,
+        * then this will force dnode_free_range() to zero out the
+        * unaligned parts, which is slow (read-modify-write) and
+        * useless since we are not freeing any space by doing so.
+        */
+       start = P2ROUNDUP(start, zv->zv_volblocksize);
+       end = P2ALIGN(end, zv->zv_volblocksize);
+
+       if (start >= end) {
+               blk_end_request(req, 0, blk_rq_bytes(req));
+               goto out;
+       }
+
+       rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);
+
+       error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end - start);
+
+       /*
+        * TODO: maybe we should log this operation to the ZIL so it
+        * can be replayed.
+        */
+
+       zfs_range_unlock(rl);
+
+       blk_end_request(req, -error, blk_rq_bytes(req));
+out:
+       current->flags &= ~PF_NOFS;
 }
+#endif /* HAVE_BLK_QUEUE_DISCARD */
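
The block alignment in zvol_discard() is worth a worked example. A runnable userspace sketch, with the usual ZFS P2 macros restated and an assumed 8K volblocksize (so the numbers are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define P2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round up */
	#define P2ALIGN(x, align)	((x) & -(align))	/* round down */

	int
	main(void)
	{
		uint64_t bs = 8192;	/* assumed zv_volblocksize */

		/* A discard of [4096, 20480) shrinks to whole blocks. */
		uint64_t start = P2ROUNDUP((uint64_t)4096, bs);	/* 8192 */
		uint64_t end = P2ALIGN((uint64_t)20480, bs);	/* 16384 */

		printf("freeing [%llu, %llu)\n",
		    (unsigned long long)start, (unsigned long long)end);
		return (0);
	}

The partial blocks at either end are simply not freed; zeroing them via dnode_free_range() would be a slow read-modify-write that reclaims no space.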
 
 /*
  * Common read path running under the zvol taskq context.  This function
@@ -578,6 +662,11 @@ zvol_read(void *arg)
        int error;
        rl_t *rl;
 
+       if (size == 0) {
+               blk_end_request(req, 0, size);
+               return;
+       }
+
        rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
 
        error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);
@@ -627,7 +716,7 @@ zvol_request(struct request_queue *q)
        while ((req = blk_fetch_request(q)) != NULL) {
                size = blk_rq_bytes(req);
 
-               if (blk_rq_pos(req) + blk_rq_sectors(req) >
+               if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
                    get_capacity(zv->zv_disk)) {
                        printk(KERN_INFO
                               "%s: bad access: block=%llu, count=%lu\n",
@@ -656,6 +745,13 @@ zvol_request(struct request_queue *q)
                                break;
                        }
 
+#ifdef HAVE_BLK_QUEUE_DISCARD
+                       if (req->cmd_flags & VDEV_REQ_DISCARD) {
+                               zvol_dispatch(zvol_discard, req);
+                               break;
+                       }
+#endif /* HAVE_BLK_QUEUE_DISCARD */
+
                        zvol_dispatch(zvol_write, req);
                        break;
                default:
@@ -698,7 +794,7 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
        ASSERT(zio != NULL);
        ASSERT(size != 0);
 
-       zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
+       zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
        zgd->zgd_zilog = zv->zv_zilog;
        zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
 
@@ -814,8 +910,18 @@ zvol_last_close(zvol_state_t *zv)
 {
        zil_close(zv->zv_zilog);
        zv->zv_zilog = NULL;
+
        dmu_buf_rele(zv->zv_dbuf, zvol_tag);
        zv->zv_dbuf = NULL;
+
+       /*
+        * Evict cached data.  If the dataset is dirty, wait for the
+        * pending changes to be written out first so the eviction
+        * does not discard unwritten data.
+        */
+       if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
+           !(zv->zv_flags & ZVOL_RDONLY))
+               txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
+       (void) dmu_objset_evict_dbufs(zv->zv_objset);
+
        dmu_objset_disown(zv->zv_objset, zvol_tag);
        zv->zv_objset = NULL;
 }
@@ -978,7 +1084,7 @@ zvol_probe(dev_t dev, int *part, void *arg)
 
        mutex_enter(&zvol_state_lock);
        zv = zvol_find_by_dev(dev);
-       kobj = zv ? get_disk(zv->zv_disk) : ERR_PTR(-ENOENT);
+       kobj = zv ? get_disk(zv->zv_disk) : NULL;
        mutex_exit(&zvol_state_lock);
 
        return kobj;
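
Why NULL rather than ERR_PTR(-ENOENT): the block layer's probe callers test the returned kobject against NULL only, so a non-NULL error pointer slips through and is later dereferenced. A hedged sketch of a caller shaped like the kernel's kobj_lookup():

	struct kobject *kobj = probe(dev, &part, data);
	if (kobj)		/* ERR_PTR(-ENOENT) is non-NULL ... */
		return kobj;	/* ... and escapes to be dereferenced */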
@@ -1053,6 +1159,7 @@ static zvol_state_t *
 zvol_alloc(dev_t dev, const char *name)
 {
        zvol_state_t *zv;
+       int error = 0;
 
        zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
        if (zv == NULL)
@@ -1062,6 +1169,21 @@ zvol_alloc(dev_t dev, const char *name)
        if (zv->zv_queue == NULL)
                goto out_kmem;
 
+#ifdef HAVE_ELEVATOR_CHANGE
+       error = elevator_change(zv->zv_queue, "noop");
+#endif /* HAVE_ELEVATOR_CHANGE */
+       if (error) {
+               printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
+                   "noop", name, error);
+               goto out_queue;
+       }
+
+#ifdef HAVE_BLK_QUEUE_FLUSH
+       blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
+#else
+       blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
+#endif /* HAVE_BLK_QUEUE_FLUSH */
+
        zv->zv_disk = alloc_disk(ZVOL_MINORS);
        if (zv->zv_disk == NULL)
                goto out_queue;
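
A brief reading of the compat shim above: on kernels that provide blk_queue_flush(), the queue advertises cache-flush and FUA support, which is what lets zvol_write() key zil_commit() off VDEV_REQ_FLUSH and VDEV_REQ_FUA; older kernels fall back to a drain-ordered queue via blk_queue_ordered().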
@@ -1164,14 +1286,29 @@ __zvol_create_minor(const char *name)
 
        set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
 
+       blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
+       blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
+       blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
+       blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
+       blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
+#ifdef HAVE_BLK_QUEUE_DISCARD
+       blk_queue_max_discard_sectors(zv->zv_queue,
+           (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
+       blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
+       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
+#endif
+#ifdef HAVE_BLK_QUEUE_NONROT
+       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
+#endif
+
        if (zil_replay_disable)
                zil_destroy(dmu_objset_zil(os), B_FALSE);
        else
                zil_replay(os, zv, zvol_replay_vector);
 
+       zv->zv_objset = NULL;
 out_dmu_objset_disown:
        dmu_objset_disown(os, zvol_tag);
-       zv->zv_objset = NULL;
 out_doi:
        kmem_free(doi, sizeof(dmu_object_info_t));
 out:
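
The discard limit configured above is easy to sanity-check. A runnable sketch using the module default and an assumed 8K volblocksize:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long zvol_max_discard_blocks = 16384;	/* default */
		unsigned long volblocksize = 8192;	/* assumed 8K zvol */

		/* (16384 * 8192) >> 9 = 262144 sectors, 128 MiB per discard */
		printf("max discard sectors: %lu\n",
		    (zvol_max_discard_blocks * volblocksize) >> 9);
		return (0);
	}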
@@ -1257,6 +1394,9 @@ zvol_create_minors(const char *pool)
        spa_t *spa = NULL;
        int error = 0;
 
+       if (zvol_inhibit_dev)
+               return (0);
+
        mutex_enter(&zvol_state_lock);
        if (pool) {
                error = dmu_objset_find_spa(NULL, pool, zvol_create_minors_cb,
@@ -1286,6 +1426,9 @@ zvol_remove_minors(const char *pool)
        zvol_state_t *zv, *zv_next;
        char *str;
 
+       if (zvol_inhibit_dev)
+               return;
+
        str = kmem_zalloc(MAXNAMELEN, KM_SLEEP);
        if (pool) {
                (void) strncpy(str, pool, strlen(pool));
@@ -1310,9 +1453,6 @@ zvol_init(void)
 {
        int error;
 
-       if (!zvol_threads)
-               zvol_threads = num_online_cpus();
-
        zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
                                  zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
        if (zvol_taskq == NULL) {
@@ -1350,8 +1490,14 @@ zvol_fini(void)
        list_destroy(&zvol_state_list);
 }
 
+module_param(zvol_inhibit_dev, uint, 0644);
+MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
+
 module_param(zvol_major, uint, 0444);
 MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
 
 module_param(zvol_threads, uint, 0444);
 MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");
+
+module_param(zvol_max_discard_blocks, ulong, 0444);
+MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard at once");
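
Usage note (an illustrative example, not part of the patch): these parameters can be set at module load time, e.g. modprobe zfs zvol_inhibit_dev=1 zvol_max_discard_blocks=16384, and read back under /sys/module/zfs/parameters/. The 0644 permission on zvol_inhibit_dev additionally allows it to be toggled at runtime, while the 0444 parameters are read-only once loaded.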