// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not
	 * need to hold the mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/*
	 * Tracks references from block_device_operations open/release and
	 * virtio_driver probe/remove so this object can be freed once no
	 * longer in use.
	 */
	refcount_t refs;

	/* What the host tells us, plus 2 for the header and status byte. */
	unsigned int sg_elems;

	/* IDA index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
	struct scsi_request sreq;	/* for SCSI passthrough, must be first */
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	struct virtio_scsi_inhdr in_hdr;
#endif
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}

/*
 * If this is a packet command we need a couple of additional headers. Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
	sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
	sgs[num_out++] = &cmd;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
	sgs[num_out + num_in++] = &sense;
	sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
	sgs[num_out + num_in++] = &inhdr;
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
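
/*
 * Resulting descriptor layout for a SCSI passthrough request, as built
 * above: driver->device segments first (outhdr, CDB, write data), then
 * device->driver segments (read data, sense buffer, virtio_scsi_inhdr,
 * one-byte status).
 */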

static inline void virtblk_scsi_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	struct virtio_blk *vblk = req->q->queuedata;
	struct scsi_request *sreq = &vbr->sreq;

	sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
	sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
	sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
		struct virtblk_req *vbr, struct scatterlist *data_sg,
		bool have_data)
{
	return -EIO;
}
static inline void virtblk_scsi_request_done(struct request *req)
{
}
#define virtblk_ioctl	NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
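
/*
 * For a regular block request the chain built above is at most three
 * entries: outhdr (out), optional data (out for writes, in for reads),
 * and the one-byte status (in).
 */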

static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * Single max discard segment means multi-range discard isn't
	 * supported, and the block layer only runs contiguity merge like
	 * a normal RW request. So we can't rely on the bios for retrieving
	 * each range's info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}
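
/*
 * The range[] array allocated above is handed to the device as the request
 * payload via RQF_SPECIAL_PAYLOAD and freed again in virtblk_request_done()
 * once the request completes.
 */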

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}

	switch (req_op(req)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		virtblk_scsi_request_done(req);
		break;
	}

	blk_mq_end_request(req, virtblk_result(vbr));
}

static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
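
/*
 * Note: virtblk_done() runs from the virtqueue interrupt callback, so it
 * only harvests used buffers and defers the heavier completion work to
 * virtblk_request_done() via blk_mq_complete_request(). The
 * disable_cb/enable_cb loop closes the race with buffers that become used
 * while the handler runs.
 */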

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	bool unmap = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
		err = virtblk_setup_discard_write_zeroes(req, unmap);
		if (err)
			return BLK_STS_RESOURCE;
	}

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (blk_rq_is_scsi(req))
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}
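
/*
 * On the error mapping above: BLK_STS_DEV_RESOURCE tells blk-mq not to
 * re-run the queue on its own; the stopped hw queue is restarted from
 * virtblk_done() once the device has consumed buffers. BLK_STS_RESOURCE
 * instead lets blk-mq retry the request itself after a delay.
 */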

/* Return the disk's id (serial number) string into *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}
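
/*
 * The REQ_OP_DRV_IN request above is translated to VIRTIO_BLK_T_GET_ID in
 * virtio_queue_rq(), so the device fills the buffer with up to
 * VIRTIO_BLK_ID_BYTES of serial number data.
 */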

static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
	if (refcount_dec_and_test(&vblk->refs)) {
		ida_simple_remove(&vd_index_ida, vblk->index);
		mutex_destroy(&vblk->vdev_mutex);
		kfree(vblk);
	}
}

static int virtblk_open(struct block_device *bd, fmode_t mode)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (vblk->vdev)
		virtblk_get(vblk);
	else
		ret = -ENXIO;

	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
	struct virtio_blk *vblk = disk->private_data;

	virtblk_put(vblk);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static const struct block_device_operations virtblk_fops = {
	.ioctl = virtblk_ioctl,
	.owner = THIS_MODULE,
	.open = virtblk_open,
	.release = virtblk_release,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}
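
/*
 * With PART_BITS == 4, each disk spans 1 << 4 == 16 minors: the whole-disk
 * node plus up to 15 partitions.
 */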

static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity(vblk->disk, capacity);
}
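
/*
 * The capacity config field is defined by the virtio spec in 512-byte
 * sectors regardless of the logical block size, hence the >> 9 when
 * converting to logical blocks above.
 */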

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	char *envp[] = { "RESIZE=1", NULL };

	virtblk_update_capacity(vblk, true);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}
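
/*
 * Resulting topology: with VIRTIO_BLK_F_MQ the device advertises num_queues
 * request virtqueues ("req.0" .. "req.N-1"), clamped to nr_cpu_ids; without
 * the feature the driver falls back to a single queue.
 */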

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
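
/*
 * Example of the bijective base-26 naming above: index 0 -> "vda",
 * 25 -> "vdz", 26 -> "vdaa", 27 -> "vdab", 701 -> "vdzz", 702 -> "vdaaa".
 */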

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);
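
/*
 * The value written to the wce config byte doubles as the index into
 * virtblk_cache_types[]: 0 selects "write through", 1 selects "write back".
 */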

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};

static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct virtio_blk *vblk = set->driver_data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
	vbr->sreq.sense = vbr->sense;
#endif
	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					vblk->vdev, 0);
}

#ifdef CONFIG_VIRTIO_BLK_SCSI
static void virtblk_initialize_rq(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	scsi_req_init(&vbr->sreq);
}
#endif

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq = virtio_queue_rq,
	.commit_rqs = virtio_commit_rqs,
	.complete = virtblk_request_done,
	.init_request = virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	.initialize_rq_fn = virtblk_initialize_rq,
#endif
	.map_queues = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
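
/*
 * A queue_depth of 0 (the default) means "size from the virtqueue":
 * virtblk_probe() derives the depth from the ring's free entries, halved
 * when indirect descriptors are not negotiated.
 */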

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	/* This reference is dropped in virtblk_remove(). */
	refcount_set(&vblk->refs, 1);
	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems - 2);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	max_size = virtio_max_dma_size(vdev);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err) {
		err = blk_validate_block_size(blk_size);
		if (err) {
			dev_err(&vdev->dev,
				"virtio_blk: invalid block size: 0x%x\n",
				blk_size);
			goto out_cleanup_queue;
		}

		blk_queue_logical_block_size(q, blk_size);
	} else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);
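
	/*
	 * All four topology fields above scale with the logical block size:
	 * e.g. physical_block_exp == 3 on a 512-byte device yields a
	 * 4096-byte physical block size.
	 */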

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &v);
		if (v)
			q->limits.discard_granularity = v << SECTOR_SHIFT;
		else
			q->limits.discard_granularity = blk_size;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);

		/*
		 * max_discard_seg == 0 is out of spec but we always
		 * handled it.
		 */
		if (!v)
			v = sg_elems - 2;
		blk_queue_max_discard_segments(q,
					       min(v, MAX_DISCARD_SEGMENTS));

		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	virtblk_put(vblk);
}
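
/*
 * virtblk_put() drops the reference taken in virtblk_probe(); if an opener
 * still holds a reference, the virtio_blk object lives on until
 * virtblk_release() drops the final one.
 */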

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	VIRTIO_BLK_F_SCSI,
#endif
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
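
/*
 * VIRTIO_BLK_F_SCSI is legacy-only: virtio 1.0 dropped SCSI command
 * passthrough, so the feature must not be negotiated by modern devices and
 * is therefore absent from the non-legacy table above.
 */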

static struct virtio_driver virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtblk_probe,
	.remove = virtblk_remove,
	.config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtblk_freeze,
	.restore = virtblk_restore,
#endif
};

static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");