GNU Linux-libre 6.7.9-gnu
drivers/block/virtio_blk.c
// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT	0
#else
#define VIRTIO_BLK_INLINE_SG_CNT	2
#endif
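
/*
 * Illustrative note: each request PDU embeds VIRTIO_BLK_INLINE_SG_CNT
 * scatterlist entries (see the cmd_size computation in virtblk_probe()).
 * Requests with more physical segments chain out to a dynamically
 * allocated table via sg_alloc_table_chained() in virtblk_map_data().
 * On architectures that cannot chain scatterlists
 * (CONFIG_ARCH_NO_SG_CHAIN), nothing is inlined and the full table is
 * always allocated.
 */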

static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
		 "Limit the number of request queues to use for the block device. "
		 "0 for no limit. "
		 "Values > nr_cpu_ids are truncated to nr_cpu_ids.");

static unsigned int poll_queues;
module_param(poll_queues, uint, 0644);
MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	int io_queues[HCTX_MAX_TYPES];
	struct virtio_blk_vq *vqs;

	/* For zoned device */
	unsigned int zone_sectors;
};

struct virtblk_req {
	/* Out header */
	struct virtio_blk_outhdr out_hdr;

	/* In header */
	union {
		u8 status;

		/*
		 * The zone append command has an extended in header.
		 * The status field in zone_append_in_hdr must always
		 * be the last byte.
		 */
		struct {
			__virtio64 sector;
			u8 status;
		} zone_append;
	} in_hdr;

	size_t in_hdr_len;

	struct sg_table sg_table;
	struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(u8 status)
{
	switch (status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	case VIRTIO_BLK_S_ZONE_OPEN_RESOURCE:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	case VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case VIRTIO_BLK_S_IOERR:
	case VIRTIO_BLK_S_ZONE_UNALIGNED_WP:
	default:
		return BLK_STS_IOERR;
	}
}

static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];

	return vq;
}

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
	struct scatterlist out_hdr, in_hdr, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&out_hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &out_hdr;

	if (vbr->sg_table.nents) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = vbr->sg_table.sgl;
		else
			sgs[num_out + num_in++] = vbr->sg_table.sgl;
	}

	sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
	sgs[num_out + num_in++] = &in_hdr;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
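
/*
 * Illustrative note: virtblk_add_req() lays a request out on the ring as up
 * to three scatterlist groups.  For a write (VIRTIO_BLK_T_OUT) the data sits
 * on the driver->device side; for a read it sits on the device->driver side:
 *
 *   write:  driver fills [out_hdr][data...], device writes [in_hdr/status]
 *   read:   driver fills [out_hdr],          device writes [data...][in_hdr/status]
 *
 * Hence sgs[] never needs more than three slots: header, optional data
 * table, status.
 */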

static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * A single max discard segment means multi-range discard isn't
	 * supported, and the block layer only runs contiguity merging as
	 * for a normal RW request. So we can't rely on the bios for
	 * retrieving the per-range information.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments);
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}
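
/*
 * Illustrative note: the range array built above travels as the request's
 * data payload rather than as driver metadata.  A two-bio discard, for
 * example, becomes an array of two little-endian records on the wire:
 *
 *   { sector = S0, num_sectors = N0, flags }
 *   { sector = S1, num_sectors = N1, flags }
 *
 * attached via req->special_vec (RQF_SPECIAL_PAYLOAD), which
 * virtblk_map_data() then maps like ordinary data.
 */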

static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
	if (blk_rq_nr_phys_segments(req))
		sg_free_table_chained(&vbr->sg_table,
				      VIRTIO_BLK_INLINE_SG_CNT);
}

static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
		struct virtblk_req *vbr)
{
	int err;

	if (!blk_rq_nr_phys_segments(req))
		return 0;

	vbr->sg_table.sgl = vbr->sg;
	err = sg_alloc_table_chained(&vbr->sg_table,
				     blk_rq_nr_phys_segments(req),
				     vbr->sg_table.sgl,
				     VIRTIO_BLK_INLINE_SG_CNT);
	if (unlikely(err))
		return -ENOMEM;

	return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}

static void virtblk_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		kfree(bvec_virt(&req->special_vec));
}

static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
				      struct request *req,
				      struct virtblk_req *vbr)
{
	size_t in_hdr_len = sizeof(vbr->in_hdr.status);
	bool unmap = false;
	u32 type;
	u64 sector = 0;

	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
		return BLK_STS_NOTSUPP;

	/* Set fields for all request types */
	vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

	switch (req_op(req)) {
	case REQ_OP_READ:
		type = VIRTIO_BLK_T_IN;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_WRITE:
		type = VIRTIO_BLK_T_OUT;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_SECURE_ERASE:
		type = VIRTIO_BLK_T_SECURE_ERASE;
		break;
	case REQ_OP_ZONE_OPEN:
		type = VIRTIO_BLK_T_ZONE_OPEN;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_CLOSE:
		type = VIRTIO_BLK_T_ZONE_CLOSE;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_FINISH:
		type = VIRTIO_BLK_T_ZONE_FINISH;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_APPEND:
		type = VIRTIO_BLK_T_ZONE_APPEND;
		sector = blk_rq_pos(req);
		in_hdr_len = sizeof(vbr->in_hdr.zone_append);
		break;
	case REQ_OP_ZONE_RESET:
		type = VIRTIO_BLK_T_ZONE_RESET;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_RESET_ALL:
		type = VIRTIO_BLK_T_ZONE_RESET_ALL;
		break;
	case REQ_OP_DRV_IN:
		/*
		 * Out header has already been prepared by the caller (virtblk_get_id()
		 * or virtblk_submit_zone_report()), nothing to do here.
		 */
		return 0;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	/* Set fields for non-REQ_OP_DRV_IN request types */
	vbr->in_hdr_len = in_hdr_len;
	vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
	vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector);

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
	    type == VIRTIO_BLK_T_SECURE_ERASE) {
		if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
			return BLK_STS_RESOURCE;
	}

	return 0;
}

/*
 * The status byte is always the last byte of the virtblk request
 * in-header. This helper fetches its value for all in-header formats
 * that are currently defined.
 */
static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
{
	return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
}
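
/*
 * Illustrative note: for the common request types in_hdr_len is 1, so the
 * expression above simply reads vbr->in_hdr.status.  For zone append,
 * in_hdr_len is sizeof(in_hdr.zone_append) and the device places the status
 * in the final byte of the in-header buffer, so the same "last byte"
 * arithmetic finds it without branching on the request type.
 */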

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;

	virtblk_unmap_data(req, vbr);
	virtblk_cleanup_cmd(req);

	if (req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = virtio64_to_cpu(vblk->vdev,
						vbr->in_hdr.zone_append.sector);

	blk_mq_end_request(req, status);
}

static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}

static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
{
	virtblk_cleanup_cmd(req);
	switch (rc) {
	case -ENOSPC:
		return BLK_STS_DEV_RESOURCE;
	case -ENOMEM:
		return BLK_STS_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
					struct virtio_blk *vblk,
					struct request *req,
					struct virtblk_req *vbr)
{
	blk_status_t status;
	int num;

	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
	if (unlikely(status))
		return status;

	num = virtblk_map_data(hctx, req, vbr);
	if (unlikely(num < 0))
		return virtblk_fail_to_queue(req, -ENOMEM);
	vbr->sg_table.nents = num;

	blk_mq_start_request(req);

	return BLK_STS_OK;
}

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int qid = hctx->queue_num;
	bool notify = false;
	blk_status_t status;
	int err;

	status = virtblk_prep_rq(hctx, vblk, req, vbr);
	if (unlikely(status))
		return status;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		virtblk_unmap_data(req, vbr);
		return virtblk_fail_to_queue(req, err);
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

static bool virtblk_prep_rq_batch(struct request *req)
{
	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}

static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
					struct request **rqlist)
{
	unsigned long flags;
	int err;
	bool kick;

	spin_lock_irqsave(&vq->lock, flags);

	while (!rq_list_empty(*rqlist)) {
		struct request *req = rq_list_pop(rqlist);
		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

		err = virtblk_add_req(vq->vq, vbr);
		if (err) {
			virtblk_unmap_data(req, vbr);
			virtblk_cleanup_cmd(req);
			blk_mq_requeue_request(req, true);
		}
	}

	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->lock, flags);

	return kick;
}

static void virtio_queue_rqs(struct request **rqlist)
{
	struct request *req, *next, *prev = NULL;
	struct request *requeue_list = NULL;

	rq_list_for_each_safe(rqlist, req, next) {
		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
		bool kick;

		if (!virtblk_prep_rq_batch(req)) {
			rq_list_move(rqlist, &requeue_list, req, prev);
			req = prev;
			if (!req)
				continue;
		}

		if (!next || req->mq_hctx != next->mq_hctx) {
			req->rq_next = NULL;
			kick = virtblk_add_req_batch(vq, rqlist);
			if (kick)
				virtqueue_notify(vq->vq);

			*rqlist = next;
			prev = NULL;
		} else
			prev = req;
	}

	*rqlist = requeue_list;
}
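
/*
 * Illustrative note: virtio_queue_rqs() splits the plugged list at hardware
 * queue boundaries so each virtqueue is locked and kicked once per run.
 * For instance, a list [A(hctx0), B(hctx0), C(hctx1)] is submitted as the
 * batch {A, B} with one notification on vq0, then {C} with one on vq1;
 * requests that fail preparation are collected on requeue_list instead.
 */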

#ifdef CONFIG_BLK_DEV_ZONED
static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
					  unsigned int nr_zones,
					  size_t *buflen)
{
	struct request_queue *q = vblk->disk->queue;
	size_t bufsize;
	void *buf;

	nr_zones = min_t(unsigned int, nr_zones,
			 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));

	bufsize = sizeof(struct virtio_blk_zone_report) +
		nr_zones * sizeof(struct virtio_blk_zone_descriptor);
	bufsize = min_t(size_t, bufsize,
			queue_max_hw_sectors(q) << SECTOR_SHIFT);
	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);

	while (bufsize >= sizeof(struct virtio_blk_zone_report)) {
		buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
		if (buf) {
			*buflen = bufsize;
			return buf;
		}
		bufsize >>= 1;
	}

	return NULL;
}
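
/*
 * Illustrative sizing example (assuming the 64-byte report header and
 * 64-byte zone descriptors of the UAPI structs): a report of 128 zones asks
 * for 64 + 128 * 64 bytes, a little over 8 KiB, before being clamped to the
 * queue's max_hw_sectors/max_segments limits.  On allocation failure the
 * buffer is halved until it can no longer hold even the header.
 */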

static int virtblk_submit_zone_report(struct virtio_blk *vblk,
				       char *report_buf, size_t report_len,
				       sector_t sector)
{
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	struct virtblk_req *vbr;
	int err;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	vbr = blk_mq_rq_to_pdu(req);
	vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
	vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);

	err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(req, false);
	err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
	blk_mq_free_request(req);
	return err;
}

static int virtblk_parse_zone(struct virtio_blk *vblk,
			       struct virtio_blk_zone_descriptor *entry,
			       unsigned int idx, report_zones_cb cb, void *data)
{
	struct blk_zone zone = { };

	zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
	if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
		zone.len = vblk->zone_sectors;
	else
		zone.len = get_capacity(vblk->disk) - zone.start;
	zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
	zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);

	switch (entry->z_type) {
	case VIRTIO_BLK_ZT_SWR:
		zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		break;
	case VIRTIO_BLK_ZT_SWP:
		zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
		break;
	case VIRTIO_BLK_ZT_CONV:
		zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
		break;
	default:
		dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
			zone.start, entry->z_type);
		return -EIO;
	}

	switch (entry->z_state) {
	case VIRTIO_BLK_ZS_EMPTY:
		zone.cond = BLK_ZONE_COND_EMPTY;
		break;
	case VIRTIO_BLK_ZS_CLOSED:
		zone.cond = BLK_ZONE_COND_CLOSED;
		break;
	case VIRTIO_BLK_ZS_FULL:
		zone.cond = BLK_ZONE_COND_FULL;
		zone.wp = zone.start + zone.len;
		break;
	case VIRTIO_BLK_ZS_EOPEN:
		zone.cond = BLK_ZONE_COND_EXP_OPEN;
		break;
	case VIRTIO_BLK_ZS_IOPEN:
		zone.cond = BLK_ZONE_COND_IMP_OPEN;
		break;
	case VIRTIO_BLK_ZS_NOT_WP:
		zone.cond = BLK_ZONE_COND_NOT_WP;
		break;
	case VIRTIO_BLK_ZS_RDONLY:
		zone.cond = BLK_ZONE_COND_READONLY;
		zone.wp = ULONG_MAX;
		break;
	case VIRTIO_BLK_ZS_OFFLINE:
		zone.cond = BLK_ZONE_COND_OFFLINE;
		zone.wp = ULONG_MAX;
		break;
	default:
		dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
			zone.start, entry->z_state);
		return -EIO;
	}

	/*
	 * The callback below checks the validity of the reported
	 * entry data, no need to further validate it here.
	 */
	return cb(&zone, idx, data);
}

static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
				 unsigned int nr_zones, report_zones_cb cb,
				 void *data)
{
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_blk_zone_report *report;
	unsigned long long nz, i;
	size_t buflen;
	unsigned int zone_idx = 0;
	int ret;

	if (WARN_ON_ONCE(!vblk->zone_sectors))
		return -EOPNOTSUPP;

	report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
	if (!report)
		return -ENOMEM;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto fail_report;
	}

	while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
		memset(report, 0, buflen);

		ret = virtblk_submit_zone_report(vblk, (char *)report,
						 buflen, sector);
		if (ret)
			goto fail_report;

		nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
			   nr_zones);
		if (!nz)
			break;

		for (i = 0; i < nz && zone_idx < nr_zones; i++) {
			ret = virtblk_parse_zone(vblk, &report->zones[i],
						 zone_idx, cb, data);
			if (ret)
				goto fail_report;

			sector = virtio64_to_cpu(vblk->vdev,
						 report->zones[i].z_start) +
				 vblk->zone_sectors;
			zone_idx++;
		}
	}

	if (zone_idx > 0)
		ret = zone_idx;
	else
		ret = -EINVAL;
fail_report:
	mutex_unlock(&vblk->vdev_mutex);
	kvfree(report);
	return ret;
}

static void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
	u8 model;

	virtio_cread(vblk->vdev, struct virtio_blk_config,
		     zoned.model, &model);
	switch (model) {
	default:
		dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
		fallthrough;
	case VIRTIO_BLK_Z_NONE:
	case VIRTIO_BLK_Z_HA:
		disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
		return;
	case VIRTIO_BLK_Z_HM:
		WARN_ON_ONCE(!vblk->zone_sectors);
		if (!blk_revalidate_disk_zones(vblk->disk, NULL))
			set_capacity_and_notify(vblk->disk, 0);
	}
}

static int virtblk_probe_zoned_device(struct virtio_device *vdev,
				       struct virtio_blk *vblk,
				       struct request_queue *q)
{
	u32 v, wg;
	u8 model;

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.model, &model);

	switch (model) {
	case VIRTIO_BLK_Z_NONE:
	case VIRTIO_BLK_Z_HA:
		/* Present the host-aware device as non-zoned */
		return 0;
	case VIRTIO_BLK_Z_HM:
		break;
	default:
		dev_err(&vdev->dev, "unsupported zone model %d\n", model);
		return -EINVAL;
	}

	dev_dbg(&vdev->dev, "probing host-managed zoned device\n");

	disk_set_zoned(vblk->disk, BLK_ZONED_HM);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_open_zones, &v);
	disk_set_max_open_zones(vblk->disk, v);
	dev_dbg(&vdev->dev, "max open zones = %u\n", v);

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_active_zones, &v);
	disk_set_max_active_zones(vblk->disk, v);
	dev_dbg(&vdev->dev, "max active zones = %u\n", v);

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.write_granularity, &wg);
	if (!wg) {
		dev_warn(&vdev->dev, "zero write granularity reported\n");
		return -ENODEV;
	}
	blk_queue_physical_block_size(q, wg);
	blk_queue_io_min(q, wg);

	dev_dbg(&vdev->dev, "write granularity = %u\n", wg);

	/*
	 * virtio ZBD specification doesn't require zones to be a power of
	 * two sectors in size, but the code in this driver expects that.
	 */
	virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
		     &vblk->zone_sectors);
	if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
		dev_err(&vdev->dev,
			"zoned device with non power of two zone size %u\n",
			vblk->zone_sectors);
		return -ENODEV;
	}
	blk_queue_chunk_sectors(q, vblk->zone_sectors);
	dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		dev_warn(&vblk->vdev->dev,
			 "ignoring negotiated F_DISCARD for zoned device\n");
		blk_queue_max_discard_sectors(q, 0);
	}

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_append_sectors, &v);
	if (!v) {
		dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
		return -ENODEV;
	}
	if ((v << SECTOR_SHIFT) < wg) {
		dev_err(&vdev->dev,
			"write granularity %u exceeds max_append_sectors %u limit\n",
			wg, v);
		return -ENODEV;
	}
	blk_queue_max_zone_append_sectors(q, v);
	dev_dbg(&vdev->dev, "max append sectors = %u\n", v);

	return blk_revalidate_disk_zones(vblk->disk, NULL);
}

#else

/*
 * Zoned block device support is not configured in this kernel.
 * Host-managed zoned devices can't be supported, but others are
 * good to go as regular block devices.
 */
#define virtblk_report_zones       NULL

static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
}

static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
			struct virtio_blk *vblk, struct request_queue *q)
{
	u8 model;

	virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
	if (model == VIRTIO_BLK_Z_HM) {
		dev_err(&vdev->dev,
			"virtio_blk: zoned devices are not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

/* Return the ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	struct virtblk_req *vbr;
	int err;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	vbr = blk_mq_rq_to_pdu(req);
	vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
	vbr->out_hdr.sector = 0;

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(req, false);
	err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
	blk_mq_free_request(req);
	return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_free_disk(struct gendisk *disk)
{
	struct virtio_blk *vblk = disk->private_data;

	ida_free(&vd_index_ida, vblk->index);
	mutex_destroy(&vblk->vdev_mutex);
	kfree(vblk);
}

static const struct block_device_operations virtblk_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= virtblk_getgeo,
	.free_disk	= virtblk_free_disk,
	.report_zones	= virtblk_report_zones,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}
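
/*
 * Illustrative note: with PART_BITS = 4, each disk reserves 1 << 4 = 16
 * minor numbers (the whole disk plus up to 15 partitions), so ida index 0
 * starts at minor 0, index 1 at minor 16, index 2 at minor 32, and so on.
 */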

static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity_and_notify(vblk->disk, capacity);
}
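
/*
 * Illustrative arithmetic: the config-space capacity is always expressed in
 * 512-byte sectors, so for 4096-byte logical blocks the count above is
 * DIV_ROUND_UP(capacity, 4096 >> 9) = DIV_ROUND_UP(capacity, 8).  A device
 * reporting capacity = 2097152 sectors (1 GiB) thus logs 262144 4096-byte
 * logical blocks.
 */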

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_revalidate_zones(vblk);
	virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
	int err;
	unsigned short i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	unsigned short num_poll_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	if (!err && !num_vqs) {
		dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
		return -EINVAL;
	}

	num_vqs = min_t(unsigned int,
			min_not_zero(num_request_queues, nr_cpu_ids),
			num_vqs);

	num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);

	vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
	vblk->io_queues[HCTX_TYPE_READ] = 0;
	vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

	dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
				vblk->io_queues[HCTX_TYPE_DEFAULT],
				vblk->io_queues[HCTX_TYPE_READ],
				vblk->io_queues[HCTX_TYPE_POLL]);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs - num_poll_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
		names[i] = vblk->vqs[i].name;
	}

	for (; i < num_vqs; i++) {
		callbacks[i] = NULL;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}
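
/*
 * Illustrative example of the queue accounting above: a device advertising
 * num_queues = 8 on a 16-CPU guest booted with poll_queues=2 and
 * num_request_queues=0 ends up with num_vqs = 8 and num_poll_vqs = 2,
 * i.e. "6/0/2 default/read/poll queues".  Poll virtqueues get a NULL
 * callback, so they never raise interrupts and are only drained by
 * virtblk_poll().
 */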

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
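
/*
 * Illustrative examples of the bijective base-26 scheme above:
 * index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 27 -> "vdab",
 * 701 -> "vdzz", 702 -> "vdaaa".
 */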

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};

static void virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;
	int i, qoff;

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = vblk->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(&set->map[i]);
		else
			blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
	}
}
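
/*
 * Illustrative note: continuing the 6/0/2 example from init_vq(), the loop
 * above gives HCTX_TYPE_DEFAULT queue_offset 0 with 6 queues (affinity from
 * the virtio IRQ spreading), HCTX_TYPE_READ zero queues (reads fall back to
 * the default map), and HCTX_TYPE_POLL queue_offset 6 with 2 queues spread
 * by blk_mq_map_queues().
 */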

static void virtblk_complete_batch(struct io_comp_batch *iob)
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
		virtblk_cleanup_cmd(req);
	}
	blk_mq_end_request_batch(iob);
}

static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;
	int found = 0;

	spin_lock_irqsave(&vq->lock, flags);

	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
		struct request *req = blk_mq_rq_from_pdu(vbr);

		found++;
		if (!blk_mq_complete_request_remote(req) &&
		    !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
						virtblk_complete_batch))
			virtblk_request_done(req);
	}

	if (found)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);

	spin_unlock_irqrestore(&vq->lock, flags);

	return found;
}

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.queue_rqs	= virtio_queue_rqs,
	.commit_rqs	= virtio_commit_rqs,
	.complete	= virtblk_request_done,
	.map_queues	= virtblk_map_queues,
	.poll		= virtblk_poll,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u32 max_discard_segs = 0;
	u32 discard_granularity = 0;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	unsigned int queue_depth;
	size_t max_dma_size;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_alloc_range(&vd_index_ida, 0,
			      minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* Prevent integer overflows and honor max vq size */
	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			queue_depth /= 2;
	} else {
		queue_depth = virtblk_queue_depth;
	}
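	/*
	 * Illustrative note: with indirect descriptors a request consumes a
	 * single ring slot, so a 256-entry ring yields queue_depth 256.
	 * Without them a request occupies one slot per scatterlist entry;
	 * the "2 descs per req" heuristic above halves the depth (128 for
	 * the same ring) to leave headroom for multi-descriptor requests.
	 */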

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
	vblk->tag_set.nr_maps = 1;
	if (vblk->io_queues[HCTX_TYPE_POLL])
		vblk->tag_set.nr_maps = 3;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_free_vq;

	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
	if (IS_ERR(vblk->disk)) {
		err = PTR_ERR(vblk->disk);
		goto out_free_tags;
	}
	q = vblk->disk->queue;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->minors = 1 << PART_BITS;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, sg_elems);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, UINT_MAX);

	max_dma_size = virtio_max_dma_size(vdev);
	max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err) {
		err = blk_validate_block_size(blk_size);
		if (err) {
			dev_err(&vdev->dev,
				"virtio_blk: invalid block size: 0x%x\n",
				blk_size);
			goto out_cleanup_disk;
		}

		blk_queue_logical_block_size(q, blk_size);
	} else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &discard_granularity);

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &max_discard_segs);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

	/* The discard and secure erase limits are combined since the Linux
	 * block layer uses the same limit for both commands.
	 *
	 * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
	 * are negotiated, we will use the minimum between the limits.
	 *
	 * The discard sector alignment is set to the minimum between
	 * discard_sector_alignment and secure_erase_sector_alignment.
	 *
	 * The maximum discard segment count is set to the minimum between
	 * max_discard_seg and max_secure_erase_seg.
	 */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {

                virtio_cread(vdev, struct virtio_blk_config,
                             secure_erase_sector_alignment, &v);

                /* secure_erase_sector_alignment must not be zero; the device
                 * has to report a valid number of sectors.
                 */
                if (!v) {
                        dev_err(&vdev->dev,
                                "virtio_blk: secure_erase_sector_alignment can't be 0\n");
                        err = -EINVAL;
                        goto out_cleanup_disk;
                }

                discard_granularity = min_not_zero(discard_granularity, v);

                virtio_cread(vdev, struct virtio_blk_config,
                             max_secure_erase_sectors, &v);

                /* max_secure_erase_sectors must not be zero; the device has
                 * to report a valid number of sectors.
                 */
                if (!v) {
                        dev_err(&vdev->dev,
                                "virtio_blk: max_secure_erase_sectors can't be 0\n");
                        err = -EINVAL;
                        goto out_cleanup_disk;
                }

                blk_queue_max_secure_erase_sectors(q, v);

                virtio_cread(vdev, struct virtio_blk_config,
                             max_secure_erase_seg, &v);

                /* max_secure_erase_seg must not be zero; the device has to
                 * report a valid number of segments.
                 */
                if (!v) {
                        dev_err(&vdev->dev,
                                "virtio_blk: max_secure_erase_seg can't be 0\n");
                        err = -EINVAL;
                        goto out_cleanup_disk;
                }

                max_discard_segs = min_not_zero(max_discard_segs, v);
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
            virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
                /* max_discard_segs and discard_granularity are still 0 only
                 * if the max_discard_seg and discard_sector_alignment config
                 * fields are 0 and VIRTIO_BLK_F_SECURE_ERASE was not
                 * negotiated.  In that case, fall back to default values.
                 */
                if (!max_discard_segs)
                        max_discard_segs = sg_elems;

                blk_queue_max_discard_segments(q,
                                               min(max_discard_segs, MAX_DISCARD_SEGMENTS));

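                /*
                 * discard_granularity is still in 512-byte sectors at this
                 * point; shifting by SECTOR_SHIFT (a multiply by 512)
                 * converts it to the byte value stored in the queue limits.
                 */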
                if (discard_granularity)
                        q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
                else
                        q->limits.discard_granularity = blk_size;
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        /*
         * All steps that follow use the VQs, so they must be placed after
         * the virtio_device_ready() call above.
         */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
                err = virtblk_probe_zoned_device(vdev, vblk, q);
                if (err)
                        goto out_cleanup_disk;
        }

        err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        if (err)
                goto out_cleanup_disk;

        return 0;

out_cleanup_disk:
        put_disk(vblk->disk);
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_free(&vd_index_ida, index);
out:
        return err;
}

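/*
 * Teardown runs in roughly the reverse order of probe: quiesce the
 * config-change work, remove the disk and free the tag set, then reset the
 * device so no virtqueue activity remains before vblk->vdev is cleared
 * under vdev_mutex.
 */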
static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_mq_free_tag_set(&vblk->tag_set);

        mutex_lock(&vblk->vdev_mutex);

        /* Stop all the virtqueues. */
        virtio_reset_device(vdev);

        /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
        vblk->vdev = NULL;

        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);

        mutex_unlock(&vblk->vdev_mutex);

        put_disk(vblk->disk);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure no requests are in the virtqueues before deleting them. */
        blk_mq_freeze_queue(vblk->disk->queue);

        /* Ensure we don't receive any more interrupts */
        virtio_reset_device(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);

        return 0;
}

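/*
 * Restore undoes freeze: recreate the virtqueues, re-enable the device with
 * virtio_device_ready(), and only then let the frozen queue issue I/O again.
 */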
static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unfreeze_queue(vblk->disk->queue);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
        VIRTIO_BLK_F_SECURE_ERASE,
};
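
/*
 * VIRTIO_BLK_F_ZONED is only negotiated with modern (VIRTIO 1.x) devices,
 * so it appears in features[] below but not in features_legacy[] above.
 */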
static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
        VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
};

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

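/*
 * Module init sets up, in order: the workqueue used for config-change
 * handling, a dynamically allocated block major (register_blkdev() with
 * major 0), and the virtio driver itself; the error paths unwind in
 * reverse.
 */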
static int __init virtio_blk_init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit virtio_blk_fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(virtio_blk_init);
module_exit(virtio_blk_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");