4 * XenLinux virtual block device driver.
6 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
7 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
8 * Copyright (c) 2004, Christian Limpach
9 * Copyright (c) 2004, Andrew Warfield
10 * Copyright (c) 2005, Christopher Clark
11 * Copyright (c) 2005, XenSource Ltd
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation; or, when distributed
16 * separately from the Linux kernel or incorporated into other
17 * software packages, subject to the following license:
19 * Permission is hereby granted, free of charge, to any person obtaining a copy
20 * of this source file (the "Software"), to deal in the Software without
21 * restriction, including without limitation the rights to use, copy, modify,
22 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
23 * and to permit persons to whom the Software is furnished to do so, subject to
24 * the following conditions:
26 * The above copyright notice and this permission notice shall be included in
27 * all copies or substantial portions of the Software.
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
34 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
38 #include <linux/interrupt.h>
39 #include <linux/blkdev.h>
40 #include <linux/blk-mq.h>
41 #include <linux/hdreg.h>
42 #include <linux/cdrom.h>
43 #include <linux/module.h>
44 #include <linux/slab.h>
45 #include <linux/mutex.h>
46 #include <linux/scatterlist.h>
47 #include <linux/bitmap.h>
48 #include <linux/list.h>
51 #include <xen/xenbus.h>
52 #include <xen/grant_table.h>
53 #include <xen/events.h>
55 #include <xen/platform_pci.h>
57 #include <xen/interface/grant_table.h>
58 #include <xen/interface/io/blkif.h>
59 #include <xen/interface/io/protocols.h>
61 #include <asm/xen/hypervisor.h>
64 * The minimum segment size supported by the block framework is PAGE_SIZE.
65 * When Linux is using a different page size than Xen, it may not be possible
66 * to put all the data in a single segment.
67 * This can happen when the backend doesn't support indirect descriptors and
68 * therefore the maximum amount of data that a request can carry is
69 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
71 * Note that we only support one extra request. So the Linux page size
72 * should be <= (2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) = 88KB.
75 #define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
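/*
 * Worked example (illustrative, using the usual protocol constants of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11 and a 4KB Xen page): one request
 * carries at most 11 * 4KB = 44KB, and with the single extra request
 * allowed here the ceiling becomes 2 * 44KB = 88KB, so e.g. a 64KB
 * Linux page size still fits without indirect descriptors.
 */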
78 BLKIF_STATE_DISCONNECTED,
79 BLKIF_STATE_CONNECTED,
80 BLKIF_STATE_SUSPENDED,
87 struct list_head node;
99 struct blkif_request req;
100 struct request *request;
101 struct grant **grants_used;
102 struct grant **indirect_grants;
103 struct scatterlist *sg;
105 enum blk_req_status status;
107 #define NO_ASSOCIATED_ID ~0UL
109 * Id of the sibling if we ever need 2 requests when handling a block I/O request.
112 unsigned long associated_id;
120 static DEFINE_MUTEX(blkfront_mutex);
121 static const struct block_device_operations xlvbd_block_fops;
124 * Maximum number of segments in indirect requests; the actual value used by
125 * the frontend driver is the minimum of this value and the value provided
126 * by the backend driver.
129 static unsigned int xen_blkif_max_segments = 32;
130 module_param_named(max_indirect_segments, xen_blkif_max_segments, uint,
132 MODULE_PARM_DESC(max_indirect_segments,
133 "Maximum amount of segments in indirect requests (default is 32)");
135 static unsigned int xen_blkif_max_queues = 4;
136 module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
137 MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
140 * Maximum order of pages to be used for the shared ring between front and
141 * backend, 4KB page granularity is used.
143 static unsigned int xen_blkif_max_ring_order;
144 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
145 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
147 static bool __read_mostly xen_blkif_trusted = true;
148 module_param_named(trusted, xen_blkif_trusted, bool, 0644);
149 MODULE_PARM_DESC(trusted, "Is the backend trusted");
151 #define BLK_RING_SIZE(info) \
152 __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
154 #define BLK_MAX_RING_SIZE \
155 __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
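/*
 * Sizing note (illustrative): __CONST_RING_SIZE() rounds the number of
 * slots down to a power of two, so a single 4KB ring page yields 32
 * request slots and a 4-page ring (max_ring_page_order = 2) yields 128.
 */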
158 * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
159 * characters are enough. Define to 20 to keep consistent with backend.
161 #define RINGREF_NAME_LEN (20)
163 * queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
165 #define QUEUE_NAME_LEN (17)
169 * Every blkfront device can associate with one or more blkfront_ring_info,
170 * depending on how many hardware queues/rings to be used.
172 struct blkfront_ring_info {
173 /* Lock to protect data in every ring buffer. */
174 spinlock_t ring_lock;
175 struct blkif_front_ring ring;
176 unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
177 unsigned int evtchn, irq;
178 struct work_struct work;
179 struct gnttab_free_callback callback;
180 struct blk_shadow shadow[BLK_MAX_RING_SIZE];
181 struct list_head indirect_pages;
182 struct list_head grants;
183 unsigned int persistent_gnts_c;
184 unsigned long shadow_free;
185 struct blkfront_info *dev_info;
189 * We have one of these per vbd, whether ide, scsi or 'other'. They
190 * hang in private_data off the gendisk structure. We may end up
191 * putting all kinds of interesting stuff here :-)
196 struct xenbus_device *xbdev;
199 unsigned int physical_sector_size;
202 enum blkif_state connected;
203 /* Number of pages per ring buffer. */
204 unsigned int nr_ring_pages;
205 struct request_queue *rq;
206 unsigned int feature_flush;
207 unsigned int feature_fua;
208 unsigned int feature_discard:1;
209 unsigned int feature_secdiscard:1;
210 unsigned int discard_granularity;
211 unsigned int discard_alignment;
212 unsigned int feature_persistent:1;
213 unsigned int bounce:1;
214 /* Number of 4KB segments handled */
215 unsigned int max_indirect_segments;
217 struct blk_mq_tag_set tag_set;
218 struct blkfront_ring_info *rinfo;
219 unsigned int nr_rings;
220 /* Save incomplete reqs and bios for migration. */
221 struct list_head requests;
222 struct bio_list bio_list;
225 static unsigned int nr_minors;
226 static unsigned long *minors;
227 static DEFINE_SPINLOCK(minor_lock);
229 #define GRANT_INVALID_REF 0
231 #define PARTS_PER_DISK 16
232 #define PARTS_PER_EXT_DISK 256
234 #define BLKIF_MAJOR(dev) ((dev)>>8)
235 #define BLKIF_MINOR(dev) ((dev) & 0xff)
238 #define EXTENDED (1<<EXT_SHIFT)
239 #define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
240 #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
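/*
 * Decoding example (illustrative): a legacy vdevice packs (major << 8) |
 * minor, so 0x0801 means major 8, minor 1. A vdevice with the EXTENDED
 * bit set instead carries the full minor in BLKIF_MINOR_EXT(), giving
 * PARTS_PER_EXT_DISK (256) minors per disk in the xvd namespace.
 */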
241 #define EMULATED_HD_DISK_MINOR_OFFSET (0)
242 #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
243 #define EMULATED_SD_DISK_MINOR_OFFSET (0)
244 #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
246 #define DEV_NAME "xvd" /* name in /dev */
249 * Grants are always the same size as a Xen page (i.e. 4KB).
250 * A physical segment is always the same size as a Linux page.
251 * Number of grants per physical segment
253 #define GRANTS_PER_PSEG (PAGE_SIZE / XEN_PAGE_SIZE)
255 #define GRANTS_PER_INDIRECT_FRAME \
256 (XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
258 #define PSEGS_PER_INDIRECT_FRAME \
259 (GRANTS_PER_INDIRECT_FRAME / GRANTS_PER_PSEG)
261 #define INDIRECT_GREFS(_grants) \
262 DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
264 #define GREFS(_psegs) ((_psegs) * GRANTS_PER_PSEG)
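/*
 * Worked example (illustrative): on a 4KB-page kernel GRANTS_PER_PSEG is 1
 * and GREFS(n) == n; on a 64KB-page kernel GRANTS_PER_PSEG is 16, so each
 * physical segment needs 16 grant references. One indirect frame holds
 * XEN_PAGE_SIZE / sizeof(struct blkif_request_segment) = 512 grant entries,
 * which is what INDIRECT_GREFS() divides by.
 */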
266 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
267 static void blkfront_gather_backend_features(struct blkfront_info *info);
269 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
271 unsigned long free = rinfo->shadow_free;
273 BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
274 rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
275 rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
279 static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
282 if (rinfo->shadow[id].req.u.rw.id != id)
284 if (rinfo->shadow[id].request == NULL)
286 rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
287 rinfo->shadow[id].request = NULL;
288 rinfo->shadow_free = id;
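/*
 * Note (descriptive, not from the original comments): free shadow slots are
 * chained through req.u.rw.id. shadow_free holds the index of the first
 * free slot and each free slot's id field holds the index of the next one,
 * so get_id_from_freelist() and add_id_to_freelist() are O(1) pops and
 * pushes on a singly linked free list.
 */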
292 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
294 struct blkfront_info *info = rinfo->dev_info;
295 struct page *granted_page;
296 struct grant *gnt_list_entry, *n;
300 gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
305 granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
307 kfree(gnt_list_entry);
310 gnt_list_entry->page = granted_page;
313 gnt_list_entry->gref = GRANT_INVALID_REF;
314 list_add(&gnt_list_entry->node, &rinfo->grants);
321 list_for_each_entry_safe(gnt_list_entry, n,
322 &rinfo->grants, node) {
323 list_del(&gnt_list_entry->node);
325 __free_page(gnt_list_entry->page);
326 kfree(gnt_list_entry);
333 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
335 struct grant *gnt_list_entry;
337 BUG_ON(list_empty(&rinfo->grants));
338 gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
340 list_del(&gnt_list_entry->node);
342 if (gnt_list_entry->gref != GRANT_INVALID_REF)
343 rinfo->persistent_gnts_c--;
345 return gnt_list_entry;
348 static inline void grant_foreign_access(const struct grant *gnt_list_entry,
349 const struct blkfront_info *info)
351 gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
352 info->xbdev->otherend_id,
353 gnt_list_entry->page,
357 static struct grant *get_grant(grant_ref_t *gref_head,
359 struct blkfront_ring_info *rinfo)
361 struct grant *gnt_list_entry = get_free_grant(rinfo);
362 struct blkfront_info *info = rinfo->dev_info;
364 if (gnt_list_entry->gref != GRANT_INVALID_REF)
365 return gnt_list_entry;
367 /* Assign a gref to this page */
368 gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
369 BUG_ON(gnt_list_entry->gref == -ENOSPC);
371 grant_foreign_access(gnt_list_entry, info);
373 /* Grant access to the GFN passed by the caller */
374 gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
375 info->xbdev->otherend_id,
379 return gnt_list_entry;
382 static struct grant *get_indirect_grant(grant_ref_t *gref_head,
383 struct blkfront_ring_info *rinfo)
385 struct grant *gnt_list_entry = get_free_grant(rinfo);
386 struct blkfront_info *info = rinfo->dev_info;
388 if (gnt_list_entry->gref != GRANT_INVALID_REF)
389 return gnt_list_entry;
391 /* Assign a gref to this page */
392 gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
393 BUG_ON(gnt_list_entry->gref == -ENOSPC);
395 struct page *indirect_page;
397 /* Fetch a pre-allocated page to use for indirect grefs */
398 BUG_ON(list_empty(&rinfo->indirect_pages));
399 indirect_page = list_first_entry(&rinfo->indirect_pages,
401 list_del(&indirect_page->lru);
402 gnt_list_entry->page = indirect_page;
404 grant_foreign_access(gnt_list_entry, info);
406 return gnt_list_entry;
409 static const char *op_name(int op)
411 static const char *const names[] = {
412 [BLKIF_OP_READ] = "read",
413 [BLKIF_OP_WRITE] = "write",
414 [BLKIF_OP_WRITE_BARRIER] = "barrier",
415 [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
416 [BLKIF_OP_DISCARD] = "discard" };
418 if (op < 0 || op >= ARRAY_SIZE(names))
426 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
428 unsigned int end = minor + nr;
431 if (end > nr_minors) {
432 unsigned long *bitmap, *old;
434 bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
439 spin_lock(&minor_lock);
440 if (end > nr_minors) {
442 memcpy(bitmap, minors,
443 BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
445 nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
448 spin_unlock(&minor_lock);
452 spin_lock(&minor_lock);
453 if (find_next_bit(minors, end, minor) >= end) {
454 bitmap_set(minors, minor, nr);
458 spin_unlock(&minor_lock);
463 static void xlbd_release_minors(unsigned int minor, unsigned int nr)
465 unsigned int end = minor + nr;
467 BUG_ON(end > nr_minors);
468 spin_lock(&minor_lock);
469 bitmap_clear(minors, minor, nr);
470 spin_unlock(&minor_lock);
473 static void blkif_restart_queue_callback(void *arg)
475 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
476 schedule_work(&rinfo->work);
479 static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
481 /* We don't have real geometry info, but let's at least return
482 values consistent with the size of the device */
483 sector_t nsect = get_capacity(bd->bd_disk);
484 sector_t cylinders = nsect;
488 sector_div(cylinders, hg->heads * hg->sectors);
489 hg->cylinders = cylinders;
490 if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
491 hg->cylinders = 0xffff;
495 static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
496 unsigned command, unsigned long argument)
498 struct blkfront_info *info = bdev->bd_disk->private_data;
501 dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
502 command, (long)argument);
505 case CDROMMULTISESSION:
506 dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
507 for (i = 0; i < sizeof(struct cdrom_multisession); i++)
508 if (put_user(0, (char __user *)(argument + i)))
512 case CDROM_GET_CAPABILITY: {
513 struct gendisk *gd = info->gd;
514 if (gd->flags & GENHD_FL_CD)
520 /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
522 return -EINVAL; /* same return as native Linux */
528 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
530 struct blkif_request **ring_req)
534 *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
535 rinfo->ring.req_prod_pvt++;
537 id = get_id_from_freelist(rinfo);
538 rinfo->shadow[id].request = req;
539 rinfo->shadow[id].status = REQ_PROCESSING;
540 rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
542 rinfo->shadow[id].req.u.rw.id = id;
547 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
549 struct blkfront_info *info = rinfo->dev_info;
550 struct blkif_request *ring_req, *final_ring_req;
553 /* Fill out a communications ring structure. */
554 id = blkif_ring_get_request(rinfo, req, &final_ring_req);
555 ring_req = &rinfo->shadow[id].req;
557 ring_req->operation = BLKIF_OP_DISCARD;
558 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
559 ring_req->u.discard.id = id;
560 ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
561 if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
562 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
564 ring_req->u.discard.flag = 0;
566 /* Copy the request to the ring page. */
567 *final_ring_req = *ring_req;
568 rinfo->shadow[id].status = REQ_WAITING;
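/*
 * Note: the request is assembled in the shadow copy first and only copied
 * into the shared ring slot (*final_ring_req) in one final assignment,
 * presumably so the backend never observes a partially written request;
 * blkif_queue_rw_req() below follows the same pattern.
 */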
573 struct setup_rw_req {
574 unsigned int grant_idx;
575 struct blkif_request_segment *segments;
576 struct blkfront_ring_info *rinfo;
577 struct blkif_request *ring_req;
578 grant_ref_t gref_head;
580 /* Only used when persistent grant is used and it's a read request */
582 unsigned int bvec_off;
585 bool require_extra_req;
586 struct blkif_request *extra_ring_req;
589 static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
590 unsigned int len, void *data)
592 struct setup_rw_req *setup = data;
594 struct grant *gnt_list_entry;
595 unsigned int fsect, lsect;
596 /* Convenient aliases */
597 unsigned int grant_idx = setup->grant_idx;
598 struct blkif_request *ring_req = setup->ring_req;
599 struct blkfront_ring_info *rinfo = setup->rinfo;
601 * We always use the shadow of the first request to store the list
602 * of grants associated with the block I/O request. This makes the
603 * completion easier to handle even if the block I/O request is split in two.
606 struct blk_shadow *shadow = &rinfo->shadow[setup->id];
608 if (unlikely(setup->require_extra_req &&
609 grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
611 * We are using the second request, setup grant_idx
612 * to be the index of the segment array.
614 grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
615 ring_req = setup->extra_ring_req;
618 if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
619 (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
621 kunmap_atomic(setup->segments);
623 n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
624 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
625 shadow->indirect_grants[n] = gnt_list_entry;
626 setup->segments = kmap_atomic(gnt_list_entry->page);
627 ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
630 gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
631 ref = gnt_list_entry->gref;
633 * All the grants are stored in the shadow of the first
634 * request. Therefore we have to use the global index.
636 shadow->grants_used[setup->grant_idx] = gnt_list_entry;
638 if (setup->need_copy) {
641 shared_data = kmap_atomic(gnt_list_entry->page);
643 * this does not wipe data stored outside the
644 * range sg->offset..sg->offset+sg->length.
645 * Therefore, blkback *could* see data from
646 * previous requests. This is OK as long as
647 * persistent grants are shared with just one
648 * domain. It may need refactoring if this changes.
651 memcpy(shared_data + offset,
652 setup->bvec_data + setup->bvec_off,
655 kunmap_atomic(shared_data);
656 setup->bvec_off += len;
660 lsect = fsect + (len >> 9) - 1;
661 if (ring_req->operation != BLKIF_OP_INDIRECT) {
662 ring_req->u.rw.seg[grant_idx] =
663 (struct blkif_request_segment) {
666 .last_sect = lsect };
668 setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
669 (struct blkif_request_segment) {
672 .last_sect = lsect };
675 (setup->grant_idx)++;
678 static void blkif_setup_extra_req(struct blkif_request *first,
679 struct blkif_request *second)
681 uint16_t nr_segments = first->u.rw.nr_segments;
684 * The second request is only present when the first request uses
685 * all its segments. It's always the continuation of the first one.
687 first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
689 second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
690 second->u.rw.sector_number = first->u.rw.sector_number +
691 (BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
693 second->u.rw.handle = first->u.rw.handle;
694 second->operation = first->operation;
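/*
 * Split example (illustrative, assuming BLKIF_MAX_SEGMENTS_PER_REQUEST is
 * 11): a request needing 16 grants is emitted as a first request with
 * segments 0-10 and a second request with the remaining 5, whose
 * sector_number is advanced by 11 * XEN_PAGE_SIZE / 512 = 88 sectors.
 */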
697 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
699 struct blkfront_info *info = rinfo->dev_info;
700 struct blkif_request *ring_req, *extra_ring_req = NULL;
701 struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
702 unsigned long id, extra_id = NO_ASSOCIATED_ID;
703 bool require_extra_req = false;
705 struct setup_rw_req setup = {
709 .need_copy = rq_data_dir(req) && info->bounce,
713 * Used to store if we are able to queue the request by just using
714 * existing persistent grants, or if we have to get new grants,
715 * as there are not sufficiently many free.
717 struct scatterlist *sg;
718 int num_sg, max_grefs, num_grant;
720 max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
721 if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
723 * If we are using indirect segments we need to account
724 * for the indirect grefs used in the request.
726 max_grefs += INDIRECT_GREFS(max_grefs);
729 * We have to reserve 'max_grefs' grants because persistent
730 * grants are shared by all rings.
733 if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
734 gnttab_request_free_callback(
736 blkif_restart_queue_callback,
742 /* Fill out a communications ring structure. */
743 id = blkif_ring_get_request(rinfo, req, &final_ring_req);
744 ring_req = &rinfo->shadow[id].req;
746 num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
748 /* Calculate the number of grants used */
749 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
750 num_grant += gnttab_count_grant(sg->offset, sg->length);
752 require_extra_req = info->max_indirect_segments == 0 &&
753 num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
754 BUG_ON(!HAS_EXTRA_REQ && require_extra_req);
756 rinfo->shadow[id].num_sg = num_sg;
757 if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
758 likely(!require_extra_req)) {
760 * The indirect operation can only be a BLKIF_OP_READ or
763 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
764 ring_req->operation = BLKIF_OP_INDIRECT;
765 ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
766 BLKIF_OP_WRITE : BLKIF_OP_READ;
767 ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
768 ring_req->u.indirect.handle = info->handle;
769 ring_req->u.indirect.nr_segments = num_grant;
771 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
772 ring_req->u.rw.handle = info->handle;
773 ring_req->operation = rq_data_dir(req) ?
774 BLKIF_OP_WRITE : BLKIF_OP_READ;
775 if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
777 * Ideally we can do an unordered flush-to-disk.
778 * In case the backend only supports barriers, use that.
779 * A barrier request is a superset of FUA, so we can
780 * implement it the same way. (It's also a FLUSH+FUA,
781 * since it is guaranteed ordered WRT previous writes.)
783 if (info->feature_flush && info->feature_fua)
784 ring_req->operation =
785 BLKIF_OP_WRITE_BARRIER;
786 else if (info->feature_flush)
787 ring_req->operation =
788 BLKIF_OP_FLUSH_DISKCACHE;
790 ring_req->operation = 0;
792 ring_req->u.rw.nr_segments = num_grant;
793 if (unlikely(require_extra_req)) {
794 extra_id = blkif_ring_get_request(rinfo, req,
795 &final_extra_ring_req);
796 extra_ring_req = &rinfo->shadow[extra_id].req;
799 * Only the first request contains the scatter-gather list.
802 rinfo->shadow[extra_id].num_sg = 0;
804 blkif_setup_extra_req(ring_req, extra_ring_req);
806 /* Link the 2 requests together */
807 rinfo->shadow[extra_id].associated_id = id;
808 rinfo->shadow[id].associated_id = extra_id;
812 setup.ring_req = ring_req;
815 setup.require_extra_req = require_extra_req;
816 if (unlikely(require_extra_req))
817 setup.extra_ring_req = extra_ring_req;
819 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
820 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
822 if (setup.need_copy) {
823 setup.bvec_off = sg->offset;
824 setup.bvec_data = kmap_atomic(sg_page(sg));
827 gnttab_foreach_grant_in_range(sg_page(sg),
830 blkif_setup_rw_req_grant,
834 kunmap_atomic(setup.bvec_data);
837 kunmap_atomic(setup.segments);
839 /* Copy request(s) to the ring page. */
840 *final_ring_req = *ring_req;
841 rinfo->shadow[id].status = REQ_WAITING;
842 if (unlikely(require_extra_req)) {
843 *final_extra_ring_req = *extra_ring_req;
844 rinfo->shadow[extra_id].status = REQ_WAITING;
848 gnttab_free_grant_references(setup.gref_head);
854 * Generate a Xen blkfront IO request from a blk layer request. Reads
855 * and writes are handled as expected.
857 * @req: a request struct
859 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
861 if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
864 if (unlikely(req_op(req) == REQ_OP_DISCARD ||
865 req_op(req) == REQ_OP_SECURE_ERASE))
866 return blkif_queue_discard_req(req, rinfo);
868 return blkif_queue_rw_req(req, rinfo);
871 static inline void flush_requests(struct blkfront_ring_info *rinfo)
875 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
878 notify_remote_via_irq(rinfo->irq);
881 static inline bool blkif_request_flush_invalid(struct request *req,
882 struct blkfront_info *info)
884 return ((req->cmd_type != REQ_TYPE_FS) ||
885 ((req_op(req) == REQ_OP_FLUSH) &&
886 !info->feature_flush) ||
887 ((req->cmd_flags & REQ_FUA) &&
888 !info->feature_fua));
891 static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
892 const struct blk_mq_queue_data *qd)
895 int qid = hctx->queue_num;
896 struct blkfront_info *info = hctx->queue->queuedata;
897 struct blkfront_ring_info *rinfo = NULL;
899 BUG_ON(info->nr_rings <= qid);
900 rinfo = &info->rinfo[qid];
901 blk_mq_start_request(qd->rq);
902 spin_lock_irqsave(&rinfo->ring_lock, flags);
903 if (RING_FULL(&rinfo->ring))
906 if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
909 if (blkif_queue_request(qd->rq, rinfo))
912 flush_requests(rinfo);
913 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
914 return BLK_MQ_RQ_QUEUE_OK;
917 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
918 return BLK_MQ_RQ_QUEUE_ERROR;
921 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
922 blk_mq_stop_hw_queue(hctx);
923 return BLK_MQ_RQ_QUEUE_BUSY;
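/*
 * Summary of the returns above (descriptive): BLK_MQ_RQ_QUEUE_OK once the
 * request is on the ring and the backend notified, BLK_MQ_RQ_QUEUE_ERROR
 * for requests the backend cannot honour (e.g. flush/FUA without the
 * matching feature), and BLK_MQ_RQ_QUEUE_BUSY (with the hardware queue
 * stopped) when the ring is full or the request cannot be queued yet, so
 * blk-mq retries once kick_pending_request_queues() restarts the queue.
 */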
926 static struct blk_mq_ops blkfront_mq_ops = {
927 .queue_rq = blkif_queue_rq,
930 static void blkif_set_queue_limits(struct blkfront_info *info)
932 struct request_queue *rq = info->rq;
933 struct gendisk *gd = info->gd;
934 unsigned int segments = info->max_indirect_segments ? :
935 BLKIF_MAX_SEGMENTS_PER_REQUEST;
937 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
939 if (info->feature_discard) {
940 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
941 blk_queue_max_discard_sectors(rq, get_capacity(gd));
942 rq->limits.discard_granularity = info->discard_granularity;
943 rq->limits.discard_alignment = info->discard_alignment;
944 if (info->feature_secdiscard)
945 queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
948 /* Hard sector size and max sectors impersonate the equiv. hardware. */
949 blk_queue_logical_block_size(rq, info->sector_size);
950 blk_queue_physical_block_size(rq, info->physical_sector_size);
951 blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
953 /* Each segment in a request is up to an aligned page in size. */
954 blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
955 blk_queue_max_segment_size(rq, PAGE_SIZE);
957 /* Ensure a merged request will fit in a single I/O ring slot. */
958 blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
960 /* Make sure buffer addresses are sector-aligned. */
961 blk_queue_dma_alignment(rq, 511);
963 /* Make sure we don't use bounce buffers. */
964 blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
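/*
 * Resulting limits, worked example (illustrative, backend granting 32
 * indirect segments on a 4KB-page kernel): max_hw_sectors becomes
 * 32 * 4096 / 512 = 256 sectors (128KB per request), every segment is
 * capped at one page, and max_segments is 32 / GRANTS_PER_PSEG = 32.
 */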
967 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
968 unsigned int physical_sector_size)
970 struct request_queue *rq;
971 struct blkfront_info *info = gd->private_data;
973 memset(&info->tag_set, 0, sizeof(info->tag_set));
974 info->tag_set.ops = &blkfront_mq_ops;
975 info->tag_set.nr_hw_queues = info->nr_rings;
976 if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
978 * When indirect descriptors are not supported, the I/O request
979 * will be split between multiple requests in the ring.
980 * To avoid problems when sending the request, halve the
981 * depth of the queue.
983 info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
985 info->tag_set.queue_depth = BLK_RING_SIZE(info);
986 info->tag_set.numa_node = NUMA_NO_NODE;
987 info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
988 info->tag_set.cmd_size = 0;
989 info->tag_set.driver_data = info;
991 if (blk_mq_alloc_tag_set(&info->tag_set))
993 rq = blk_mq_init_queue(&info->tag_set);
995 blk_mq_free_tag_set(&info->tag_set);
999 rq->queuedata = info;
1000 info->rq = gd->queue = rq;
1002 info->sector_size = sector_size;
1003 info->physical_sector_size = physical_sector_size;
1004 blkif_set_queue_limits(info);
1009 static const char *flush_info(struct blkfront_info *info)
1011 if (info->feature_flush && info->feature_fua)
1012 return "barrier: enabled;";
1013 else if (info->feature_flush)
1014 return "flush diskcache: enabled;";
1016 return "barrier or flush: disabled;";
1019 static void xlvbd_flush(struct blkfront_info *info)
1021 blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
1022 info->feature_fua ? true : false);
1023 pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
1024 info->gd->disk_name, flush_info(info),
1025 "persistent grants:", info->feature_persistent ?
1026 "enabled;" : "disabled;", "indirect descriptors:",
1027 info->max_indirect_segments ? "enabled;" : "disabled;",
1028 "bounce buffer:", info->bounce ? "enabled" : "disabled;");
1031 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
1034 major = BLKIF_MAJOR(vdevice);
1035 *minor = BLKIF_MINOR(vdevice);
1037 case XEN_IDE0_MAJOR:
1038 *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
1039 *minor = ((*minor / 64) * PARTS_PER_DISK) +
1040 EMULATED_HD_DISK_MINOR_OFFSET;
1042 case XEN_IDE1_MAJOR:
1043 *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
1044 *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
1045 EMULATED_HD_DISK_MINOR_OFFSET;
1047 case XEN_SCSI_DISK0_MAJOR:
1048 *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
1049 *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
1051 case XEN_SCSI_DISK1_MAJOR:
1052 case XEN_SCSI_DISK2_MAJOR:
1053 case XEN_SCSI_DISK3_MAJOR:
1054 case XEN_SCSI_DISK4_MAJOR:
1055 case XEN_SCSI_DISK5_MAJOR:
1056 case XEN_SCSI_DISK6_MAJOR:
1057 case XEN_SCSI_DISK7_MAJOR:
1058 *offset = (*minor / PARTS_PER_DISK) +
1059 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
1060 EMULATED_SD_DISK_NAME_OFFSET;
1062 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
1063 EMULATED_SD_DISK_MINOR_OFFSET;
1065 case XEN_SCSI_DISK8_MAJOR:
1066 case XEN_SCSI_DISK9_MAJOR:
1067 case XEN_SCSI_DISK10_MAJOR:
1068 case XEN_SCSI_DISK11_MAJOR:
1069 case XEN_SCSI_DISK12_MAJOR:
1070 case XEN_SCSI_DISK13_MAJOR:
1071 case XEN_SCSI_DISK14_MAJOR:
1072 case XEN_SCSI_DISK15_MAJOR:
1073 *offset = (*minor / PARTS_PER_DISK) +
1074 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
1075 EMULATED_SD_DISK_NAME_OFFSET;
1077 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
1078 EMULATED_SD_DISK_MINOR_OFFSET;
1081 *offset = *minor / PARTS_PER_DISK;
1084 printk(KERN_WARNING "blkfront: your disk configuration is "
1085 "incorrect, please use an xvd device instead\n");
1091 static char *encode_disk_name(char *ptr, unsigned int n)
1094 ptr = encode_disk_name(ptr, n / 26 - 1);
1095 *ptr = 'a' + n % 26;
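/*
 * Naming example (descriptive): offsets 0..25 map to xvda..xvdz and offset
 * 26 to xvdaa, since the recursion emits the most significant "digit" first
 * in a bijective base-26 alphabet appended to DEV_NAME.
 */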
1099 static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
1100 struct blkfront_info *info,
1101 u16 vdisk_info, u16 sector_size,
1102 unsigned int physical_sector_size)
1107 unsigned int offset;
1112 BUG_ON(info->gd != NULL);
1113 BUG_ON(info->rq != NULL);
1115 if ((info->vdevice>>EXT_SHIFT) > 1) {
1116 /* this is above the extended range; something is wrong */
1117 printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
1121 if (!VDEV_IS_EXTENDED(info->vdevice)) {
1122 err = xen_translate_vdev(info->vdevice, &minor, &offset);
1125 nr_parts = PARTS_PER_DISK;
1127 minor = BLKIF_MINOR_EXT(info->vdevice);
1128 nr_parts = PARTS_PER_EXT_DISK;
1129 offset = minor / nr_parts;
1130 if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
1131 printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
1132 "emulated IDE disks,\n\t choose an xvd device name"
1133 "from xvde on\n", info->vdevice);
1135 if (minor >> MINORBITS) {
1136 pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
1137 info->vdevice, minor);
1141 if ((minor % nr_parts) == 0)
1142 nr_minors = nr_parts;
1144 err = xlbd_reserve_minors(minor, nr_minors);
1149 gd = alloc_disk(nr_minors);
1153 strcpy(gd->disk_name, DEV_NAME);
1154 ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
1155 BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
1159 snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
1160 "%d", minor & (nr_parts - 1));
1162 gd->major = XENVBD_MAJOR;
1163 gd->first_minor = minor;
1164 gd->fops = &xlvbd_block_fops;
1165 gd->private_data = info;
1166 set_capacity(gd, capacity);
1168 if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
1175 if (vdisk_info & VDISK_READONLY)
1178 if (vdisk_info & VDISK_REMOVABLE)
1179 gd->flags |= GENHD_FL_REMOVABLE;
1181 if (vdisk_info & VDISK_CDROM)
1182 gd->flags |= GENHD_FL_CD;
1187 xlbd_release_minors(minor, nr_minors);
1192 static void xlvbd_release_gendisk(struct blkfront_info *info)
1194 unsigned int minor, nr_minors, i;
1196 if (info->rq == NULL)
1199 /* No more blkif_request(). */
1200 blk_mq_stop_hw_queues(info->rq);
1202 for (i = 0; i < info->nr_rings; i++) {
1203 struct blkfront_ring_info *rinfo = &info->rinfo[i];
1205 /* No more gnttab callback work. */
1206 gnttab_cancel_free_callback(&rinfo->callback);
1208 /* Flush gnttab callback work. Must be done with no locks held. */
1209 flush_work(&rinfo->work);
1212 del_gendisk(info->gd);
1214 minor = info->gd->first_minor;
1215 nr_minors = info->gd->minors;
1216 xlbd_release_minors(minor, nr_minors);
1218 blk_cleanup_queue(info->rq);
1219 blk_mq_free_tag_set(&info->tag_set);
1226 /* Already hold rinfo->ring_lock. */
1227 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
1229 if (!RING_FULL(&rinfo->ring))
1230 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
1233 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
1235 unsigned long flags;
1237 spin_lock_irqsave(&rinfo->ring_lock, flags);
1238 kick_pending_request_queues_locked(rinfo);
1239 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1242 static void blkif_restart_queue(struct work_struct *work)
1244 struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
1246 if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
1247 kick_pending_request_queues(rinfo);
1250 static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1252 struct grant *persistent_gnt, *n;
1253 struct blkfront_info *info = rinfo->dev_info;
1257 * Remove indirect pages; this only happens when using indirect
1258 * descriptors but not persistent grants
1260 if (!list_empty(&rinfo->indirect_pages)) {
1261 struct page *indirect_page, *n;
1263 BUG_ON(info->bounce);
1264 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
1265 list_del(&indirect_page->lru);
1266 __free_page(indirect_page);
1270 /* Remove all persistent grants. */
1271 if (!list_empty(&rinfo->grants)) {
1272 list_for_each_entry_safe(persistent_gnt, n,
1273 &rinfo->grants, node) {
1274 list_del(&persistent_gnt->node);
1275 if (persistent_gnt->gref == GRANT_INVALID_REF ||
1276 !gnttab_try_end_foreign_access(persistent_gnt->gref))
1279 rinfo->persistent_gnts_c--;
1281 __free_page(persistent_gnt->page);
1282 kfree(persistent_gnt);
1286 for (i = 0; i < BLK_RING_SIZE(info); i++) {
1288 * Clear persistent grants present in requests already
1289 * on the shared ring
1291 if (!rinfo->shadow[i].request)
1294 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
1295 rinfo->shadow[i].req.u.indirect.nr_segments :
1296 rinfo->shadow[i].req.u.rw.nr_segments;
1297 for (j = 0; j < segs; j++) {
1298 persistent_gnt = rinfo->shadow[i].grants_used[j];
1299 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1301 __free_page(persistent_gnt->page);
1302 kfree(persistent_gnt);
1305 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
1307 * If this is not an indirect operation don't try to
1308 * free indirect segments
1312 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
1313 persistent_gnt = rinfo->shadow[i].indirect_grants[j];
1314 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1315 __free_page(persistent_gnt->page);
1316 kfree(persistent_gnt);
1320 kfree(rinfo->shadow[i].grants_used);
1321 rinfo->shadow[i].grants_used = NULL;
1322 kfree(rinfo->shadow[i].indirect_grants);
1323 rinfo->shadow[i].indirect_grants = NULL;
1324 kfree(rinfo->shadow[i].sg);
1325 rinfo->shadow[i].sg = NULL;
1328 /* No more gnttab callback work. */
1329 gnttab_cancel_free_callback(&rinfo->callback);
1331 /* Flush gnttab callback work. Must be done with no locks held. */
1332 flush_work(&rinfo->work);
1334 /* Free resources associated with old device channel. */
1335 for (i = 0; i < info->nr_ring_pages; i++) {
1336 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
1337 gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
1338 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1341 free_pages_exact(rinfo->ring.sring,
1342 info->nr_ring_pages * XEN_PAGE_SIZE);
1343 rinfo->ring.sring = NULL;
1346 unbind_from_irqhandler(rinfo->irq, rinfo);
1347 rinfo->evtchn = rinfo->irq = 0;
1350 static void blkif_free(struct blkfront_info *info, int suspend)
1354 /* Prevent new requests being issued until we fix things up. */
1355 info->connected = suspend ?
1356 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
1357 /* No more blkif_request(). */
1359 blk_mq_stop_hw_queues(info->rq);
1361 for (i = 0; i < info->nr_rings; i++)
1362 blkif_free_ring(&info->rinfo[i]);
1369 struct copy_from_grant {
1370 const struct blk_shadow *s;
1371 unsigned int grant_idx;
1372 unsigned int bvec_offset;
1376 static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
1377 unsigned int len, void *data)
1379 struct copy_from_grant *info = data;
1381 /* Convenient aliases */
1382 const struct blk_shadow *s = info->s;
1384 shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
1386 memcpy(info->bvec_data + info->bvec_offset,
1387 shared_data + offset, len);
1389 info->bvec_offset += len;
1392 kunmap_atomic(shared_data);
1395 static enum blk_req_status blkif_rsp_to_req_status(int rsp)
1399 case BLKIF_RSP_OKAY:
1401 case BLKIF_RSP_EOPNOTSUPP:
1402 return REQ_EOPNOTSUPP;
1403 case BLKIF_RSP_ERROR:
1411 * Get the final status of the block request based on two ring response
1413 static int blkif_get_final_status(enum blk_req_status s1,
1414 enum blk_req_status s2)
1416 BUG_ON(s1 < REQ_DONE);
1417 BUG_ON(s2 < REQ_DONE);
1419 if (s1 == REQ_ERROR || s2 == REQ_ERROR)
1420 return BLKIF_RSP_ERROR;
1421 else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
1422 return BLKIF_RSP_EOPNOTSUPP;
1423 return BLKIF_RSP_OKAY;
1428 * 1 response processed.
1429 * 0 missing further responses.
1430 * -1 error while processing.
1432 static int blkif_completion(unsigned long *id,
1433 struct blkfront_ring_info *rinfo,
1434 struct blkif_response *bret)
1437 struct scatterlist *sg;
1438 int num_sg, num_grant;
1439 struct blkfront_info *info = rinfo->dev_info;
1440 struct blk_shadow *s = &rinfo->shadow[*id];
1441 struct copy_from_grant data = {
1445 num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
1446 s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
1448 /* The I/O request may be split in two. */
1449 if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
1450 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
1452 /* Keep the status of the current response in shadow. */
1453 s->status = blkif_rsp_to_req_status(bret->status);
1455 /* Wait the second response if not yet here. */
1456 if (s2->status < REQ_DONE)
1459 bret->status = blkif_get_final_status(s->status,
1463 * All the grants are stored in the first shadow in order
1464 * to make the completion code simpler.
1466 num_grant += s2->req.u.rw.nr_segments;
1469 * The two responses may not come in order. Only the
1470 * first request will store the scatter-gather list.
1472 if (s2->num_sg != 0) {
1473 /* Update "id" with the ID of the first response. */
1474 *id = s->associated_id;
1479 * We don't need the second request anymore, so recycle it now.
1482 if (add_id_to_freelist(rinfo, s->associated_id))
1483 WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
1484 info->gd->disk_name, s->associated_id);
1490 if (bret->operation == BLKIF_OP_READ && info->bounce) {
1491 for_each_sg(s->sg, sg, num_sg, i) {
1492 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
1494 data.bvec_offset = sg->offset;
1495 data.bvec_data = kmap_atomic(sg_page(sg));
1497 gnttab_foreach_grant_in_range(sg_page(sg),
1500 blkif_copy_from_grant,
1503 kunmap_atomic(data.bvec_data);
1506 /* Add the persistent grant into the list of free grants */
1507 for (i = 0; i < num_grant; i++) {
1508 if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
1510 * If the grant is still mapped by the backend (the
1511 * backend has chosen to make this grant persistent)
1512 * we add it at the head of the list, so it will be reused first.
1515 if (!info->feature_persistent) {
1516 pr_alert("backed has not unmapped grant: %u\n",
1517 s->grants_used[i]->gref);
1520 list_add(&s->grants_used[i]->node, &rinfo->grants);
1521 rinfo->persistent_gnts_c++;
1524 * If the grant is not mapped by the backend we add it
1525 * to the tail of the list, so it will not be picked
1526 * again unless we run out of persistent grants.
1528 s->grants_used[i]->gref = GRANT_INVALID_REF;
1529 list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
1532 if (s->req.operation == BLKIF_OP_INDIRECT) {
1533 for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
1534 if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
1535 if (!info->feature_persistent) {
1536 pr_alert("backed has not unmapped grant: %u\n",
1537 s->indirect_grants[i]->gref);
1540 list_add(&s->indirect_grants[i]->node, &rinfo->grants);
1541 rinfo->persistent_gnts_c++;
1543 struct page *indirect_page;
1546 * Add the used indirect page back to the list of
1547 * available pages for indirect grefs.
1549 if (!info->bounce) {
1550 indirect_page = s->indirect_grants[i]->page;
1551 list_add(&indirect_page->lru, &rinfo->indirect_pages);
1553 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1554 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
1562 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1564 struct request *req;
1565 struct blkif_response bret;
1567 unsigned long flags;
1568 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
1569 struct blkfront_info *info = rinfo->dev_info;
1571 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1573 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
1574 xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
1578 spin_lock_irqsave(&rinfo->ring_lock, flags);
1580 rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
1581 virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
1582 if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
1583 pr_alert("%s: illegal number of responses %u\n",
1584 info->gd->disk_name, rp - rinfo->ring.rsp_cons);
1588 for (i = rinfo->ring.rsp_cons; i != rp; i++) {
1594 RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
1598 * The backend has messed up and given us an id that we would
1599 * never have given to it (we stamp it up to BLK_RING_SIZE -
1600 * look in get_id_from_freelist).
1602 if (id >= BLK_RING_SIZE(info)) {
1603 pr_alert("%s: response has incorrect id (%ld)\n",
1604 info->gd->disk_name, id);
1607 if (rinfo->shadow[id].status != REQ_WAITING) {
1608 pr_alert("%s: response references no pending request\n",
1609 info->gd->disk_name);
1613 rinfo->shadow[id].status = REQ_PROCESSING;
1614 req = rinfo->shadow[id].request;
1616 op = rinfo->shadow[id].req.operation;
1617 if (op == BLKIF_OP_INDIRECT)
1618 op = rinfo->shadow[id].req.u.indirect.indirect_op;
1619 if (bret.operation != op) {
1620 pr_alert("%s: response has wrong operation (%u instead of %u)\n",
1621 info->gd->disk_name, bret.operation, op);
1625 if (bret.operation != BLKIF_OP_DISCARD) {
1629 * We may need to wait for an extra response if the
1630 * I/O request is split in 2
1632 ret = blkif_completion(&id, rinfo, &bret);
1635 if (unlikely(ret < 0))
1639 if (add_id_to_freelist(rinfo, id)) {
1640 WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
1641 info->gd->disk_name, op_name(bret.operation), id);
1645 error = (bret.status == BLKIF_RSP_OKAY) ? 0 : -EIO;
1646 switch (bret.operation) {
1647 case BLKIF_OP_DISCARD:
1648 if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
1649 struct request_queue *rq = info->rq;
1651 pr_warn_ratelimited("blkfront: %s: %s op failed\n",
1652 info->gd->disk_name, op_name(bret.operation));
1653 error = -EOPNOTSUPP;
1654 info->feature_discard = 0;
1655 info->feature_secdiscard = 0;
1656 queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
1657 queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
1659 blk_mq_complete_request(req, error);
1661 case BLKIF_OP_FLUSH_DISKCACHE:
1662 case BLKIF_OP_WRITE_BARRIER:
1663 if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
1664 pr_warn_ratelimited("blkfront: %s: %s op failed\n",
1665 info->gd->disk_name, op_name(bret.operation));
1666 error = -EOPNOTSUPP;
1668 if (unlikely(bret.status == BLKIF_RSP_ERROR &&
1669 rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1670 pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
1671 info->gd->disk_name, op_name(bret.operation));
1672 error = -EOPNOTSUPP;
1674 if (unlikely(error)) {
1675 if (error == -EOPNOTSUPP)
1677 info->feature_fua = 0;
1678 info->feature_flush = 0;
1683 case BLKIF_OP_WRITE:
1684 if (unlikely(bret.status != BLKIF_RSP_OKAY))
1685 dev_dbg_ratelimited(&info->xbdev->dev,
1686 "Bad return from blkdev data request: %#x\n",
1689 blk_mq_complete_request(req, error);
1696 rinfo->ring.rsp_cons = i;
1698 if (i != rinfo->ring.req_prod_pvt) {
1700 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
1704 rinfo->ring.sring->rsp_event = i + 1;
1706 kick_pending_request_queues_locked(rinfo);
1708 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1710 xen_irq_lateeoi(irq, eoiflag);
1715 info->connected = BLKIF_STATE_ERROR;
1717 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1719 /* No EOI in order to avoid further interrupts. */
1721 pr_alert("%s disabled for further use\n", info->gd->disk_name);
1726 static int setup_blkring(struct xenbus_device *dev,
1727 struct blkfront_ring_info *rinfo)
1729 struct blkif_sring *sring;
1731 struct blkfront_info *info = rinfo->dev_info;
1732 unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
1733 grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
1735 for (i = 0; i < info->nr_ring_pages; i++)
1736 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1738 sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
1740 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
1743 SHARED_RING_INIT(sring);
1744 FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
1746 err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
1748 free_pages_exact(sring, ring_size);
1749 rinfo->ring.sring = NULL;
1752 for (i = 0; i < info->nr_ring_pages; i++)
1753 rinfo->ring_ref[i] = gref[i];
1755 err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
1759 err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
1762 xenbus_dev_fatal(dev, err,
1763 "bind_evtchn_to_irqhandler failed");
1770 blkif_free(info, 0);
1775 * Write out per-ring/queue nodes including ring-ref and event-channel, and each
1776 * ring buffer may have multiple pages depending on ->nr_ring_pages.
1778 static int write_per_ring_nodes(struct xenbus_transaction xbt,
1779 struct blkfront_ring_info *rinfo, const char *dir)
1783 const char *message = NULL;
1784 struct blkfront_info *info = rinfo->dev_info;
1786 if (info->nr_ring_pages == 1) {
1787 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
1789 message = "writing ring-ref";
1790 goto abort_transaction;
1793 for (i = 0; i < info->nr_ring_pages; i++) {
1794 char ring_ref_name[RINGREF_NAME_LEN];
1796 snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
1797 err = xenbus_printf(xbt, dir, ring_ref_name,
1798 "%u", rinfo->ring_ref[i]);
1800 message = "writing ring-ref";
1801 goto abort_transaction;
1806 err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
1808 message = "writing event-channel";
1809 goto abort_transaction;
1815 xenbus_transaction_end(xbt, 1);
1817 xenbus_dev_fatal(info->xbdev, err, "%s", message);
1822 /* Common code used when first setting up, and when resuming. */
1823 static int talk_to_blkback(struct xenbus_device *dev,
1824 struct blkfront_info *info)
1826 const char *message = NULL;
1827 struct xenbus_transaction xbt;
1829 unsigned int i, max_page_order = 0;
1830 unsigned int ring_page_order = 0;
1831 unsigned int trusted;
1833 /* Check if backend is trusted. */
1834 err = xenbus_scanf(XBT_NIL, dev->nodename, "trusted", "%u", &trusted);
1837 info->bounce = !xen_blkif_trusted || !trusted;
1839 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1840 "max-ring-page-order", "%u", &max_page_order);
1842 info->nr_ring_pages = 1;
1844 ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1845 info->nr_ring_pages = 1 << ring_page_order;
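/*
 * Negotiation example (illustrative): if the backend advertises
 * max-ring-page-order = 4 while the max_ring_page_order module parameter is
 * left at 0, ring_page_order stays 0 and one ring page is used; with the
 * parameter set to 2 the frontend would take min(2, 4) = 2, i.e. four
 * ring pages.
 */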
1848 for (i = 0; i < info->nr_rings; i++) {
1849 struct blkfront_ring_info *rinfo = &info->rinfo[i];
1851 /* Create shared ring, alloc event channel. */
1852 err = setup_blkring(dev, rinfo);
1854 goto destroy_blkring;
1858 err = xenbus_transaction_start(&xbt);
1860 xenbus_dev_fatal(dev, err, "starting transaction");
1861 goto destroy_blkring;
1864 if (info->nr_ring_pages > 1) {
1865 err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
1868 message = "writing ring-page-order";
1869 goto abort_transaction;
1873 /* We already got the number of queues/rings in _probe */
1874 if (info->nr_rings == 1) {
1875 err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
1877 goto destroy_blkring;
1882 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
1885 message = "writing multi-queue-num-queues";
1886 goto abort_transaction;
1889 pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
1890 path = kmalloc(pathsize, GFP_KERNEL);
1893 message = "ENOMEM while writing ring references";
1894 goto abort_transaction;
1897 for (i = 0; i < info->nr_rings; i++) {
1898 memset(path, 0, pathsize);
1899 snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
1900 err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
1903 goto destroy_blkring;
1908 err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
1909 XEN_IO_PROTO_ABI_NATIVE);
1911 message = "writing protocol";
1912 goto abort_transaction;
1914 err = xenbus_printf(xbt, dev->nodename,
1915 "feature-persistent", "%u", 1);
1918 "writing persistent grants feature to xenbus");
1920 err = xenbus_transaction_end(xbt, 0);
1924 xenbus_dev_fatal(dev, err, "completing transaction");
1925 goto destroy_blkring;
1928 for (i = 0; i < info->nr_rings; i++) {
1930 struct blkfront_ring_info *rinfo = &info->rinfo[i];
1932 for (j = 0; j < BLK_RING_SIZE(info); j++)
1933 rinfo->shadow[j].req.u.rw.id = j + 1;
1934 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
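/*
 * This seeds the shadow free list used by get_id_from_freelist(): each
 * entry's id points at the next index and the last entry holds the
 * 0x0fffffff sentinel, so all BLK_RING_SIZE(info) slots can be handed out
 * in order.
 */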
1936 xenbus_switch_state(dev, XenbusStateInitialised);
1941 xenbus_transaction_end(xbt, 1);
1943 xenbus_dev_fatal(dev, err, "%s", message);
1945 blkif_free(info, 0);
1948 dev_set_drvdata(&dev->dev, NULL);
1953 static int negotiate_mq(struct blkfront_info *info)
1955 unsigned int backend_max_queues = 0;
1959 BUG_ON(info->nr_rings);
1961 /* Check if backend supports multiple queues. */
1962 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1963 "multi-queue-max-queues", "%u", &backend_max_queues);
1965 backend_max_queues = 1;
1967 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1968 /* We need at least one ring. */
1969 if (!info->nr_rings)
1972 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1974 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1978 for (i = 0; i < info->nr_rings; i++) {
1979 struct blkfront_ring_info *rinfo;
1981 rinfo = &info->rinfo[i];
1982 INIT_LIST_HEAD(&rinfo->indirect_pages);
1983 INIT_LIST_HEAD(&rinfo->grants);
1984 rinfo->dev_info = info;
1985 INIT_WORK(&rinfo->work, blkif_restart_queue);
1986 spin_lock_init(&rinfo->ring_lock);
1991 * Entry point to this code when a new device is created. Allocate the basic
1992 * structures and the ring buffer for communication with the backend, and
1993 * inform the backend of the appropriate details for those. Switch to
1994 * Initialised state.
1996 static int blkfront_probe(struct xenbus_device *dev,
1997 const struct xenbus_device_id *id)
2000 struct blkfront_info *info;
2002 /* FIXME: Use dynamic device id if this is not set. */
2003 err = xenbus_scanf(XBT_NIL, dev->nodename,
2004 "virtual-device", "%i", &vdevice);
2006 /* go looking in the extended area instead */
2007 err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
2010 xenbus_dev_fatal(dev, err, "reading virtual-device");
2015 if (xen_hvm_domain()) {
2018 /* no unplug has been done: do not hook devices != xen vbds */
2019 if (xen_has_pv_and_legacy_disk_devices()) {
2022 if (!VDEV_IS_EXTENDED(vdevice))
2023 major = BLKIF_MAJOR(vdevice);
2025 major = XENVBD_MAJOR;
2027 if (major != XENVBD_MAJOR) {
2029 "%s: HVM does not support vbd %d as xen block device\n",
2034 /* do not create a PV cdrom device if we are an HVM guest */
2035 type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
2038 if (strncmp(type, "cdrom", 5) == 0) {
2044 info = kzalloc(sizeof(*info), GFP_KERNEL);
2046 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
2051 err = negotiate_mq(info);
2057 mutex_init(&info->mutex);
2058 info->vdevice = vdevice;
2059 info->connected = BLKIF_STATE_DISCONNECTED;
2061 /* Front end dir is a number, which is used as the id. */
2062 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
2063 dev_set_drvdata(&dev->dev, info);
2068 static void split_bio_end(struct bio *bio)
2070 struct split_bio *split_bio = bio->bi_private;
2072 if (atomic_dec_and_test(&split_bio->pending)) {
2073 split_bio->bio->bi_phys_segments = 0;
2074 split_bio->bio->bi_error = bio->bi_error;
2075 bio_endio(split_bio->bio);
2081 static int blkif_recover(struct blkfront_info *info)
2083 unsigned int i, r_index;
2084 struct request *req, *n;
2086 struct bio *bio, *cloned_bio;
2087 unsigned int segs, offset;
2089 struct split_bio *split_bio;
2091 blkfront_gather_backend_features(info);
2092 /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
2093 blkif_set_queue_limits(info);
2094 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
2095 blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
2097 for (r_index = 0; r_index < info->nr_rings; r_index++) {
2098 struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
2100 rc = blkfront_setup_indirect(rinfo);
2104 xenbus_switch_state(info->xbdev, XenbusStateConnected);
2106 /* Now safe for us to use the shared ring */
2107 info->connected = BLKIF_STATE_CONNECTED;
2109 for (r_index = 0; r_index < info->nr_rings; r_index++) {
2110 struct blkfront_ring_info *rinfo;
2112 rinfo = &info->rinfo[r_index];
2113 /* Kick any other new requests queued since we resumed */
2114 kick_pending_request_queues(rinfo);
2117 list_for_each_entry_safe(req, n, &info->requests, queuelist) {
2118 /* Requeue pending requests (flush or discard) */
2119 list_del_init(&req->queuelist);
2120 BUG_ON(req->nr_phys_segments > segs);
2121 blk_mq_requeue_request(req);
2123 blk_mq_kick_requeue_list(info->rq);
2125 while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
2126 /* Traverse the list of pending bios and re-queue them */
2127 if (bio_segments(bio) > segs) {
2129 * This bio has more segments than we can
2130 * handle, so we have to split it.
2132 pending = (bio_segments(bio) + segs - 1) / segs;
2133 split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
2134 BUG_ON(split_bio == NULL);
2135 atomic_set(&split_bio->pending, pending);
2136 split_bio->bio = bio;
2137 for (i = 0; i < pending; i++) {
2138 offset = (i * segs * XEN_PAGE_SIZE) >> 9;
2139 size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
2140 (unsigned int)bio_sectors(bio) - offset);
2141 cloned_bio = bio_clone(bio, GFP_NOIO);
2142 BUG_ON(cloned_bio == NULL);
2143 bio_trim(cloned_bio, offset, size);
2144 cloned_bio->bi_private = split_bio;
2145 cloned_bio->bi_end_io = split_bio_end;
2146 submit_bio(cloned_bio);
2149 * Now we have to wait for all those smaller bios to
2150 * end, so we can also end the "parent" bio.
2154 /* We don't need to split this bio */
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err = 0;
	unsigned int i, j;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	bio_list_init(&info->bio_list);
	INIT_LIST_HEAD(&info->requests);
	for (i = 0; i < info->nr_rings; i++) {
		struct blkfront_ring_info *rinfo = &info->rinfo[i];
		struct bio_list merge_bio;
		struct blk_shadow *shadow = rinfo->shadow;

		for (j = 0; j < BLK_RING_SIZE(info); j++) {
			/* Not in use? */
			if (!shadow[j].request)
				continue;

			/*
			 * Get the bios in the request so we can re-queue them.
			 */
			if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
			    req_op(shadow[j].request) == REQ_OP_DISCARD ||
			    req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
			    shadow[j].request->cmd_flags & REQ_FUA) {
				/*
				 * Flush operations don't contain bios, so
				 * we need to requeue the whole request
				 *
				 * XXX: but this doesn't make any sense for a
				 * write with the FUA flag set..
				 */
				list_add(&shadow[j].request->queuelist, &info->requests);
				continue;
			}
			merge_bio.head = shadow[j].request->bio;
			merge_bio.tail = shadow[j].request->biotail;
			bio_list_merge(&info->bio_list, &merge_bio);
			shadow[j].request->bio = NULL;
			blk_mq_end_request(shadow[j].request, 0);
		}
	}

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = negotiate_mq(info);
	if (err)
		return err;

	err = talk_to_blkback(dev, info);
	if (!err)
		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

	/*
	 * We have to wait for the backend to switch to
	 * connected state, since we want to read which
	 * features it supports.
	 */

	return err;
}
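/*
 * Rough resume sequence (summary of the code above, for orientation):
 * blkfront_resume() stashes in-flight bios and whole flush/discard requests
 * and tears down the rings, talk_to_blkback() renegotiates the shared rings
 * with the backend, and once blkback_changed() sees XenbusStateConnected,
 * blkfront_connect() calls blkif_recover() to resubmit the stashed work.
 */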
static void blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);
	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (bdev == NULL) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}
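/*
 * Summary of the close path: if any opener still holds the block device,
 * blkfront_closing() only reports -EBUSY and leaves the frontend in the
 * Closing state; the actual teardown is deferred to blkif_release(), which
 * re-checks the Closing state once the last opener goes away.
 */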
static void blkfront_setup_discard(struct blkfront_info *info)
{
	int err;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int discard_secure;

	info->feature_discard = 1;
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
		"discard-granularity", "%u", &discard_granularity,
		"discard-alignment", "%u", &discard_alignment,
		NULL);
	if (!err) {
		info->discard_granularity = discard_granularity;
		info->discard_alignment = discard_alignment;
	}
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "discard-secure", "%u", &discard_secure);
	if (err > 0)
		info->feature_secdiscard = !!discard_secure;
}
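/*
 * Example of the backend nodes consumed above (values are illustrative):
 *
 *   .../discard-granularity = "4096"
 *   .../discard-alignment   = "0"
 *   .../discard-secure      = "1"
 *
 * "feature-discard" itself is checked by the caller,
 * blkfront_gather_backend_features(), before this helper runs.
 */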
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{
	unsigned int psegs, grants;
	int err, i;
	struct blkfront_info *info = rinfo->dev_info;

	if (info->max_indirect_segments == 0) {
		if (!HAS_EXTRA_REQ)
			grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
		else {
			/*
			 * When an extra req is required, the maximum
			 * grants supported is related to the size of the
			 * Linux block segment.
			 */
			grants = GRANTS_PER_PSEG;
		}
	} else
		grants = info->max_indirect_segments;
	psegs = grants / GRANTS_PER_PSEG;

	err = fill_grant_buffer(rinfo,
				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
	if (err)
		goto out_of_memory;

	if (!info->bounce && info->max_indirect_segments) {
		/*
		 * We are using indirect descriptors but don't have a bounce
		 * buffer, we need to allocate a set of pages that can be
		 * used for mapping indirect grefs
		 */
		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);

		BUG_ON(!list_empty(&rinfo->indirect_pages));
		for (i = 0; i < num; i++) {
			struct page *indirect_page = alloc_page(GFP_NOIO |
								__GFP_ZERO);
			if (!indirect_page)
				goto out_of_memory;
			list_add(&indirect_page->lru, &rinfo->indirect_pages);
		}
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		rinfo->shadow[i].grants_used = kzalloc(
			sizeof(rinfo->shadow[i].grants_used[0]) * grants,
			GFP_NOIO);
		rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * psegs, GFP_NOIO);
		if (info->max_indirect_segments)
			rinfo->shadow[i].indirect_grants = kzalloc(
				sizeof(rinfo->shadow[i].indirect_grants[0]) *
				INDIRECT_GREFS(grants),
				GFP_NOIO);
		if ((rinfo->shadow[i].grants_used == NULL) ||
		    (rinfo->shadow[i].sg == NULL) ||
		    (info->max_indirect_segments &&
		     (rinfo->shadow[i].indirect_grants == NULL)))
			goto out_of_memory;
		sg_init_table(rinfo->shadow[i].sg, psegs);
	}

	return 0;

out_of_memory:
	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		kfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
		kfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
	}
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}
	return -ENOMEM;
}
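/*
 * Worked example of the sizing above, assuming 4 KiB Xen pages (so
 * GRANTS_PER_PSEG == 1) and max_indirect_segments == 32: grants = 32,
 * psegs = 32, and INDIRECT_GREFS(32) adds one extra grant per request for
 * the page holding the indirect segment descriptors, so fill_grant_buffer()
 * pre-allocates (32 + 1) * BLK_RING_SIZE(info) grants for this ring.
 */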
/*
 * Gather all backend feature-*
 */
static void blkfront_gather_backend_features(struct blkfront_info *info)
{
	int err;
	int barrier, flush, discard, persistent;
	unsigned int indirect_segments;

	info->feature_flush = 0;
	info->feature_fua = 0;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-barrier", "%d", &barrier);

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
	if (err > 0 && barrier) {
		info->feature_flush = 1;
		info->feature_fua = 1;
	}

	/*
	 * And if there is "feature-flush-cache" use that above
	 * barriers.
	 */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-flush-cache", "%d", &flush);

	if (err > 0 && flush) {
		info->feature_flush = 1;
		info->feature_fua = 0;
	}

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-discard", "%d", &discard);

	if (err > 0 && discard)
		blkfront_setup_discard(info);

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-persistent", "%d", &persistent);
	if (err <= 0)
		info->feature_persistent = 0;
	else
		info->feature_persistent = persistent;
	if (info->feature_persistent)
		info->bounce = true;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-max-indirect-segments", "%u",
			   &indirect_segments);
	if (err <= 0)
		info->max_indirect_segments = 0;
	else
		info->max_indirect_segments = min(indirect_segments,
						  xen_blkif_max_segments);
}
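/*
 * Example backend feature nodes read above (illustrative values):
 *
 *   .../feature-barrier               = "1"   -> flush + FUA
 *   .../feature-flush-cache           = "1"   -> flush only, FUA cleared
 *   .../feature-discard               = "1"   -> blkfront_setup_discard()
 *   .../feature-persistent            = "1"   -> persistent grants + bounce
 *   .../feature-max-indirect-segments = "256" -> capped by xen_blkif_max_segments
 */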
/*
 * Invoked when the backend is finally 'ready' (and has produced
 * the details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int physical_sector_size;
	unsigned int binfo;
	int err, i;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		return;
	case BLKIF_STATE_SUSPENDED:
		/*
		 * If we are recovering from suspension, we need to wait
		 * for the backend to announce its features before
		 * reconnecting, at least we need to know if the backend
		 * supports indirect descriptors, and how many.
		 */
		blkif_recover(info);
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	/*
	 * physical-sector-size is a newer field, so old backends may not
	 * provide this. Assume physical sector size to be the same as
	 * sector_size in that case.
	 */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "physical-sector-size", "%u", &physical_sector_size);
	if (err != 1)
		physical_sector_size = sector_size;

	blkfront_gather_backend_features(info);
	for (i = 0; i < info->nr_rings; i++) {
		err = blkfront_setup_indirect(&info->rinfo[i]);
		if (err) {
			xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
					 info->xbdev->otherend);
			blkif_free(info, 0);
			break;
		}
	}

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
				  physical_sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		goto fail;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	info->connected = BLKIF_STATE_CONNECTED;
	for (i = 0; i < info->nr_rings; i++)
		kick_pending_request_queues(&info->rinfo[i]);

	device_add_disk(&info->xbdev->dev, info->gd);

	info->is_ready = 1;
	return;

fail:
	blkif_free(info, 0);
	return;
}
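/*
 * Note on the BLKIF_STATE_CONNECTED branch above: a toolstack-initiated
 * resize (e.g. "xl block-resize") only rewrites the backend "sectors" node
 * and re-signals XenbusStateConnected, so an already-connected frontend
 * just re-reads "sectors" and calls set_capacity()/revalidate_disk()
 * instead of doing a full reconnect.
 */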
/*
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (talk_to_blkback(dev, info))
			break;
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateConnected:
		/*
		 * talk_to_blkback sets state to XenbusStateInitialised
		 * and blkfront_connect sets it to XenbusStateConnected
		 * (if connection went OK).
		 *
		 * If the backend (or toolstack) decides to poke at backend
		 * state (and re-trigger the watch by setting the state repeatedly
		 * to XenbusStateConnected (4)) we need to deal with this.
		 * This is allowed as this is used to communicate to the guest
		 * that the size of disk has changed!
		 */
		if ((dev->state != XenbusStateInitialised) &&
		    (dev->state != XenbusStateConnected)) {
			if (talk_to_blkback(dev, info))
				break;
		}

		blkfront_connect(info);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's Closing state -- fallthrough */
	case XenbusStateClosing:
		if (info)
			blkfront_closing(info);
		break;
	}
}
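/*
 * Typical xenbus handshake handled above (frontend state in parentheses):
 * backend InitWait -> talk_to_blkback() (Initialised) -> backend Connected
 * -> blkfront_connect() (Connected).  Closing/Closed from the backend is
 * routed to blkfront_closing(), which may defer if the disk is still open.
 */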
static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);
	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);
	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */
	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}
static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);
	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}
	mutex_lock(&info->mutex);
	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;
	mutex_unlock(&info->mutex);
out:
	mutex_unlock(&blkfront_mutex);
	return err;
}

static void blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);
	bdev = bdget_disk(disk, 0);
	if (bdev == NULL) {
		WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
		goto out_mutex;
	}
	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */
	mutex_lock(&info->mutex);
	xbdev = info->xbdev;
	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}
	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}
out:
	bdput(bdev);
out_mutex:
	mutex_unlock(&blkfront_mutex);
}
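/*
 * blkif_open() fails with -ERESTARTSYS when the private data or gendisk has
 * already been torn down (hot-unplug racing with open).  blkif_release()
 * performs the teardown that was deferred while openers were still present:
 * it either completes a pending Closing transition or, if the xenbus device
 * vanished entirely, frees the blkfront_info itself.
 */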
static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};
static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};
static struct xenbus_driver blkfront_driver = {
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};
static int __init xlblk_init(void)
{
	int ret;
	int nr_cpus = num_online_cpus();

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}
	if (xen_blkif_max_queues > nr_cpus) {
		pr_info("Invalid max_queues (%d), will use default max: %d.\n",
			xen_blkif_max_queues, nr_cpus);
		xen_blkif_max_queues = nr_cpus;
	}
	if (!xen_has_pv_disk_devices())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);
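/*
 * The clamping in xlblk_init() corresponds to the module parameters declared
 * earlier in this file.  Assuming the usual parameter names, a guest could
 * request e.g. a larger ring or more queues at boot with something like:
 *
 *   xen_blkfront.max_ring_page_order=4 xen_blkfront.max_queues=4
 *
 * Out-of-range values are clamped here rather than rejected.
 */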
static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront_driver);
	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");