// SPDX-License-Identifier: GPL-2.0-or-later
/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd
*/

#define pr_fmt(fmt) "xen-blkback: " fmt
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"
/* On the XenBus the max length of 'ring-ref%u'. */
#define RINGREF_NAME_LEN (20)
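/* "ring-ref" (8 chars) + up to 10 digits for a 32-bit %u + '\0' = 19 bytes. */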
struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};
static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char *,
			    const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

/*
 * The last request could free the device from softirq context and
 * xen_blkif_free() can sleep.
 */
static void xen_blkif_deferred_free(struct work_struct *work)
{
	struct xen_blkif *blkif;

	blkif = container_of(work, struct xen_blkif, free_work);
	xen_blkif_free(blkif);
}
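/*
 * Build the thread name "<domid>.<devname>" from the "dev" node that
 * the hotplug scripts wrote; it names the per-ring xenblkd threads
 * started in xen_update_blkif_status().
 */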
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}
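/*
 * Try to bring the device up: this only proceeds once the rings are
 * mapped, the event channel is bound and the backing bdev is open.
 * It then writes the final details to the store, switches the device
 * to Connected and starts one xenblkd thread per ring.
 */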
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];
	struct xen_blkif_ring *ring;
	int i;

	/* Not ready to connect? */
	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	for (i = 0; i < blkif->nr_rings; i++) {
		ring = &blkif->rings[i];
		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i);
		if (IS_ERR(ring->xenblkd)) {
			err = PTR_ERR(ring->xenblkd);
			ring->xenblkd = NULL;
			xenbus_dev_fatal(blkif->be->dev, err,
					 "start %s-%d xenblkd", name, i);
			goto out;
		}
	}
	return;

out:
	while (--i >= 0) {
		ring = &blkif->rings[i];
		kthread_stop(ring->xenblkd);
	}
}
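/*
 * Allocate the per-queue ring structures and initialise the locks,
 * wait queues and page caches that each xenblkd thread relies on.
 */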
static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
	unsigned int r;

	blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
			       GFP_KERNEL);
	if (!blkif->rings)
		return -ENOMEM;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];

		spin_lock_init(&ring->blk_ring_lock);
		init_waitqueue_head(&ring->wq);
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
		gnttab_page_cache_init(&ring->free_pages);

		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
		ring->active = true;
	}

	return 0;
}
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	atomic_set(&blkif->refcnt, 1);
	init_completion(&blkif->drain_complete);

	/*
	 * Because freeing back to the cache may be deferred, it is not
	 * safe to unload the module (and hence destroy the cache) until
	 * this has completed. To prevent premature unloading, take an
	 * extra module reference here and release only when the object
	 * has been freed back to the cache.
	 */
	__module_get(THIS_MODULE);
	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);

	return blkif;
}
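/*
 * Map the frontend's shared ring pages and bind its event channel;
 * called from connect_ring() after the ring references have been
 * read from the store.
 */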
static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
			 unsigned int nr_grefs, unsigned int evtchn)
{
	int err;
	struct xen_blkif *blkif = ring->blkif;
	const struct blkif_common_sring *sring_common;
	RING_IDX rsp_prod, req_prod;
	unsigned int size;

	/* Already connected through? */
	if (ring->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
				     &ring->blk_ring);
	if (err < 0)
		return err;

	sring_common = (struct blkif_common_sring *)ring->blk_ring;
	rsp_prod = READ_ONCE(sring_common->rsp_prod);
	req_prod = READ_ONCE(sring_common->req_prod);
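	/*
	 * BACK_RING_ATTACH (rather than BACK_RING_INIT) picks up the
	 * producer/consumer indices already in the shared ring, so a
	 * backend that re-attaches (see allow_rebind below) does not
	 * lose requests the frontend has queued.
	 */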
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring_native =
			(struct blkif_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32 =
			(struct blkif_x86_32_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64 =
			(struct blkif_x86_64_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	default:
		BUG();
	}
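	/*
	 * Sanity check: a frontend advertising more outstanding
	 * requests than fit in the ring is buggy or malicious, so
	 * refuse to attach to it.
	 */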
	err = -EIO;
	if (req_prod - rsp_prod > size)
		goto fail;

	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->be->dev,
			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
	if (err < 0)
		goto fail;
	ring->irq = err;

	return 0;

fail:
	xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
	ring->blk_rings.common.sring = NULL;
	return err;
}
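/*
 * Tear down the connection. Returns -EBUSY, without freeing the ring
 * structures, while any ring still has I/O in flight; it is safe to
 * call again later.
 */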
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;
	bool busy = false;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

		if (!ring->active)
			continue;

		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			ring->xenblkd = NULL;
			wake_up(&ring->shutdown_wq);
		}

		/* The above kthread_stop() guarantees that at this point we
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
		if (atomic_read(&ring->inflight) > 0) {
			busy = true;
			continue;
		}

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
			ring->irq = 0;
		}

		if (ring->blk_rings.common.sring) {
			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
			ring->blk_rings.common.sring = NULL;
		}

		/* Remove all persistent grants and the cache of ballooned pages. */
		xen_blkbk_free_caches(ring);

		/* Check that there is no request in use */
		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
			list_del(&req->free_list);

			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
				kfree(req->segments[j]);

			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
				kfree(req->indirect_pages[j]);

			kfree(req);
			i++;
		}

		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(ring->free_pages.num_pages != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
	if (busy)
		return -EBUSY;

	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring, so we should free it
	 * here.
	 */
	kfree(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;

	return 0;
}
static void xen_blkif_free(struct xen_blkif *blkif)
{
	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
	kfree(blkif->be->mode);
	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
	module_put(THIS_MODULE);
}
int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

void xen_blkif_interface_fini(void)
{
	kmem_cache_destroy(xen_blkif_cachep);
	xen_blkif_cachep = NULL;
}
/*
 *  sysfs interface for VBD I/O requests
 */

#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned int i;						\
		unsigned long long result = 0;				\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)
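/* Per-VBD statistics, each summed over all rings of the device. */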
VBD_SHOW_ALLRING(oo_req, "%llu\n");
VBD_SHOW_ALLRING(rd_req, "%llu\n");
VBD_SHOW_ALLRING(wr_req, "%llu\n");
VBD_SHOW_ALLRING(f_req, "%llu\n");
VBD_SHOW_ALLRING(ds_req, "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");
static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static const struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};
#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);
static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}
static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}
/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent,
		 "Enables the persistent grants feature");
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		pr_warn("xen_vbd_create: device %08x could not be opened\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		vbd->flush_support = true;

	if (q && blk_queue_secure_erase(q))
		vbd->discard_secure = true;

	vbd->feature_gnt_persistent = feature_persistent;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
		 handle, blkif->domid);
	return 0;
}
static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	dev_set_drvdata(&dev->dev, NULL);

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);

		/* Put the reference we set in xen_blkif_alloc(). */
		xen_blkif_put(be->blkif);
	}

	return 0;
}
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}
static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = be->blkif->vbd.bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
		return;

	if (blk_queue_discard(q)) {
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-granularity", "%u",
				    q->limits.discard_granularity);
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-alignment", "%u",
				    q->limits.discard_alignment);
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}
int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}
/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers. Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);

	/* match the pr_debug in xen_blkbk_remove */
	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev,
			 "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	/* Multi-queue: advertise how many queues are supported by us. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenblk_max_queues);
	if (err)
		pr_warn("Error writing multi-queue-max-queues\n");

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
				   backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
			    xen_blkif_max_ring_order);
	if (err)
		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	xen_blkbk_remove(dev);
	return err;
}
/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node. Read it and the mode node, and create a vbd. If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this. Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Front end dir is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		return;
	}

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}
/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices. connect_ring() already reported the
			 * error.
			 */
			xen_blkif_disconnect(be->blkif);
			break;
		}
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;
		/* if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
/* Once memory pressure is detected, squeeze free page pools for a while. */
static unsigned int buffer_squeeze_duration_ms = 10;
module_param_named(buffer_squeeze_duration_ms,
		buffer_squeeze_duration_ms, int, 0644);
MODULE_PARM_DESC(buffer_squeeze_duration_ms,
"Duration in ms to squeeze pages buffer when memory pressure is detected");
/*
 * Callback received when memory pressure is detected.
 */
static void reclaim_memory(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	if (!be)
		return;
	be->blkif->buffer_squeeze_end = jiffies +
			msecs_to_jiffies(buffer_squeeze_duration_ms);
}
/* ** Connection ** */
/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	pr_debug("%s %s\n", __func__, dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it, that is OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
			    be->blkif->vbd.feature_gnt_persistent);
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
			    bdev_physical_block_size(be->blkif->vbd.bdev));
	if (err)
		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
				 dev->nodename);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;

abort:
	xenbus_transaction_end(xbt, 1);
}
/*
 * Each ring may span multiple pages, depending on "ring-page-order".
 */
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
{
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	struct pending_req *req, *n;
	int err, i, j;
	struct xen_blkif *blkif = ring->blkif;
	struct xenbus_device *dev = blkif->be->dev;
	unsigned int nr_grefs, evtchn;

	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
		return err;
	}

	nr_grefs = blkif->nr_ring_pages;

	if (unlikely(!nr_grefs)) {
		WARN_ON(true);
		return -EINVAL;
	}

	for (i = 0; i < nr_grefs; i++) {
		char ring_ref_name[RINGREF_NAME_LEN];

		if (blkif->multi_ref)
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
		else {
			WARN_ON(i != 0);
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
		}

		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
				   "%u", &ring_ref[i]);
		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/%s",
					 dir, ring_ref_name);
			return err;
		}
	}

	err = -ENOMEM;
	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			goto fail;
		list_add_tail(&req->free_list, &ring->pending_free);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
			if (!req->segments[j])
				goto fail;
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
							 GFP_KERNEL);
			if (!req->indirect_pages[j])
				goto fail;
		}
	}

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
		goto fail;
	}

	return 0;

fail:
	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
		list_del(&req->free_list);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			if (!req->segments[j])
				break;
			kfree(req->segments[j]);
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			if (!req->indirect_pages[j])
				break;
			kfree(req->indirect_pages[j]);
		}
		kfree(req);
	}
	return err;
}
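/*
 * Negotiate the connection parameters published by the frontend:
 * ring protocol, persistent grants, number of queues and ring page
 * order, then read the ring references for every queue.
 */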
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	char protocol[64] = "";
	int err, i;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
	unsigned int requested_num_queues = 0;
	unsigned int ring_page_order;

	pr_debug("%s %s\n", __func__, dev->otherend);

	blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
			   "%63s", protocol);
	if (err <= 0)
		strcpy(protocol, "unspecified, assuming default");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -ENOSYS;
	}

	if (blkif->vbd.feature_gnt_persistent)
		blkif->vbd.feature_gnt_persistent =
			xenbus_read_unsigned(dev->otherend,
					     "feature-persistent", 0);

	blkif->vbd.overflow_max_grants = 0;

	/*
	 * Read the number of hardware queues from the frontend.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
						    "multi-queue-num-queues",
						    1);
	if (requested_num_queues > xenblk_max_queues
	    || requested_num_queues == 0) {
		/* Buggy or malicious guest. */
		xenbus_dev_fatal(dev, err,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenblk_max_queues);
		return -ENOSYS;
	}
	blkif->nr_rings = requested_num_queues;
	if (xen_blkif_alloc_rings(blkif))
		return -ENOMEM;

	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
		blkif->nr_rings, blkif->blk_protocol, protocol,
		blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
			   &ring_page_order);
	if (err != 1) {
		blkif->nr_ring_pages = 1;
		blkif->multi_ref = false;
	} else if (ring_page_order <= xen_blkif_max_ring_order) {
		blkif->nr_ring_pages = 1 << ring_page_order;
		blkif->multi_ref = true;
	} else {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err,
				 "requested ring page order %d exceeds max: %d",
				 ring_page_order,
				 xen_blkif_max_ring_order);
		return err;
	}

	if (blkif->nr_rings == 1)
		return read_per_ring_refs(&blkif->rings[0], dev->otherend);

	xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
	xspath = kmalloc(xspathsize, GFP_KERNEL);
	if (!xspath) {
		xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
		return -ENOMEM;
	}

	for (i = 0; i < blkif->nr_rings; i++) {
		memset(xspath, 0, xspathsize);
		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
		err = read_per_ring_refs(&blkif->rings[i], xspath);
		if (err) {
			kfree(xspath);
			return err;
		}
	}
	kfree(xspath);

	return 0;
}
static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};
static struct xenbus_driver xen_blkbk_driver = {
	.ids = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed,
	.allow_rebind = true,
	.reclaim_memory = reclaim_memory,
};
int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}

void xen_blkif_xenbus_fini(void)
{
	xenbus_unregister_driver(&xen_blkbk_driver);
}